##################    MULTI SAMPLING GAM FUSION METHOD ASSESSMENT ####################################
############################ Merging LST and station data ##########################################
#This script interpolates tmax values using MODIS LST and GHCND station data over a given
#interpolation area. It requires the text file of stations and a shape file of the study area.
#Note that the projection for both the GHCND stations and the study area is lonlat WGS84.
#Options to run this program are:
#1) Multisampling: vary the proportions of hold-out and use random samples for each run
#2) Constant sampling: use the same sample over the runs
#3) Over dates: run over, for example, 365 dates without multisampling
#4) Use seed number: use a seed if random samples must be repeatable
#5) GAM fusion: possibility of running GAM+FUSION or GAM+CAI and other options added
#The interpolation is done first at the monthly time scale, then delta surfaces are added.
#AUTHOR: Benoit Parmentier
#DATE: 02/26/2013
#PROJECT: NCEAS IPLANT: Environment and Organisms --TASK#568--
###################################################################################################

###Loading R libraries and packages                                                      
library(gtools)                              #Loading some useful tools 
library(mgcv)                                #GAM package by Simon Wood
library(sp)                                  #Spatial package with class definitions by Bivand et al.
library(spdep)                               #Spatial package with methods and spatial stats by Bivand et al.
library(rgdal)                               #GDAL wrapper for R, spatial utilities
library(gstat)                               #Kriging and co-kriging by Pebesma et al.
library(fields)                              #NCAR spatial interpolation methods such as kriging and splines
library(raster)                              #Hijmans et al. package for raster processing
library(rasterVis)                           #Enhanced visualization of raster data
library(parallel)                            #Urbanek S. and Ripley B., package for multi-core & parallel processing
library(reshape)                             #Data reshaping (melt/cast)
library(plotrix)                             #Additional plotting functions
library(maptools)                            #Tools for handling spatial objects such as shapefiles

### Parameters and arguments

infile2<-"list_365_dates_04212012.txt"
infile_monthly<-"monthly_covariates_ghcn_data_TMAXy2010_2010_VE_02082013.shp"
infile_daily<-"daily_covariates_ghcn_data_TMAXy2010_2010_VE_02082013.shp"
infile_locs<-"stations_venezuela_region_y2010_2010_VE_02082013.shp"
infile3<-"covariates__venezuela_region__VE_01292013.tif" #this is an output from the covariate script

in_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/input_data"
out_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/output_data"
script_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/"
setwd(in_path)

y_var_name<-"dailyTmax"                                       
out_prefix<-"_365d_GAM_fus5_all_lstd_02202013"                #User-defined output prefix
seed_number<- 100      #if seed_number is zero or negative, no seed is set
nb_sample<-1           #number of times random sampling must be repeated for every hold-out proportion
prop_min<-0.3          #if prop_min=prop_max and step=0 then a single hold-out proportion is used for all dates
prop_max<-0.3
step<-0         
constant<-0            #if value is 1 then use the same samples as date one for the whole set of dates
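#Example (hypothetical values, not used in this run): to hold out 10%, 20% and 30% of
#stations with 5 random samples per hold-out proportion, one would set:
#nb_sample<-5; prop_min<-0.1; prop_max<-0.3; step<-0.1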
#Projection used in the interpolation of the study area: should be read directly from the outline of the study area
#CRS_interp<-"+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=400000 +y_0=0 +ellps=GRS80 +units=m +no_defs";
CRS_locs_WGS84<-CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0") #Station coords in WGS84

#Models to run...this can be changed for each run
list_models<-c("y_var ~ s(elev_1)",
               "y_var ~ s(LST)",
               "y_var ~ s(elev_1,LST)",
               "y_var ~ s(lat) + s(lon)+ s(elev_1)",
               "y_var ~ s(lat,lon,elev_1)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST)", 
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(LC2)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(LC6)", 
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(DISTOC)")

#Default names of the monthly LST averages to be matched               
lst_avg<-c("mm_01","mm_02","mm_03","mm_04","mm_05","mm_06","mm_07","mm_08","mm_09","mm_10","mm_11","mm_12")  
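#Equivalent, more compact construction: lst_avg<-sprintf("mm_%02d",1:12)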

source(file.path(script_path,"GAM_fusion_function_multisampling_02262013.R"))
source(file.path(script_path,"GAM_fusion_function_multisampling_validation_metrics_02262013.R"))

###################### START OF THE SCRIPT ########################

################# CREATE LOG FILE #####################

#Create a log file to keep track of details such as processing times and parameters.

log_fname<-paste("R_log_t",out_prefix,".log",sep="")

if (file.exists(log_fname)){  #Should the script stop instead?
  file.remove(log_fname)      #remove the log file from a previous run
}
log_file<-file(log_fname,"w")

time1<-proc.time()    #Start stopwatch
writeLines(paste("Starting script at this local Date and Time: ",as.character(Sys.time()),sep=""),
           con=log_file,sep="\n")
writeLines("Starting script process time:",con=log_file,sep="\n")
writeLines(as.character(time1),con=log_file,sep="\n")    

############### Reading the daily station data and other data #################

ghcn<-readOGR(dsn=in_path,layer=sub(".shp","",infile_daily))      #Daily observations with covariates
CRS_interp<-proj4string(ghcn)                                     #Storing projection information (ellipsoid, datum, etc.)

stat_loc<-readOGR(dsn=in_path,layer=sub(".shp","",infile_locs))   #Station locations

data3<-readOGR(dsn=in_path,layer=sub(".shp","",infile_monthly))   #Monthly observations with covariates

#Remove NA for LC and CANHEIGHT: this part needs to be checked later
ghcn$LC1[is.na(ghcn$LC1)]<-0
ghcn$LC3[is.na(ghcn$LC3)]<-0
ghcn$CANHEIGHT[is.na(ghcn$CANHEIGHT)]<-0
ghcn$LC4[is.na(ghcn$LC4)]<-0
ghcn$LC6[is.na(ghcn$LC6)]<-0
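#A more compact, equivalent alternative to the block above (a sketch, same effect):
#for (v in c("LC1","LC3","CANHEIGHT","LC4","LC6")) ghcn[[v]][is.na(ghcn[[v]])]<-0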

dates <-readLines(file.path(in_path,infile2)) #Dates to be predicted

##Extracting the variable values from the raster files                                             

#The names of covariates can be changed...
rnames <-c("x","y","lon","lat","N","E","N_w","E_w","elev","slope","aspect","CANHEIGHT","DISTOC")
lc_names<-c("LC1","LC2","LC3","LC4","LC5","LC6","LC7","LC8","LC9","LC10","LC11","LC12")
lst_names<-c("mm_01","mm_02","mm_03","mm_04","mm_05","mm_06","mm_07","mm_08","mm_09","mm_10","mm_11","mm_12",
             "nobs_01","nobs_02","nobs_03","nobs_04","nobs_05","nobs_06","nobs_07","nobs_08",
             "nobs_09","nobs_10","nobs_11","nobs_12")

covar_names<-c(rnames,lc_names,lst_names)

s_raster<-stack(infile3)                   #Read in the covariate raster stack
names(s_raster)<-covar_names               #Assigning names to the raster layers: making sure they are included in the extraction
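#Optional sanity check (a sketch, not in the original run): the number of layers in the
#stack must match the number of names being assigned.
#stopifnot(nlayers(s_raster)==length(covar_names))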

#Deal with no data values and zeros (legacy code, kept commented out)
#pos<-match("LC1",layerNames(s_raster))        #Find layer named "LC1"
#LC1<-raster(s_raster,layer=pos)               #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#LC1[is.na(LC1)]<-0

#pos<-match("LC3",layerNames(s_raster))        #Find layer named "LC3"
#LC3<-raster(s_raster,layer=pos)               #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#LC3[is.na(LC3)]<-0

#pos<-match("CANHEIGHT",layerNames(s_raster))  #Find layer named "CANHEIGHT"
#CANHEIGHT<-raster(s_raster,layer=pos)         #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#CANHEIGHT[is.na(CANHEIGHT)]<-0
#pos<-match("ELEV_SRTM",layerNames(s_raster))  #Find layer named "ELEV_SRTM"
#ELEV_SRTM<-raster(s_raster,layer=pos)         #Select layer from stack on 10/30
#s_raster<-dropLayer(s_raster,pos)
#ELEV_SRTM[ELEV_SRTM <0]<-NA

#s_sgdf<-as(s_raster,"SpatialGridDataFrame") #Conversion to spatial grid data frame

######### Preparing daily and monthly values for training and testing

#Screening for daily bad values: the value is tmax in this case (kept commented out)
#ghcn$value<-as.numeric(ghcn$value)
#ghcn_all<-ghcn
#ghcn_test<-subset(ghcn,ghcn$value>-150 & ghcn$value<400)
#ghcn_test<-ghcn
#ghcn_test2<-subset(ghcn_test,ghcn_test$elev_1>0)
#ghcn<-ghcn_test2
#coords<- ghcn[,c('x_OR83M','y_OR83M')]

#Now clean and screen the monthly values
#dst_all<-dst
dst_all<-data3
dst<-data3
#dst<-subset(dst,dst$TMax>-15 & dst$TMax<45) #may choose a different threshold??
#dst<-subset(dst,dst$ELEV_SRTM>0) #This would drop two stations...or 24 rows

##### Create sampling: select training and testing sites ###

#Make this a function!!!! (see the sketch after the constant-sampling block below)

if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}
nel<-length(dates)
dates_list<-vector("list",nel)   #list of one-row data frames

prop_range<-(seq(from=prop_min,to=prop_max,by=step))*100     #range of hold-out proportions to run (in %)
sn<-length(dates)*nb_sample*length(prop_range)               #Number of sampling runs

for(i in 1:length(dates)){
  d_tmp<-rep(dates[i],nb_sample*length(prop_range)) #repeating the same date
  s_nb<-rep(1:nb_sample,length(prop_range))         #index of the random sample per proportion
  prop_tmp<-sort(rep(prop_range, nb_sample))
  tab_run_tmp<-cbind(d_tmp,s_nb,prop_tmp)
  dates_list[[i]]<-tab_run_tmp
}

sampling_dat<-as.data.frame(do.call(rbind,dates_list))
names(sampling_dat)<-c("date","run_samp","prop")

for(i in 2:3){   #convert run_samp and prop back to numeric
  sampling_dat[,i]<-as.numeric(as.character(sampling_dat[,i]))  
}

sampling_dat$date<- as.character(sampling_dat[,1])
#ghcn.subsets <-lapply(dates, function(d) subset(ghcn, date==d))
ghcn.subsets <-lapply(as.character(sampling_dat$date), function(d) subset(ghcn, date==d)) #one subset of ghcn per row of sampling_dat (e.g. 365 with the defaults above)

#Make this a function??
## Adding choice of constant sample 
if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}

sampling<-vector("list",length(ghcn.subsets))
sampling_station_id<-vector("list",length(ghcn.subsets))
for(i in 1:length(ghcn.subsets)){
  n<-nrow(ghcn.subsets[[i]])
  prop<-(sampling_dat$prop[i])/100
  ns<-n-round(n*prop)   #number of training rows: (1-prop) of the rows
  nv<-n-ns              #number of validation rows: prop of the rows
  ind.training <- sample(nrow(ghcn.subsets[[i]]), size=ns, replace=FALSE) #randomly select the index positions of the training rows
  ind.testing <- setdiff(1:nrow(ghcn.subsets[[i]]), ind.training)
  #Find the corresponding stations
  data_sampled<-ghcn.subsets[[i]][ind.training,]   #select the randomly sampled stations
  station_id.training<-data_sampled$station        #ids of the randomly sampled stations
  #Save the information
  sampling[[i]]<-ind.training
  sampling_station_id[[i]]<- station_id.training
}
## Use the same samples across the year...
if (constant==1){
  sampled<-sampling[[1]]
  data_sampled<-ghcn.subsets[[1]][sampled,]   #select the randomly sampled stations
  station_sampled<-data_sampled$station       #ids of the randomly sampled stations
  list_const_sampling<-vector("list",sn)
  list_const_sampling_station_id<-vector("list",sn)
  for(i in 1:sn){
    station_id.training<-intersect(station_sampled,ghcn.subsets[[i]]$station)
    ind.training<-match(station_id.training,ghcn.subsets[[i]]$station)
    list_const_sampling[[i]]<-ind.training
    list_const_sampling_station_id[[i]]<-station_id.training
  }
  sampling<-list_const_sampling 
  sampling_station_id<-list_const_sampling_station_id
}
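
#Sketch of the sampling function suggested above (an assumption, not used in this run):
#create_sampling_holdout<-function(data_subset,prop){  #prop: hold-out fraction (0-1)
#  n<-nrow(data_subset)
#  ns<-n-round(n*prop)                                 #training size: (1-prop) of the rows
#  ind.training<-sample(n,size=ns,replace=FALSE)
#  list(ind.training=ind.training,
#       station_id.training=data_subset$station[ind.training])
#}
#Hypothetical usage: samp<-create_sampling_holdout(ghcn.subsets[[1]],prop=0.3)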

########### PREDICT FOR MONTHLY SCALE  #############

#First predict at the monthly time scale: climatology
writeLines("Predictions at monthly scale:",con=log_file,sep="\n")
t1<-proc.time()
gamclim_fus_mod<-mclapply(1:12, runClim_KGFusion,mc.preschedule=FALSE,mc.cores = 6) #fit the 12 monthly climatology models in parallel
#gamclim_fus_mod<-mclapply(1:12, runClim_KGFusion,mc.preschedule=FALSE,mc.cores = 4)
save(gamclim_fus_mod,file= paste("gamclim_fus_mod",out_prefix,".RData",sep=""))
t2<-proc.time()-t1
writeLines(as.character(t2),con=log_file,sep="\n")
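#Note: mc.cores is hard-coded above; a more portable sketch (an assumption, not the
#original setting) caps it at the machine's core count:
#n_cores<-min(6,parallel::detectCores())
#gamclim_fus_mod<-mclapply(1:12, runClim_KGFusion,mc.preschedule=FALSE,mc.cores=n_cores)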

#Now get the list of raster clim layers

list_tmp<-vector("list",length(gamclim_fus_mod))
for (i in 1:length(gamclim_fus_mod)){
  tmp<-gamclim_fus_mod[[i]]$clim
  list_tmp[[i]]<-tmp
}
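#Equivalent one-liner: list_tmp<-lapply(gamclim_fus_mod,function(x) x$clim)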

################## PREDICT AT DAILY TIME SCALE #################

#Put together the list of clim models per month...
rast_clim_yearlist<-list_tmp
#Second, predict at the daily time scale: delta

#gam_fus_mod<-mclapply(1:1, runGAMFusion,mc.preschedule=FALSE,mc.cores = 1) #test run on a single subset
writeLines("Predictions at the daily scale:",con=log_file,sep="\n")
t1<-proc.time()
gam_fus_mod<-mclapply(1:length(ghcn.subsets), runGAMFusion,mc.preschedule=FALSE,mc.cores = 9) #one daily prediction per sampling run, in parallel
save(gam_fus_mod,file= paste("gam_fus_mod",out_prefix,".RData",sep=""))
t2<-proc.time()-t1
writeLines(as.character(t2),con=log_file,sep="\n")

############### NOW RUN VALIDATION #########################

list_tmp<-vector("list",length(gam_fus_mod))
for (i in 1:length(gam_fus_mod)){
  tmp<-gam_fus_mod[[i]][[y_var_name]]  #y_var_name is the variable predicted (tmax or tmin)
  list_tmp[[i]]<-tmp
}
rast_day_yearlist<-list_tmp #list of predicted images

writeLines("Validation step:",con=log_file,sep="\n")
t1<-proc.time()
#calculate_accuracy_metrics<-function(i)
gam_fus_validation_mod<-mclapply(1:length(gam_fus_mod), calculate_accuracy_metrics,mc.preschedule=FALSE,mc.cores = 9) #compute validation metrics for each run, in parallel
#gam_fus_validation_mod<-mclapply(1:1, calculate_accuracy_metrics,mc.preschedule=FALSE,mc.cores = 1) #test run on a single subset
save(gam_fus_validation_mod,file= paste("gam_fus_validation_mod",out_prefix,".RData",sep=""))
t2<-proc.time()-t1
writeLines(as.character(t2),con=log_file,sep="\n")

#################### ASSESSMENT OF PREDICTIONS: PLOTS OF ACCURACY METRICS ###########

##Create data.frame with validation metrics for a full year
tb_diagnostic_v<-extract_from_list_obj(gam_fus_validation_mod,"metrics_v")
rownames(tb_diagnostic_v)<-NULL #remove row names

#Call function to create plots of metrics for the validation dataset
metric_names<-c("rmse","mae","me","r","m50")
summary_metrics<-boxplot_from_tb(tb_diagnostic_v,metric_names,out_prefix)
names(summary_metrics)<-c("avg","median")
##Write out information concerning accuracy and predictions
outfile<-file.path(in_path,paste("assessment_measures_",out_prefix,".txt",sep="")) #note: written under in_path, not out_path
write.table(tb_diagnostic_v,file= outfile,row.names=FALSE,sep=",")
write.table(summary_metrics[[1]], file= outfile, append=TRUE,sep=",") #write out avg
write.table(summary_metrics[[2]], file= outfile, append=TRUE,sep=",") #write out median
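#Note (base R behavior): write.table() with append=TRUE and the default col.names=TRUE
#emits an "appending column names to file" warning; passing col.names=FALSE for the
#appended tables avoids it.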

#################### CLOSE LOG FILE  #############

#Close the log_file connection and add metadata
writeLines("Finished script process time:",con=log_file,sep="\n")
time2<-proc.time()-time1
writeLines(as.character(time2),con=log_file,sep="\n")
#Later on, add all the parameters used in the script...
writeLines(paste("Finished script at this local Date and Time: ",as.character(Sys.time()),sep=""),
           con=log_file,sep="\n")
writeLines("End of script",con=log_file,sep="\n")
close(log_file)

############################################################
######################## END OF SCRIPT #####################