##################    MULTI SAMPLING GAM FUSION METHOD ASSESSMENT ####################################
############################ Merging LST and station data ##########################################
#This script interpolates tmax values over the interpolation area using MODIS LST and GHCND station data.
#It requires the text file of stations and a shape file of the study area.
#Note that the projection for both GHCND and the study area is lonlat WGS84.
#Options to run this program are:
#1)Multisampling: vary the proportions of hold-out and use random samples for each run
#2)Constant sampling: use the same sample over the runs
#3)Over dates: run over, for example, 365 dates without multisampling
#4)Use seed number: use a seed if random samples must be repeatable
#5)GAM fusion: possibility of running GAM+FUSION, with other options added
#The interpolation is done first at the monthly time scale; then delta surfaces are added.
#AUTHOR: Benoit Parmentier
#DATE: 02/20/2013
#PROJECT: NCEAS INPLANT: Environment and Organisms --TASK#363--
###################################################################################################
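
#Illustrative sketch (an addition, not part of the original workflow): the fusion
#idea is daily prediction = monthly climatology surface + daily delta surface.
#With two toy rasters, the recombination is a simple raster addition:
if (FALSE) {
  library(raster)
  clim_month<-setValues(raster(nrows=10,ncols=10),rnorm(100,mean=25,sd=3)) #toy monthly climatology
  delta_day <-setValues(raster(nrows=10,ncols=10),rnorm(100,mean=0,sd=2))  #toy daily deviation
  daily_pred<-clim_month+delta_day                                         #recombined daily surface
}
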
###Loading R libraries and packages
library(gtools)                              # loading some useful tools
library(mgcv)                                # GAM package by Simon Wood
library(sp)                                  # Spatial package with class definitions by Bivand et al.
library(spdep)                               # Spatial package with methods and spatial stats by Bivand et al.
library(rgdal)                               # GDAL wrapper for R, spatial utilities
library(gstat)                               # Kriging and co-kriging by Pebesma et al.
library(fields)                              # NCAR spatial interpolation methods such as kriging and splines
library(raster)                              # Hijmans et al. package for raster processing
library(rasterVis)                           # Raster visualization
library(parallel)                            # Urbanek S. and Ripley B., package for multicore & parallel processing
library(reshape)                             # Data reshaping (melt/cast)
library(plotrix)                             # Additional plotting functions
library(maptools)                            # Tools for handling spatial objects

### Parameters and arguments

infile2<-"list_365_dates_04212012.txt"
infile_monthly<-"monthly_covariates_ghcn_data_TMAXy2010_2010_VE_02082013.shp"
infile_daily<-"daily_covariates_ghcn_data_TMAXy2010_2010_VE_02082013.shp"
infile_locs<-"stations_venezuela_region_y2010_2010_VE_02082013.shp"
infile3<-"covariates__venezuela_region__VE_01292013.tif" #this is an output from the covariate script

in_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/input_data"
out_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/output_data"
script_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/"
setwd(in_path)

y_var_name<-"dailyTmax"
out_prefix<-"_365d_GAM_fus5_all_lstd_02202013"                #User-defined output prefix
seed_number<- 100      #if seed_number is not positive, no seed is set
nb_sample<-1           #number of times random sampling is repeated for every hold-out proportion
prop_min<-0.3          #if prop_min=prop_max and step=0 then predictions are done for the number of dates...
prop_max<-0.3
step<-0
constant<-0            #if value is 1 then use the same samples as date one for the whole set of dates
#projection used in the interpolation of the study area: should be read directly from the outline of the study area
#CRS_interp<-"+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=400000 +y_0=0 +ellps=GRS80 +units=m +no_defs";
CRS_locs_WGS84<-CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0") #Station coords in WGS84
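
#Note (suggested, not in the original script): if station coordinates arrive in a
#different CRS, they can be reprojected to the WGS84 definition above, e.g.:
#stat_loc_wgs84<-spTransform(stat_loc,CRS_locs_WGS84) #sketch; stat_loc is read in below
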
#Models to run...these can be changed for each run
list_models<-c("y_var ~ s(elev_1)",
               "y_var ~ s(LST)",
               "y_var ~ s(elev_1,LST)",
               "y_var ~ s(lat) + s(lon)+ s(elev_1)",
               "y_var ~ s(lat,lon,elev_1)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(LC2)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(LC6)",
               "y_var ~ s(lat,lon) + s(elev_1) + s(N_w,E_w) + s(LST) + s(DISTOC)")
#Default names of the LST averages to be matched
lst_avg<-c("mm_01","mm_02","mm_03","mm_04","mm_05","mm_06","mm_07","mm_08","mm_09","mm_10","mm_11","mm_12")

source(file.path(script_path,"GAM_fusion_function_multisampling_02202013.R"))
source(file.path(script_path,"GAM_fusion_function_multisampling_validation_metrics_02202013.R"))

###################### START OF THE SCRIPT ########################
#Create log file to keep track of details such as processing times and parameters.
#Mode "w" creates the file if needed and truncates any existing copy.
log_fname<-paste("R_log_t",out_prefix, ".log",sep="")
log_file<-file(log_fname,"w")

time1<-proc.time()    #Start stopwatch
writeLines(paste("Starting script at this local Date and Time: ",as.character(Sys.time()),sep=""),
           con=log_file,sep="\n")
writeLines("Starting script process time:",con=log_file,sep="\n")
writeLines(as.character(time1),con=log_file,sep="\n")

###Reading the daily station data and setting up for model comparison
ghcn<-readOGR(dsn=in_path,layer=sub(".shp","",infile_daily))
CRS_interp<-proj4string(ghcn)                       #Storing projection information (ellipsoid, datum, etc.)

stat_loc<-readOGR(dsn=in_path,layer=sub(".shp","",infile_locs))

data3<-readOGR(dsn=in_path,layer=sub(".shp","",infile_monthly))

#Remove NAs for LC and CANHEIGHT: need to check this part later
ghcn$LC1[is.na(ghcn$LC1)]<-0
ghcn$LC3[is.na(ghcn$LC3)]<-0
ghcn$CANHEIGHT[is.na(ghcn$CANHEIGHT)]<-0
ghcn$LC4[is.na(ghcn$LC4)]<-0
ghcn$LC6[is.na(ghcn$LC6)]<-0

dates <-readLines(file.path(in_path,infile2)) #dates to be predicted

##Extracting the variable values from the raster files

#The names of covariates can be changed...
rnames <-c("x","y","lon","lat","N","E","N_w","E_w","elev","slope","aspect","CANHEIGHT","DISTOC")
lc_names<-c("LC1","LC2","LC3","LC4","LC5","LC6","LC7","LC8","LC9","LC10","LC11","LC12")
lst_names<-c("mm_01","mm_02","mm_03","mm_04","mm_05","mm_06","mm_07","mm_08","mm_09","mm_10","mm_11","mm_12",
             "nobs_01","nobs_02","nobs_03","nobs_04","nobs_05","nobs_06","nobs_07","nobs_08",
             "nobs_09","nobs_10","nobs_11","nobs_12")

covar_names<-c(rnames,lc_names,lst_names)

s_raster<-stack(infile3)                   #read in the data stack
names(s_raster)<-covar_names               #Assigning names to the raster layers, making sure they are included in the extraction
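
#Suggested sanity check (an addition, not in the original script): the stack must
#have exactly one layer per name for the assignment above to be meaningful.
if (nlayers(s_raster)!=length(covar_names)) stop("layer count does not match covariate names")
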
#Deal with no-data values and zeros
#pos<-match("LC1",layerNames(s_raster)) #Find layer named "LC1"
#LC1<-raster(s_raster,layer=pos)        #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#LC1[is.na(LC1)]<-0

#pos<-match("LC3",layerNames(s_raster)) #Find layer named "LC3"
#LC3<-raster(s_raster,layer=pos)        #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#LC3[is.na(LC3)]<-0

#pos<-match("CANHEIGHT",layerNames(s_raster)) #Find layer named "CANHEIGHT"
#CANHEIGHT<-raster(s_raster,layer=pos)        #Select layer from stack
#s_raster<-dropLayer(s_raster,pos)
#CANHEIGHT[is.na(CANHEIGHT)]<-0
#pos<-match("ELEV_SRTM",layerNames(s_raster)) #Find layer named "ELEV_SRTM"
#ELEV_SRTM<-raster(s_raster,layer=pos)        #Select layer from stack on 10/30
#s_raster<-dropLayer(s_raster,pos)
#ELEV_SRTM[ELEV_SRTM <0]<-NA

#s_sgdf<-as(s_raster,"SpatialGridDataFrame") #Conversion to spatial grid data frame

######### Preparing daily and monthly values for training and testing

#Screening for bad daily values: the value is tmax in this case
#ghcn$value<-as.numeric(ghcn$value)
#ghcn_all<-ghcn
#ghcn_test<-subset(ghcn,ghcn$value>-150 & ghcn$value<400)
#ghcn_test<-ghcn
#ghcn_test2<-subset(ghcn_test,ghcn_test$elev_1>0)
#ghcn<-ghcn_test2
#coords<- ghcn[,c('x_OR83M','y_OR83M')]

#Now clean and screen the monthly values
#dst_all<-dst
dst_all<-data3
dst<-data3
#dst<-subset(dst,dst$TMax>-15 & dst$TMax<45) #may choose a different threshold??
#dst<-subset(dst,dst$ELEV_SRTM>0) #This would drop two stations...or 24 rows

##Sampling: training and testing sites.

#Make this a function
if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}
nel<-length(dates)
dates_list<-vector("list",nel) #list of run tables, one per date

prop_range<-(seq(from=prop_min,to=prop_max,by=step))*100     #range of hold-out proportions to run
sn<-length(dates)*nb_sample*length(prop_range)               #Number of samples to run

for(i in 1:length(dates)){
  d_tmp<-rep(dates[i],nb_sample*length(prop_range)) #repeating the same date
  s_nb<-rep(1:nb_sample,length(prop_range))         #number of random samples per proportion
  prop_tmp<-sort(rep(prop_range, nb_sample))
  tab_run_tmp<-cbind(d_tmp,s_nb,prop_tmp)
  dates_list[[i]]<-tab_run_tmp
}

sampling_dat<-as.data.frame(do.call(rbind,dates_list))
names(sampling_dat)<-c("date","run_samp","prop")

for(i in 2:3){            #convert the run_samp and prop columns to numeric
  sampling_dat[,i]<-as.numeric(as.character(sampling_dat[,i]))
}
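
#Illustrative example (an assumption about the date format): with two dates,
#nb_sample=1 and the single proportion 30 set above, sampling_dat holds one row
#per (date, sample, proportion) combination:
#        date run_samp prop
#1   20100101        1   30
#2   20100102        1   30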

sampling_dat$date<- as.character(sampling_dat[,1])
#ghcn.subsets <-lapply(dates, function(d) subset(ghcn, date==d)) #one subset per date
ghcn.subsets <-lapply(as.character(sampling_dat$date), function(d) subset(ghcn, date==d)) #creates a list of station subsets (e.g., 10 or 365) keyed by date

#Make this a function??
## adding the choice of a constant sample
if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}

sampling<-vector("list",length(ghcn.subsets))
sampling_station_id<-vector("list",length(ghcn.subsets))
for(i in 1:length(ghcn.subsets)){
  n<-nrow(ghcn.subsets[[i]])
  prop<-(sampling_dat$prop[i])/100
  ns<-n-round(n*prop)   #size of the training sample: (1-prop) of the rows
  nv<-n-ns              #size of the validation sample: prop of the rows
  ind.training <- sample(nrow(ghcn.subsets[[i]]), size=ns, replace=FALSE) #index positions of the randomly selected training rows
  ind.testing <- setdiff(1:nrow(ghcn.subsets[[i]]), ind.training)
  #Find the corresponding stations
  data_sampled<-ghcn.subsets[[i]][ind.training,] #the randomly sampled training stations
  station_id.training<-data_sampled$station      #ids of the randomly sampled training stations
  #Save the information
  sampling[[i]]<-ind.training
  sampling_station_id[[i]]<- station_id.training
}
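
#Suggested sanity check (an addition, not part of the original script): for any
#run i, the saved training indices plus their complement cover every row exactly once.
if (FALSE) {
  i<-1  #hypothetical run index
  ind.train<-sampling[[i]]
  ind.test<-setdiff(1:nrow(ghcn.subsets[[i]]),ind.train)
  stopifnot(length(ind.train)+length(ind.test)==nrow(ghcn.subsets[[i]]))
}
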
## Use the same samples across the year...
if (constant==1){
  sampled<-sampling[[1]]
  data_sampled<-ghcn.subsets[[1]][sampled,]  #the stations sampled for the first date
  station_sampled<-data_sampled$station      #ids of the stations sampled for the first date
  list_const_sampling<-vector("list",sn)
  list_const_sampling_station_id<-vector("list",sn)
  for(i in 1:sn){
    station_id.training<-intersect(station_sampled,ghcn.subsets[[i]]$station)
    ind.training<-match(station_id.training,ghcn.subsets[[i]]$station)
    list_const_sampling[[i]]<-ind.training
    list_const_sampling_station_id[[i]]<-station_id.training
  }
  sampling<-list_const_sampling
  sampling_station_id<-list_const_sampling_station_id
}

######## Prediction for the range of dates and sampling data

#First predict at the monthly time scale: climatology
writeLines("Predictions at monthly scale:",con=log_file,sep="\n")
t1<-proc.time()
gamclim_fus_mod<-mclapply(1:12, runClim_KGFusion,mc.preschedule=FALSE,mc.cores = 6) #fit the monthly climatology models in parallel, one task per month
#gamclim_fus_mod<-mclapply(1:12, runClim_KGFusion,mc.preschedule=FALSE,mc.cores = 4) #alternative run with fewer cores
save(gamclim_fus_mod,file= paste("gamclim_fus_mod",out_prefix,".RData",sep=""))
t2<-proc.time()-t1
writeLines(as.character(t2),con=log_file,sep="\n")
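
#Note (suggested addition, not in the original script): mc.cores is hardcoded in
#the mclapply calls; a more portable choice would derive it from the machine, e.g.:
#num_cores<-max(1,min(6,parallel::detectCores()-1))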

#now get the list of raster clim layers
list_tmp<-vector("list",length(gamclim_fus_mod))
for (i in 1:length(gamclim_fus_mod)){
  tmp<-gamclim_fus_mod[[i]]$clim
  list_tmp[[i]]<-tmp
}

#put together the list of clim rasters per month...
rast_clim_yearlist<-list_tmp

#Second, predict at the daily time scale: delta surfaces

#gam_fus_mod<-mclapply(1:1, runGAMFusion,mc.preschedule=FALSE,mc.cores = 1) #debugging version: single date, single core
writeLines("Predictions at the daily scale:",con=log_file,sep="\n")
t1<-proc.time()
gam_fus_mod<-mclapply(1:length(ghcn.subsets), runGAMFusion,mc.preschedule=FALSE,mc.cores = 9) #fit the daily models in parallel, one task per date/sample
save(gam_fus_mod,file= paste("gam_fus_mod",out_prefix,".RData",sep=""))
t2<-proc.time()-t1
writeLines(as.character(t2),con=log_file,sep="\n")

#Add an accuracy_metrics section/function
#now get the list of raster daily prediction layers
#gam_fus_mod_tmp<-gam_fus_mod
#this should be changed later once the correction has been made
#for (i in 1:length(gam_fus_mod)){
#  obj_names<-c(y_var_name,"clim","delta","data_s","sampling_dat","data_v",
#               "mod_kr_day")
#  names(gam_fus_mod[[i]])<-obj_names
#  names(gam_fus_mod[[i]][[y_var_name]])<-c("mod1","mod2","mod3","mod4","mod_kr")
#}

##Collect the daily predictions
list_tmp<-vector("list",length(gam_fus_mod))
for (i in 1:length(gam_fus_mod)){
  tmp<-gam_fus_mod[[i]][[y_var_name]]  #y_var_name is the variable predicted (tmax or tmin)
  list_tmp[[i]]<-tmp
}
rast_day_yearlist<-list_tmp #list of predicted images

writeLines("Validation step:",con=log_file,sep="\n")
288
t1<-proc.time()
289
#calculate_accuary_metrics<-function(i)
290
gam_fus_validation_mod<-mclapply(1:length(gam_fus_mod), calculate_accuracy_metrics,mc.preschedule=FALSE,mc.cores = 9) #This is the end bracket from mclapply(...) statement
291
#gam_fus_validation_mod<-mclapply(1:1, calculate_accuracy_metrics,mc.preschedule=FALSE,mc.cores = 1) #This is the end bracket from mclapply(...) statement
292
save(gam_fus_validation_mod,file= paste("gam_fus_validation_mod",out_prefix,".RData",sep=""))
293
t2<-proc.time()-t1
294
writeLines(as.character(t2),con=log_file,sep="\n")
####This part concerns validation assessment and must be moved later...
## make this a function??
list_tmp<-vector("list",length(gam_fus_validation_mod))
for (i in 1:length(gam_fus_validation_mod)){
  tmp<-gam_fus_validation_mod[[i]]$metrics_v
  list_tmp[[i]]<-tmp
}
tb_diagnostic<-do.call(rbind,list_tmp) #long row names
rownames(tb_diagnostic)<-NULL          #remove row names

mod_names<-unique(tb_diagnostic$pred_mod) #models that have accuracy metrics

#now boxplots and means per model
t<-melt(tb_diagnostic,
        #measure=mod_var, 
        id=c("date","pred_mod","prop"),
        na.rm=F)
avg_tb<-cast(t,pred_mod~variable,mean)
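
#avg_tb should now hold one row per prediction model with the mean of each
#accuracy metric (e.g., mean RMSE) across all dates and runs.
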
tb<-tb_diagnostic
tb_mod_list<-vector("list",length(mod_names))
for(i in 1:length(mod_names)){            #Reorganizing information in terms of metrics
  mod_name_tb<-paste("tb_",mod_names[i],sep="")
  tb_mod<-subset(tb, pred_mod==mod_names[i])
  assign(mod_name_tb,tb_mod)
  tb_mod_list[[i]]<-tb_mod
}
names(tb_mod_list)<-mod_names

mod_metrics<-do.call(cbind,tb_mod_list)
mod_pat<-glob2rx("*.rmse")
mod_var<-grep(mod_pat,names(mod_metrics),value=TRUE) #grep with value=TRUE returns the matching names
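
#For reference: glob2rx("*.rmse") converts the wildcard pattern to the regular
#expression "^.*\\.rmse$", so mod_var holds the RMSE column names produced by
#cbind above (e.g., something like "mod1.rmse", assuming model names mod1, mod2, ...).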

#boxplot(mod_metrics[[mod_var]]) #bug: "[[" cannot take a vector of names; use single-bracket indexing as below
test<-mod_metrics[mod_var]       #data frame with one RMSE column per model
boxplot(test,outline=FALSE,horizontal=FALSE,cex=0.5)

#Close the log_file connection and add metadata
writeLines("Finished script process time:",con=log_file,sep="\n")
time2<-proc.time()-time1
writeLines(as.character(time2),con=log_file,sep="\n")
#later on, add all the parameters used in the script...
writeLines(paste("Finished script at this local Date and Time: ",as.character(Sys.time()),sep=""),
           con=log_file,sep="\n")
writeLines("End of script",con=log_file,sep="\n")
close(log_file)

####End of part to be changed...
#### END OF SCRIPT #######