##################    MULTI SAMPLING GAM FUSION METHOD ASSESSMENT ####################################
############################ Merging LST and station data ##########################################
#This script interpolates tmax values using MODIS LST and GHCND station data for a given
#interpolation area. It requires the text file of stations and a shapefile of the study area.
#Note that the projection for both GHCND and the study area is lonlat WGS84.
#Options to run this program are:
#1) Multisampling: vary the proportions of hold-out and use random samples for each run
#2) Constant sampling: use the same sample over the runs
#3) Over dates: run over, for example, 365 dates without multisampling
#4) Use seed number: use a seed if random samples must be repeatable
#5) GAM fusion: possibility of running GAM+FUSION or GAM separately
#AUTHOR: Benoit Parmentier
#DATE: 02/06/2013
#PROJECT: NCEAS INPLANT: Environment and Organisms --TASK#363--
###################################################################################################
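
#How the options above map onto the parameters defined below (an illustrative
#sketch, not from the original script; values are examples only):
#  1) Multisampling:     nb_sample>1 and/or prop_min<prop_max with step>0, e.g. prop_min<-0.1; prop_max<-0.5; step<-0.1
#  2) Constant sampling: constant<-1 (reuse the first date's sample for all dates)
#  3) Over dates:        prop_min<-prop_max and step<-0 (one run per date, no multisampling)
#  4) Seed number:       seed_number>0, e.g. seed_number<-100 (repeatable random samples)
#  5) GAM fusion vs GAM: bias_prediction<-1 for the GAM bias (fusion) prediction, 0 for direct GAM prediction of y_var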

###Loading R libraries and packages
library(gtools)                              # loading some useful tools
library(mgcv)                                # GAM package by Simon Wood
library(sp)                                  # Spatial package with class definitions by Bivand et al.
library(spdep)                               # Spatial package with methods and spatial stats by Bivand et al.
library(rgdal)                               # GDAL wrapper for R, spatial utilities
library(gstat)                               # Kriging and co-kriging by Pebesma et al.
library(fields)                              # NCAR spatial interpolation methods such as kriging and splines
library(raster)                              # Hijmans et al. package for raster processing
library(rasterVis)                           # Visualization of raster data
library(parallel)                            # Urbanek S. and Ripley B., package for multi-core and parallel processing
library(reshape)                             # Data reshaping (melt/cast)
library(plotrix)                             # Extra plotting functions
### Parameters and arguments

infile1<- "ghcn_or_tmax_covariates_06262012_OR83M.shp"     #GHCN shapefile containing variables for modeling 2010
infile2<-"list_365_dates_04212012.txt"                     #Text file with the list of dates for prediction
#infile3<-"LST_dates_var_names.txt"                        #LST dates names
#infile4<-"models_interpolation_05142012.txt"              #Interpolation model names
#infile5<-"mean_day244_rescaled.rst"                       #Raster or grid for the locations of predictions
#infile6<-"lst_climatology.txt"
infile6<-"LST_files_monthly_climatology.txt"               #LST climatology files used when building the monthly stack
#inlistf<-"list_files_05032012.txt"                        #Stack of images containing the covariates

infile_monthly<-"monthly_covariates_ghcn_data_TMAXy2010_2010_VE_02062013.shp"
infile_daily<-"daily_covariates_ghcn_data_TMAXy2010_2010_VE_02062013.shp"
infile_locs<-"stations_venezuela_region_y2010_2010_VE_02062013.shp"
infile3<-"covariates__venezuela_region__VE_01292013.tif"   #This is an output from the covariate script

in_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/input_data"
out_path<-"/home/parmentier/Data/IPLANT_project/Venezuela_interpolation/Venezuela_01142013/output_data"
setwd(in_path)

nmodels<-9             #Number of models run
y_var_name<-"dailyTmax"
predval<-1
prop<-0.3              #Proportion of data held out for validation
#prop<-0.25
seed_number<- 100      #Seed number for random sampling; if zero or less, no seed is set
out_prefix<-"_10d_GAM_fus5_all_lstd_020632013"   #User-defined output prefix
#out_prefix<-"_365d_GAM_12272012"                #User-defined output prefix

bias_val<-0            #if value 1 then training data is used in the bias surface rather than all monthly stations
bias_prediction<-1     #if value 1 then use GAM for the BIAS prediction, otherwise GAM direct prediction for y_var (daily tmax)
nb_sample<-1           #Number of times random sampling is repeated for every hold-out proportion
prop_min<-0.3          #if prop_min=prop_max and step=0 then one prediction is made per date (no multisampling)
prop_max<-0.3
step<-0
constant<-0            #if value 1 then use the same sample as date one for the whole set of dates
#Projection used in the interpolation of the study area: should be read directly from the outline of the study area
CRS_interp<-"+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=400000 +y_0=0 +ellps=GRS80 +units=m +no_defs"
CRS_locs_WGS84<-CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0")    #Station coords in WGS84

source("GAM_fusion_function_multisampling_02062013.R")

###################### START OF THE SCRIPT ########################

###Reading the daily station data and setting up for models' comparison
#filename<-sub(".shp","",infile1)             #Removing the extension from the file name
#ghcn<-readOGR(".", filename)                 #Reading shapefile
ghcn<-readOGR(dsn=in_path,layer=sub(".shp","",infile_daily))
CRS_interp<-proj4string(ghcn)                 #Storing projection information (ellipsoid, datum, etc.)

#mean_LST<- readGDAL(infile5)                 #Reading the whole raster in memory; this provides a grid for kriging
#proj4string(mean_LST)<-CRS_interp            #Assigning coordinate information to the prediction grid

#Station locations of the study area
#stat_loc<-read.table(paste(path,"/","location_study_area_OR_0602012.txt",sep=""),sep=",", header=TRUE)
stat_loc<-readOGR(dsn=in_path,layer=sub(".shp","",infile_locs))

#GHCN database for 1980-2010 for the study area (OR)
#data3<-read.table(paste(path,"/","ghcn_data_TMAXy1980_2010_OR_0602012.txt",sep=""),sep=",", header=TRUE)
#data3<-file.path(in_path,infile_monthly)
data3<-readOGR(dsn=in_path,layer=sub(".shp","",infile_monthly))

#Remove NA for LC and CANHEIGHT: need to check this part
ghcn$LC1[is.na(ghcn$LC1)]<-0
ghcn$LC3[is.na(ghcn$LC3)]<-0
ghcn$CANHEIGHT[is.na(ghcn$CANHEIGHT)]<-0
ghcn$LC4[is.na(ghcn$LC4)]<-0
ghcn$LC6[is.na(ghcn$LC6)]<-0

dates <-readLines(file.path(in_path,infile2))
#LST_dates <-readLines(file.path(in_path,infile3))
#models <-readLines(paste(path,"/",infile4, sep=""))

##Extracting the variable values from the raster files

#The names of covariates can be changed...
rnames<-c("x","y","lon","lat","N","E","N_w","E_w","elev","slope","aspect","CANHEIGHT","DISTOC")
lc_names<-c("LC1","LC2","LC3","LC4","LC5","LC6","LC7","LC8","LC9","LC10","LC11","LC12")
lst_names<-c("mm_01","mm_02","mm_03","mm_04","mm_05","mm_06","mm_07","mm_08","mm_09","mm_10","mm_11","mm_12",
             "nobs_01","nobs_02","nobs_03","nobs_04","nobs_05","nobs_06","nobs_07","nobs_08",
             "nobs_09","nobs_10","nobs_11","nobs_12")

covar_names<-c(rnames,lc_names,lst_names)

s_raster<-stack(infile3)                   #Read in the data stack
names(s_raster)<-covar_names               #Assigning names to the raster layers, making sure they are included in the extraction

#Deal with no-data values and zeroes
pos<-match("LC1",layerNames(s_raster))       #Find the layer named "LC1"
LC1<-raster(s_raster,layer=pos)              #Select the layer from the stack
s_raster<-dropLayer(s_raster,pos)
LC1[is.na(LC1)]<-0

pos<-match("LC3",layerNames(s_raster))       #Find the layer named "LC3"
LC3<-raster(s_raster,layer=pos)
s_raster<-dropLayer(s_raster,pos)
LC3[is.na(LC3)]<-0

pos<-match("CANHEIGHT",layerNames(s_raster)) #Find the layer named "CANHEIGHT"
CANHEIGHT<-raster(s_raster,layer=pos)
s_raster<-dropLayer(s_raster,pos)
CANHEIGHT[is.na(CANHEIGHT)]<-0

pos<-match("elev",layerNames(s_raster))      #Find the SRTM elevation layer (named "elev" in covar_names)
ELEV_SRTM<-raster(s_raster,layer=pos)
s_raster<-dropLayer(s_raster,pos)
ELEV_SRTM[ELEV_SRTM < 0]<-NA                 #Negative elevations are treated as missing
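
#The four blocks above repeat the same pattern (find layer, extract, drop, recode).
#A minimal helper sketch factoring this out; screen_layer() is a hypothetical name,
#not part of the original script, and it assumes the same raster API used above.
screen_layer<-function(r_stack,layer_name,na_action=c("zero","neg_to_na")){
  na_action<-match.arg(na_action)
  pos<-match(layer_name,layerNames(r_stack))   #Find the layer by name
  lyr<-raster(r_stack,layer=pos)               #Select the layer from the stack
  r_stack<-dropLayer(r_stack,pos)
  if (na_action=="zero"){
    lyr[is.na(lyr)]<-0                         #Recode missing values to zero
  } else {
    lyr[lyr<0]<-NA                             #Recode negative values to NA
  }
  list(layer=lyr,stack=r_stack)
}
#Example (not run): res<-screen_layer(s_raster,"LC1","zero"); LC1<-res$layer; s_raster<-res$stack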

#s_sgdf<-as(s_raster,"SpatialGridDataFrame") #Conversion to spatial grid data frame

####### Preparing LST stack of climatology...

#l=list.files(pattern="mean_month.*rescaled.rst")
l <-readLines(file.path(in_path,infile6))    #List of monthly LST climatology files
molst<-stack(l)                              #Creating a raster stack...
#setwd(old)
molst<-molst-273.16                          #K->C; LST stack of monthly averages...
idx <- seq(as.Date('2010-01-15'), as.Date('2010-12-15'), 'month')
molst <- setZ(molst, idx)
layerNames(molst) <- month.abb

######  Preparing tables for model assessment: specific diagnostics/metrics

#Model assessment: specific diagnostics/metrics
results_AIC<- matrix(1,1,nmodels+3)
results_GCV<- matrix(1,1,nmodels+3)
results_DEV<- matrix(1,1,nmodels+3)
#results_RMSE_f<- matrix(1,length(models)+3)

#Model assessment: general diagnostics/metrics
results_RMSE <- matrix(1,1,nmodels+4)
results_MAE <- matrix(1,1,nmodels+4)
results_ME <- matrix(1,1,nmodels+4)       #There are 8+1 models
results_R2 <- matrix(1,1,nmodels+4)       #Coef. of determination for the validation dataset

results_RMSE_f<- matrix(1,1,nmodels+4)    #RMSE fit, RMSE for the training dataset
results_MAE_f <- matrix(1,1,nmodels+4)

######## Preparing monthly averages from the PostgreSQL database and extracting covariates from the stack

# do this work outside of (before) this function
# to avoid making a copy of the data frame inside the function call
# date1<-ISOdate(data3$year,data3$month,data3$day) #Creating a date object from 3 separate columns
# date2<-as.POSIXlt(as.Date(date1))
# data3$date<-date2
# d<-subset(data3,year>=2000 & mflag=="0" ) #Selecting dataset 2000-2010 with good quality: 193 stations
# #May need some screening??? i.e. range of temp and elevation...
# d1<-aggregate(value~station+month, data=d, mean)  #Calculate the monthly mean for every station in OR
# id<-as.data.frame(unique(d1$station))     #Unique stations in OR for 2000-2010: 193, but 7 lack a monthly avg
#
# dst<-merge(d1, stat_loc, by.x="station", by.y="STAT_ID")   #Inner join; all columns are retained
#
# #This allows changing only one name of the data.frame
# pos<-match("value",names(dst)) #Find column with name "value"
# names(dst)[pos]<-c("TMax")
# dst$TMax<-dst$TMax/10                #TMax is the average max temp for monthly data
# #dstjan=dst[dst$month==9,]  #dst contains the monthly averages for tmax for every station over 2000-2010
#
# #Extracting covariates from the stack
# coords<- dst[c('lon','lat')]              #Define coordinates in a data frame
# coordinates(dst)<-coords                  #Assign coordinates to the data frame
# proj4string(dst)<-CRS_locs_WGS84          #Assign coordinate reference system in PROJ4 format
# dst_month<-spTransform(dst,CRS(CRS_interp))     #Project from WGS84 to the new coord. system
#
# stations_val<-extract(s_raster,dst_month)  #Extraction of the information at station locations
# stations_val<-as.data.frame(stations_val)
# dst_extract<-cbind(dst_month,stations_val)
# dst<-dst_extract

#Now clean and screen the monthly values
#dst_all<-dst
dst_all<-data3
dst<-data3
#dst<-subset(dst,dst$TMax>-15 & dst$TMax<45) #may choose a different threshold??
#dst<-subset(dst,dst$ELEV_SRTM>0) #This will drop two stations...or 24 rows

######### Preparing daily values for training and testing

#Screening for bad values: value is tmax in this case
#ghcn$value<-as.numeric(ghcn$value)
#ghcn_all<-ghcn
#ghcn_test<-subset(ghcn,ghcn$value>-150 & ghcn$value<400)
#ghcn_test<-ghcn
#ghcn_test2<-subset(ghcn_test,ghcn_test$elev_1>0)
#ghcn<-ghcn_test2
#coords<- ghcn[,c('x_OR83M','y_OR83M')]

##Sampling: training and testing sites

#Make this a function (see the sketch after the sampling loop below)
if (seed_number>0) {
  set.seed(seed_number)      #Using a seed number allows results based on random numbers to be compared...
}
nel<-length(dates)
dates_list<-vector("list",nel)   #list of one-row data.frames

prop_range<-(seq(from=prop_min,to=prop_max,by=step))*100     #Range of proportions to run
sn<-length(dates)*nb_sample*length(prop_range)               #Number of samples to run

for(i in 1:length(dates)){
  d_tmp<-rep(dates[i],nb_sample*length(prop_range))   #Repeating the same date
  s_nb<-rep(1:nb_sample,length(prop_range))           #Number of random samples per proportion
  prop_tmp<-sort(rep(prop_range, nb_sample))
  tab_run_tmp<-cbind(d_tmp,s_nb,prop_tmp)
  dates_list[[i]]<-tab_run_tmp
}

sampling_dat<-as.data.frame(do.call(rbind,dates_list))
names(sampling_dat)<-c("date","run_samp","prop")

for(i in 2:3){                   #Convert run_samp and prop back to numeric
  sampling_dat[,i]<-as.numeric(as.character(sampling_dat[,i]))
}

sampling_dat$date<- as.character(sampling_dat[,1])
#ghcn.subsets <-lapply(dates, function(d) subset(ghcn, date==d)) #This creates a list of 10 or 365 subset datasets based on dates
ghcn.subsets <-lapply(as.character(sampling_dat$date), function(d) subset(ghcn, date==d)) #This creates one subset dataset per run, matched to the rows of sampling_dat
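
#Sanity check (not in the original script): there should be exactly one
#data subset per planned run, since both are derived from sampling_dat.
stopifnot(length(ghcn.subsets)==nrow(sampling_dat))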

## Adding choice of constant sample
if (seed_number>0) {
  set.seed(seed_number)      #Using a seed number allows results based on random numbers to be compared...
}

sampling<-vector("list",length(ghcn.subsets))
sampling_station_id<-vector("list",length(ghcn.subsets))
for(i in 1:length(ghcn.subsets)){
  n<-nrow(ghcn.subsets[[i]])
  prop<-(sampling_dat$prop[i])/100
  ns<-n-round(n*prop)   #Size of the training sample (e.g. 70% of the rows)
  nv<-n-ns              #Size of the validation sample (prop of the rows)
  ind.training <- sample(nrow(ghcn.subsets[[i]]), size=ns, replace=FALSE) #Index positions of the randomly selected training rows
  ind.testing <- setdiff(1:nrow(ghcn.subsets[[i]]), ind.training)
  #Find the corresponding stations
  data_sampled<-ghcn.subsets[[i]][ind.training,]   #The randomly sampled stations
  station_id.training<-data_sampled$station        #IDs of the randomly sampled stations
  #Save the information
  sampling[[i]]<-ind.training
  sampling_station_id[[i]]<- station_id.training
}
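
#As flagged above ("Make this a function"), the hold-out sampling could be
#factored out. A minimal sketch under the same assumptions as the loop;
#split_holdout() is a hypothetical name, not part of the original script.
split_holdout<-function(data_subset,prop){
  n<-nrow(data_subset)
  ns<-n-round(n*prop)                              #Training sample size
  ind.training<-sample(n, size=ns, replace=FALSE)  #Random training indices
  ind.testing<-setdiff(1:n, ind.training)          #Remaining rows for validation
  list(training=ind.training, testing=ind.testing,
       station_id=data_subset$station[ind.training])
}
#Example (not run): s<-split_holdout(ghcn.subsets[[1]], sampling_dat$prop[1]/100)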
## Use the same samples across the year...
if (constant==1){
  sampled<-sampling[[1]]
  data_sampled<-ghcn.subsets[[1]][sampled,] #The randomly sampled stations for the first date
  station_sampled<-data_sampled$station     #IDs of the randomly sampled stations
  list_const_sampling<-vector("list",sn)
  list_const_sampling_station_id<-vector("list",sn)
  for(i in 1:sn){
    station_id.training<-intersect(station_sampled,ghcn.subsets[[i]]$station)
    ind.training<-match(station_id.training,ghcn.subsets[[i]]$station)
    list_const_sampling[[i]]<-ind.training
    list_const_sampling_station_id[[i]]<-station_id.training
  }
  sampling<-list_const_sampling
  sampling_station_id<-list_const_sampling_station_id
}

######## Prediction for the range of dates and sampling data

#gam_fus_mod<-mclapply(1:length(dates), runGAMFusion,mc.preschedule=FALSE,mc.cores = 8)
#gam_fus_mod_s<-mclapply(1:1, runGAMFusion,mc.preschedule=FALSE,mc.cores = 1)   #Single-subset test run
gam_fus_mod_s<-mclapply(1:length(ghcn.subsets), runGAMFusion,mc.preschedule=FALSE,mc.cores = 9) #Run the GAM fusion models over every subset in parallel
#gam_fus_mod2<-mclapply(4:4, runGAMFusion,mc.preschedule=FALSE,mc.cores = 1)
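
#The core count is hard-coded to 9 above. A more portable sketch (an assumption,
#not in the original script) caps it at the machine's available cores:
#num_cores<-min(length(ghcn.subsets), parallel::detectCores())
#gam_fus_mod_s<-mclapply(1:length(ghcn.subsets), runGAMFusion, mc.preschedule=FALSE, mc.cores=num_cores)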

save(gam_fus_mod_s,file= paste(out_path,"/","results2_fusion_Assessment_measure_all",out_prefix,".RData",sep=""))

## Plotting and saving diagnostic measures

tb<-gam_fus_mod_s[[1]][[3]][0,]  #Empty data frame with the metric table structure, used for rbinding...
tb_tmp<-gam_fus_mod_s            #Copy

for (i in 1:length(tb_tmp)){
  tmp<-tb_tmp[[i]][[3]]
  tb<-rbind(tb,tmp)
}
rm(tb_tmp)

for(i in 4:ncol(tb)){            #Convert the metric columns to numeric
  tb[,i]<-as.numeric(as.character(tb[,i]))
}

metrics<-as.character(unique(tb$metric))            #Names of accuracy metrics (RMSE, MAE, etc.)
tb_metric_list<-vector("list",length(metrics))

for(i in 1:length(metrics)){            #Reorganizing information in terms of metrics
  metric_name<-paste("tb_",metrics[i],sep="")
  tb_metric<-subset(tb, metric==metrics[i])
  tb_metric<-cbind(tb_metric,sampling_dat[,2:3])
  assign(metric_name,tb_metric)
  tb_metric_list[[i]]<-tb_metric
}

tb_diagnostic<-do.call(rbind,tb_metric_list)
tb_diagnostic[["prop"]]<-as.factor(tb_diagnostic[["prop"]])

mod_pat<-glob2rx("mod*")
mod_var<-grep(mod_pat,names(tb_diagnostic),value=TRUE) #Using grep with value=TRUE extracts the matching names

t<-melt(tb_diagnostic,
        measure=mod_var,
        id=c("dates","metric","prop"),
        na.rm=F)
avg_tb<-cast(t,metric+prop~variable,mean)
median_tb<-cast(t,metric+prop~variable,median)
avg_tb[["prop"]]<-as.numeric(as.character(avg_tb[["prop"]]))
avg_RMSE<-subset(avg_tb,metric=="RMSE")
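
#No plot is actually produced in this section despite the heading above.
#A minimal sketch of one possible diagnostic plot (an assumption, not in the
#original script): average RMSE against hold-out proportion for the first model column.
#plot(avg_RMSE$prop, avg_RMSE[[mod_var[1]]], type="b",
#     xlab="Hold-out proportion (%)", ylab="Average RMSE",
#     main="Average RMSE by hold-out proportion")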

sampling_obj<-list(sampling_dat=sampling_dat,training=sampling, training_id=sampling_station_id, tb=tb_diagnostic)

write.table(avg_tb, file= paste(out_path,"/","results2_fusion_Assessment_measure_avg_",out_prefix,".txt",sep=""), sep=",")
write.table(median_tb, file= paste(out_path,"/","results2_fusion_Assessment_measure_median_",out_prefix,".txt",sep=""), sep=",")
write.table(tb_diagnostic, file= paste(out_path,"/","results2_fusion_Assessment_measure",out_prefix,".txt",sep=""), sep=",")
write.table(tb, file= paste(out_path,"/","results2_fusion_Assessment_measure_all",out_prefix,".txt",sep=""), sep=",")

save(sampling_obj, file= paste(out_path,"/","results2_fusion_sampling_obj",out_prefix,".RData",sep=""))
#save(gam_fus_mod_s,file= paste(out_path,"/","results2_fusion_Assessment_measure_all",out_prefix,".RData",sep=""))
gam_fus_mod_obj<-list(gam_fus_mod=gam_fus_mod_s,sampling_obj=sampling_obj)
save(gam_fus_mod_obj,file= paste(out_path,"/","results_mod_obj_",out_prefix,".RData",sep=""))

#### END OF SCRIPT