##################    MULTI SAMPLING GAM FUSION METHOD ASSESSMENT ####################################
############################ Merging LST and station data ##########################################
#This script interpolates tmax values using MODIS LST and GHCND station data for the              #
#interpolation area. It requires the text file of stations and a shape file of the study area.    #
#Note that the projection for both the GHCND stations and the study area is lonlat WGS84.         #
#AUTHOR: Benoit Parmentier                                                                         #
#DATE: 08/15/2012                                                                                  #
#PROJECT: NCEAS IPLANT: Environment and Organisms --TASK#363--                                     #
###################################################################################################

###Loading R libraries and packages                                                      
library(gtools)                              # Loading some useful tools 
library(mgcv)                                # GAM package by Simon Wood
library(sp)                                  # Spatial package with class definitions by Bivand et al.
library(spdep)                               # Spatial package with methods and spatial stats by Bivand et al.
library(rgdal)                               # GDAL wrapper for R, spatial utilities
library(gstat)                               # Kriging and co-kriging by Pebesma et al.
library(fields)                              # NCAR spatial interpolation methods such as kriging and splines
library(raster)                              # Hijmans et al. package for raster processing
library(rasterVis)
library(parallel)                            # Urbanek S. and Ripley B., package for multi-core & parallel processing

### Parameters and arguments

infile1<- "ghcn_or_tmax_covariates_06262012_OR83M.shp"    #GHCN shapefile containing variables for modeling 2010                 
#infile2<-"list_10_dates_04212012.txt"                    #List of 10 dates for the regression
infile2<-"list_2_dates_04212012.txt"
#infile2<-"list_365_dates_04212012.txt"
infile3<-"LST_dates_var_names.txt"                        #LST date variable names
infile4<-"models_interpolation_05142012.txt"              #Interpolation model names
infile5<-"mean_day244_rescaled.rst"                       #Raster or grid for the locations of predictions
#infile6<-"lst_climatology.txt"
infile6<-"LST_files_monthly_climatology.txt"
inlistf<-"list_files_05032012.txt"                        #Stack of images containing the covariates

#path<-"/home/parmentier/Data/IPLANT_project/data_Oregon_stations"
path<-"/home/parmentier/Data/IPLANT_project/methods_interpolation_comparison"
#path<-"/home/parmentier/Data/IPLANT_project/data_Oregon_stations_GAM"
#path<-"/home/parmentier/Data/IPLANT_project/data_Oregon_stations_07152012"     #Jupiter location on Atlas for kriging
#path<-"M:/Data/IPLANT_project/data_Oregon_stations"                            #Location on Atlas

setwd(path)
#Station locations of the study area
stat_loc<-read.table(paste(path,"/","location_study_area_OR_0602012.txt",sep=""),sep=",", header=TRUE)
#GHCN database for 1980-2010 for the study area (OR) 
data3<-read.table(paste(path,"/","ghcn_data_TMAXy1980_2010_OR_0602012.txt",sep=""),sep=",", header=TRUE)

nmodels<-8                                   #Number of models run
y_var_name<-"dailyTmax"
predval<-1
prop<-0.3                                    #Proportion of the data held out for validation
#prop<-0.25
seed_number<- 100                            #Seed number for random sampling; if 0, no seed is set
out_prefix<-"_365d_GAM_fusion_multisamp2_0823012"                #User-defined output prefix

bias_val<-0            #If 1, the training data are used in the bias surface rather than all monthly stations
nb_sample<-15
prop_min<-0.1
prop_max<-0.7
step<-0.1

#source("fusion_function_07192012.R")
source("GAM_fusion_function_multisampling_08232012.R")
############ START OF THE SCRIPT ##################
#
#
### Step 0/Step 6 in Brian's code...preparing year 2010 data for modeling 
#

###Reading the station data and setting up for models' comparison
filename<-sub(".shp","",infile1)             #Removing the extension from the file name
ghcn<-readOGR(".", filename)                 #Reading the shapefile 

CRS<-proj4string(ghcn)                       #Storing the projection information (ellipsoid, datum, etc.)

mean_LST<- readGDAL(infile5)                 #Reading the whole raster in memory. This provides a grid for kriging
proj4string(mean_LST)<-CRS                   #Assigning coordinate information to the prediction grid

ghcn <- transform(ghcn,Northness = cos(ASPECT*pi/180))                     #Adding a variable to the dataframe
ghcn <- transform(ghcn,Eastness = sin(ASPECT*pi/180))                      #Adding a variable to the dataframe
ghcn <- transform(ghcn,Northness_w = sin(slope*pi/180)*cos(ASPECT*pi/180)) #Adding a variable to the dataframe
ghcn <- transform(ghcn,Eastness_w = sin(slope*pi/180)*sin(ASPECT*pi/180))  #Adding a variable to the dataframe
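#Note: Northness/Eastness decompose station aspect into north-south and east-west components;
#the "_w" versions weight these components by sin(slope), so nearly flat terrain contributes little.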

#Remove NA for LC and CANHEIGHT
ghcn$LC1[is.na(ghcn$LC1)]<-0
ghcn$LC3[is.na(ghcn$LC3)]<-0
ghcn$CANHEIGHT[is.na(ghcn$CANHEIGHT)]<-0

dates <-readLines(paste(path,"/",infile2, sep=""))
LST_dates <-readLines(paste(path,"/",infile3, sep=""))
models <-readLines(paste(path,"/",infile4, sep=""))

##Extracting the variable values from the raster files                                             

lines<-read.table(paste(path,"/",inlistf,sep=""), sep=" ")                   #Column 1 contains the names of the raster files
inlistvar<-lines[,1]
inlistvar<-paste(path,"/",as.character(inlistvar),sep="")
covar_names<-as.character(lines[,2])                                         #Column 2 contains short names for the covariates

s_raster<- stack(inlistvar)                                                  #Creating a stack of raster images from the list of variables
layerNames(s_raster)<-covar_names                                            #Assigning names to the raster layers
projection(s_raster)<-CRS

#stat_val<- extract(s_raster, ghcn3)       #Extracting values from the raster stack for every point location in the coords data frame
pos<-match("ASPECT",layerNames(s_raster))  #Find the layer named "ASPECT"
r1<-raster(s_raster,layer=pos)             #Select the layer from the stack
pos<-match("slope",layerNames(s_raster))   #Find the layer named "slope"
r2<-raster(s_raster,layer=pos)             #Select the layer from the stack
N<-cos(r1*pi/180)
E<-sin(r1*pi/180)
Nw<-sin(r2*pi/180)*cos(r1*pi/180)          #Slope-weighted northness raster
Ew<-sin(r2*pi/180)*sin(r1*pi/180)          #Slope-weighted eastness raster

pos<-match("LC1",layerNames(s_raster))            #Find the layer named "LC1"
LC1<-raster(s_raster,layer=pos)                   #Select the layer from the stack
s_raster<-dropLayer(s_raster,pos)
LC1[is.na(LC1)]<-0
pos<-match("LC3",layerNames(s_raster))            #Find the layer named "LC3"
LC3<-raster(s_raster,layer=pos)                   #Select the layer from the stack
s_raster<-dropLayer(s_raster,pos)
LC3[is.na(LC3)]<-0
pos<-match("CANHEIGHT",layerNames(s_raster))      #Find the layer named "CANHEIGHT"
CANHEIGHT<-raster(s_raster,layer=pos)             #Select the layer from the stack
s_raster<-dropLayer(s_raster,pos)
CANHEIGHT[is.na(CANHEIGHT)]<-0
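#Setting NA land-cover and canopy-height cells to 0 mirrors the station-side cleaning applied to ghcn above,
#so raster covariates and station covariates are treated consistently.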

xy<-coordinates(r1)                    #Get the projected x and y coordinates of the grid cells
xy_latlon<-project(xy, CRS, inv=TRUE)  #Find lat/long for the projected coordinates (i.e. pixels)
lon<-raster(xy_latlon)                 #Transform the matrix into a raster object
ncol(lon)<-ncol(r1)
nrow(lon)<-nrow(r1)
extent(lon)<-extent(r1)
projection(lon)<-CRS                   #At this stage this is still an empty raster with 536 rows and 745 columns
lat<-lon
values(lon)<-xy_latlon[,1]
values(lat)<-xy_latlon[,2]
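#lon and lat now hold the geographic coordinates of every cell of the prediction grid,
#so they can be added to the covariate stack alongside the other raster layers below.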

r<-stack(N,E,Nw,Ew,lon,lat,LC1,LC3,CANHEIGHT)
rnames<-c("Northness","Eastness","Northness_w","Eastness_w","lon","lat","LC1","LC3","CANHEIGHT")
layerNames(r)<-rnames
s_raster<-addLayer(s_raster, r)

#s_sgdf<-as(s_raster,"SpatialGridDataFrame")    #Conversion to a spatial grid data frame

####### Preparing LST stack of climatology...

#l=list.files(pattern="mean_month.*rescaled.rst")
l <-readLines(paste(path,"/",infile6, sep=""))
molst<-stack(l)                      #Creating a raster stack...
#setwd(old)
molst<-molst-273.16                  #K->C: LST stack of monthly averages
idx <- seq(as.Date('2010-01-15'), as.Date('2010-12-15'), 'month')
molst <- setZ(molst, idx)
layerNames(molst) <- month.abb
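#The 12 monthly LST layers are indexed by mid-month dates (setZ) and labeled Jan..Dec;
#this monthly LST climatology is presumably consumed by the sourced runGAMFusion function.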

######  Preparing tables for model assessment: specific diagnostics/metrics

#Model assessment: specific diagnostics/metrics
results_AIC<- matrix(1,1,nmodels+3)  
results_GCV<- matrix(1,1,nmodels+3)
results_DEV<- matrix(1,1,nmodels+3)
#results_RMSE_f<- matrix(1,length(models)+3)

#Model assessment: general diagnostics/metrics 
results_RMSE <- matrix(1,1,nmodels+4)
results_MAE <- matrix(1,1,nmodels+4)
results_ME <- matrix(1,1,nmodels+4)       #There are 8+1 models
results_R2 <- matrix(1,1,nmodels+4)       #Coef. of determination for the validation dataset

results_RMSE_f<- matrix(1,1,nmodels+4)    #RMSE fit, i.e. RMSE for the training dataset
results_MAE_f <- matrix(1,1,nmodels+4)

######## Preparing monthly averages from the PostgreSQL database

# Do this work outside of (before) the function
# to avoid making a copy of the data frame inside the function call
date1<-ISOdate(data3$year,data3$month,data3$day)  #Creating a date object from 3 separate columns
date2<-as.POSIXlt(as.Date(date1))
data3$date<-date2
d<-subset(data3,year>=2000 & mflag=="0" )         #Selecting the 2000-2010 dataset with good quality flags: 193 stations
#May need some screening, e.g. on the range of temperature and elevation...
d1<-aggregate(value~station+month, data=d, mean)  #Calculate the monthly mean for every station in OR
id<-as.data.frame(unique(d1$station))             #Unique stations in OR for 2000-2010: 193, but 7 lack monthly averages    

dst<-merge(d1, stat_loc, by.x="station", by.y="STAT_ID")   #Inner join; all columns are retained

#Rename only the "value" column of the data.frame
pos<-match("value",names(dst))                    #Find the column named "value"
names(dst)[pos]<-c("TMax")
dst$TMax<-dst$TMax/10                             #TMax is the average maximum temperature for the monthly data
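#GHCN-Daily temperature values are stored in tenths of degrees C, so dividing by 10 gives degrees C.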
#dstjan=dst[dst$month==9,]   #dst contains the monthly averages for tmax for every station over 2000-2010

######### Preparing daily values for training and testing

#Screening for bad values: value is tmax in this case
#ghcn$value<-as.numeric(ghcn$value)
ghcn_all<-ghcn
ghcn_test<-subset(ghcn,ghcn$value>-150 & ghcn$value<400)
ghcn_test2<-subset(ghcn_test,ghcn_test$ELEV_SRTM>0)
ghcn<-ghcn_test2
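#If the daily values follow the GHCN-Daily convention (tenths of degrees C), the bounds above correspond
#to -15C and 40C; the ELEV_SRTM>0 filter removes stations whose SRTM elevation is missing or non-positive.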
#coords<- ghcn[,c('x_OR83M','y_OR83M')]

##Sampling: training and testing sites.

#set.seed(seed_number)                   #Using a seed number allows results based on random numbers to be compared...
nel<-length(dates)
dates_list<-vector("list",nel)           #One table of sampling runs per date

prop_range<-(seq(from=prop_min,to=prop_max,by=step))*100
sn<-length(dates)*nb_sample*length(prop_range)
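#With the parameters above: 7 hold-out proportions (10%..70% by 10%), nb_sample=15 replicates each,
#and length(dates) dates from infile2, i.e. sn = length(dates)*15*7 runs (210 runs for the 2-date list).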

for(i in 1:length(dates)){
  d_tmp<-rep(dates[i],nb_sample*length(prop_range))  #Repeating the same date for every run
  s_nb<-rep(1:nb_sample,length(prop_range))          #Sample replicate number within each proportion
  prop_tmp<-sort(rep(prop_range, nb_sample))
  tab_run_tmp<-cbind(d_tmp,s_nb,prop_tmp)
  dates_list[[i]]<-tab_run_tmp
}

sampling_dat<-as.data.frame(do.call(rbind,dates_list))
names(sampling_dat)<-c("date","run_samp","prop")
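#Each row of sampling_dat defines one model run: a date, a sample replicate (1..nb_sample)
#and a hold-out proportion in percent (e.g. "20100101", 1, 10).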

for(i in 2:3){                                     #Convert run_samp and prop to numeric
  sampling_dat[,i]<-as.numeric(as.character(sampling_dat[,i]))  
}

sampling_dat$date<- as.character(sampling_dat[,1])
#ghcn.subsets <-lapply(dates, function(d) subset(ghcn, date==d))   #Creates a list of subsets (10 or 365) based on dates
ghcn.subsets <-lapply(as.character(sampling_dat$date), function(d) subset(ghcn, date==d))  #One subset of ghcn per row of sampling_dat

sampling<-vector("list",length(ghcn.subsets))

for(i in 1:length(ghcn.subsets)){
  n<-nrow(ghcn.subsets[[i]])
  prop<-(sampling_dat$prop[i])/100
  ns<-n-round(n*prop)                                                       #Number of training observations: (1-prop) of the rows
  nv<-n-ns                                                                  #Number of validation observations: prop of the rows
  ind.training <- sample(nrow(ghcn.subsets[[i]]), size=ns, replace=FALSE)   #Randomly select the index positions of the training rows
  ind.testing <- setdiff(1:nrow(ghcn.subsets[[i]]), ind.training)
  sampling[[i]]<-ind.training
}
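#Only the training indices are stored in 'sampling'; the validation set for each run is the complement
#(presumably recomputed from these indices inside runGAMFusion).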

######## Prediction for the range of dates and sampling data

#gam_fus_mod<-mclapply(1:length(dates), runGAMFusion,mc.preschedule=FALSE,mc.cores = 8)
#gam_fus_mod<-mclapply(1:1, runGAMFusion,mc.preschedule=FALSE,mc.cores = 8)
gam_fus_mod_s<-mclapply(1:length(ghcn.subsets), runGAMFusion,mc.preschedule=FALSE,mc.cores = 2)  #Run runGAMFusion for every row of sampling_dat in parallel
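#Each element of gam_fus_mod_s holds the output of runGAMFusion for one sampling run;
#element [[3]] of each result is the table of accuracy metrics used below.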
#gam_fus_mod2<-mclapply(11:11, runGAMFusion,mc.preschedule=FALSE,mc.cores = 1)


## Plotting and saving diagnostic measures
accuracy_tab_fun<-function(i,f_list){
  tb<-f_list[[i]][[3]]
  return(tb)
}
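#accuracy_tab_fun extracts the metrics table ([[3]]) for run i. The explicit rbind loop below does the
#same job; an equivalent (hypothetical) one-liner using this helper would be:
#tb_all<-do.call(rbind, lapply(1:length(gam_fus_mod_s), accuracy_tab_fun, f_list=gam_fus_mod_s))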

tb<-gam_fus_mod_s[[1]][[3]][0,]              #Empty data frame with the metric table structure, used for rbinding
tb_tmp<-gam_fus_mod_s                        #Copy

for (i in 1:length(tb_tmp)){
  tmp<-tb_tmp[[i]][[3]]
  tb<-rbind(tb,tmp)
}
rm(tb_tmp)

for(i in 4:ncol(tb)){                        #Convert columns 4 onward to numeric
  tb[,i]<-as.numeric(as.character(tb[,i]))  
}

metrics<-as.character(unique(tb$metric))
tb_metric_list<-vector("list",length(metrics))

for(i in 1:length(metrics)){                 #Build one table per metric (e.g. tb_RMSE, tb_MAE, ...)
  metric_name<-paste("tb_",metrics[i],sep="")
  tb_metric<-subset(tb, metric==metrics[i])
  tb_metric<-cbind(tb_metric,sampling_dat[,2:3])
  assign(metric_name,tb_metric)
  tb_metric_list[[i]]<-tb_metric
}
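#Note: cbind(tb_metric,sampling_dat[,2:3]) assumes that, within each metric, the rows of tb appear in the
#same order as the rows of sampling_dat (one row per run), which the rbind loop above preserves.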

#tb_diagnostic1<-rbind(tb_RMSE,tb_MAE,tb_ME,tb_R2)
tb_diagnostic<-do.call(rbind,tb_metric_list)

avg_list<-vector("list",nmodels+1)

for (i in 1:(nmodels+1)){
  formag<-paste("mod",i,sep="")
  form<-as.formula(paste(formag,"~prop+metric"))
  avg_all1<-aggregate(form, data=tb_diagnostic, mean) 
  file<-paste("agg_metrics_",formag,out_prefix,".txt",sep="")   #sep="" avoids spaces in the file name
  write.table(avg_all1,file=file,sep=",")
  avg_list[[i]]<-avg_all1
}
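#Each agg_metrics_modX file holds, for one model, the metric values averaged over the sampling
#replicates (and dates) for each hold-out proportion and metric.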

test<-aggregate(mod9 ~ prop + metric + dates, data=tb_diagnostic, mean)
data_plot<-as.matrix(subset(avg_list[[9]],metric=="RMSE" & dates=="20100102"))

#x<- matrix(1,1,nmodels+3)  
y<- matrix(1,7,2)  

y[,1]<-as.numeric(data_plot[,4])
y[,2]<-as.numeric(data_plot[,5])

x<-cbind(unique(test$prop),unique(test$prop))
plot(x,y,col=c("red","blue"))
lines(x,y,col=c("red","blue"))
#plot(data_plot[,4:5]~prop_t)                 #prop_t is not defined here; see the plot(x,y) calls above

plot(x,y)
plot(mod9~prop, data=subset(test,metric=="RMSE" & dates=="20100101"))   #test only contains mod9

write.table(tb_diagnostic, file= paste(path,"/","results2_fusion_Assessment_measure",out_prefix,".txt",sep=""), sep=",")
write.table(tb, file= paste(path,"/","results2_fusion_Assessment_measure_all",out_prefix,".txt",sep=""), sep=",")
save(gam_fus_mod_s,file= paste(path,"/","results2_fusion_Assessment_measure_all",out_prefix,".RData",sep=""))
#tb<-as.data.frame(tb_diagnostic1)

#write.table(tb_1, file= paste(path,"/","results2_fusion_Assessment_measure1",out_prefix,".txt",sep=""), sep=",")

#write.table(tb_diagnostic2, file= paste(path,"/","results_fusion_Assessment_measure2",out_prefix,".txt",sep=""), sep=",")

#### END OF SCRIPT