######################################## METHOD COMPARISON #######################################
############################ Constant sampling for GAM CAI method ################################
#This script interpolates tmax values using MODIS LST and GHCND station data over the study      #
#(interpolation) area. It requires the text file of stations and a shape file of the study area. #
#Note that the projection for both GHCND and the study area is lonlat WGS84.                     #
#The method is assessed using constant sampling, varying the validation sample with different    #
#hold-out proportions.                                                                           #
#AUTHOR: Benoit Parmentier                                                                       #
#DATE: 12/27/2012                                                                                #
#PROJECT: NCEAS IPLANT: Environment and Organisms --TASK#491--                                   #
###################################################################################################

###Loading R libraries and packages
library(gtools)                              # loading some useful tools
library(mgcv)                                # GAM package by Simon Wood
library(sp)                                  # Spatial package with class definitions by Bivand et al.
library(spdep)                               # Spatial package with methods and spatial stats by Bivand et al.
library(rgdal)                               # GDAL wrapper for R, spatial utilities
library(gstat)                               # Kriging and co-kriging by Pebesma et al.
library(fields)                              # NCAR spatial interpolation methods such as kriging and splines
library(raster)                              # Hijmans et al. package for raster processing
library(rasterVis)                           # Raster visualization utilities (levelplot, etc.)
library(parallel)                            # Urbanek S. and Ripley B., package for multi-core & parallel processing
library(reshape)                             # melt/cast utilities used for the assessment tables

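#Note: this script targets the 2012-era raster/rgdal APIs used throughout (layerNames(),
#readGDAL(), readOGR()); later raster releases renamed layerNames() to names().
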
### Parameters and arguments

infile1<- "ghcn_or_tmax_covariates_06262012_OR83M.shp"    #GHCN shapefile containing variables for modeling 2010
#infile2<-"list_10_dates_04212012.txt"                    #List of 10 dates for the regression
infile2<-"list_365_dates_04212012.txt"                    #List of 365 dates for the regression
infile3<-"LST_dates_var_names.txt"                        #LST dates names
infile4<-"models_interpolation_05142012.txt"              #Interpolation model names
infile5<-"mean_day244_rescaled.rst"                       #Raster or grid for the locations of predictions
#infile6<-"lst_climatology.txt"
infile6<-"LST_files_monthly_climatology.txt"              #List of monthly LST climatology files
inlistf<-"list_files_05032012.txt"                        #Stack of images containing the covariates

path<-"/home/parmentier/Data/IPLANT_project/data_Oregon_stations_10242012_CAI" #Atlas location
setwd(path)

#Station locations for the study area
stat_loc<-read.table(paste(path,"/","location_study_area_OR_0602012.txt",sep=""),sep=",", header=TRUE)
#GHCN database for 1980-2010 for the study area (OR)
data3<-read.table(paste(path,"/","ghcn_data_TMAXy1980_2010_OR_0602012.txt",sep=""),sep=",", header=TRUE)

nmodels<-9                #Number of models run
y_var_name<-"dailyTmax"   #Climate variable being interpolated
climgam<-1                #If 1, GAM is run on the climatology rather than the daily deviation surface...
predval<-1                #If 1, produce raster predictions
prop<-0.3                 #Proportion of data retained for testing/validation

seed_number<- 100         #Seed number for random sampling; if seed_number<0, no seed is set..
#out_prefix<-"_365d_GAM_CAI2_const_10222012_"                 #User-defined output prefix
#out_prefix<-"_365d_GAM_CAI2_const_all_lstd_10272012"         #User-defined output prefix
out_prefix<-"_365d_GAM_CAI4_all_12272012"                     #User-defined output prefix

bias_val<-0            #If 1, daily training data are used in the bias surface rather than all monthly stations (added on 07/11/2012)
bias_prediction<-1     #If 1, use GAM for the bias prediction; otherwise GAM directly re-predicts y_var (daily tmax)
nb_sample<-1           #Number of times random sampling is repeated for every hold-out proportion
prop_min<-0.3          #If prop_min==prop_max and step==0, one prediction is made per date...
prop_max<-0.3
step<-0
constant<-0            #If 1, use the sample drawn for the first date for interpolation over the whole set of dates
#Projection used in the interpolation of the study area
CRS_interp<-"+proj=lcc +lat_1=43 +lat_2=45.5 +lat_0=41.75 +lon_0=-120.5 +x_0=400000 +y_0=0 +ellps=GRS80 +units=m +no_defs"
CRS_locs_WGS84<-CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +towgs84=0,0,0") #Station coordinates in WGS84

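#Illustration (commented out, not part of the original run): reprojecting a hypothetical lon/lat
#point into the Lambert Conformal Conic interpolation CRS defined above.
#pt<-SpatialPoints(cbind(-120.5,44.0), proj4string=CRS_locs_WGS84)
#coordinates(spTransform(pt, CRS(CRS_interp)))   #easting/northing in meters
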
#The formulas could be read from a text file or passed as options later...ok for running now on 12/07/2012
list_formulas<-vector("list",nmodels)

list_formulas[[1]] <- as.formula("y_var~ s(ELEV_SRTM)", env=.GlobalEnv)
list_formulas[[2]] <- as.formula("y_var~ s(LST)", env=.GlobalEnv)
list_formulas[[3]] <- as.formula("y_var~ s(ELEV_SRTM,LST)", env=.GlobalEnv)
list_formulas[[4]] <- as.formula("y_var~ s(lat)+s(lon)+s(ELEV_SRTM)", env=.GlobalEnv)
list_formulas[[5]] <- as.formula("y_var~ s(lat,lon,ELEV_SRTM)", env=.GlobalEnv)
list_formulas[[6]] <- as.formula("y_var~ s(lat,lon)+s(ELEV_SRTM)+s(Northness_w,Eastness_w)+s(LST)", env=.GlobalEnv)
list_formulas[[7]] <- as.formula("y_var~ s(lat,lon)+s(ELEV_SRTM)+s(Northness_w,Eastness_w)+s(LST)+s(LC1)", env=.GlobalEnv)
list_formulas[[8]] <- as.formula("y_var~ s(lat,lon)+s(ELEV_SRTM)+s(Northness_w,Eastness_w)+s(LST)+s(LC3)", env=.GlobalEnv)
list_formulas[[9]] <- as.formula("y_var~ s(x_OR83M,y_OR83M)", env=.GlobalEnv)

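#Each formula above is presumably fitted with mgcv::gam() inside runGAMCAI(), with "y_var"
#standing for the response (the monthly climatology or the daily deviation, depending on
#climgam/bias_prediction). Illustrative call (commented out; "data_month" is a hypothetical
#fitting data frame holding the response and covariates):
#mod1<-gam(list_formulas[[1]], data=data_month)
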
#source("GAM_CAI_function_multisampling_10252012.R")
source("GAM_CAI_function_multisampling_12072012.R")    #Defines runGAMCAI() used below

############ START OF THE SCRIPT ##################

###Reading the station data and setting up for model comparison
filename<-sub(".shp","",infile1)             #Removing the extension from the file name.
ghcn<-readOGR(".", filename)                 #Reading the shapefile

CRS<-proj4string(ghcn)                       #Storing projection information (ellipsoid, datum, etc.)

mean_LST<- readGDAL(infile5)                 #Reading the whole raster in memory. This provides a grid for kriging.
proj4string(mean_LST)<-CRS                   #Assigning coordinate information to the prediction grid.

ghcn <- transform(ghcn,Northness = cos(ASPECT*pi/180)) #Adding a northness variable to the data frame
ghcn <- transform(ghcn,Eastness = sin(ASPECT*pi/180))  #Adding an eastness variable to the data frame
ghcn <- transform(ghcn,Northness_w = sin(slope*pi/180)*cos(ASPECT*pi/180)) #Slope-weighted northness
ghcn <- transform(ghcn,Eastness_w = sin(slope*pi/180)*sin(ASPECT*pi/180))  #Slope-weighted eastness

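#Derivation note: Northness_w/Eastness_w weight the aspect unit vector by sin(slope), so a station
#on a 30-degree slope facing due north (ASPECT=0) gets Northness_w = sin(30*pi/180)*cos(0) = 0.5,
#while a flat station (slope=0) carries no aspect signal at all.
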
#Remove NA for LC and CANHEIGHT
ghcn$LC1[is.na(ghcn$LC1)]<-0
ghcn$LC3[is.na(ghcn$LC3)]<-0
ghcn$CANHEIGHT[is.na(ghcn$CANHEIGHT)]<-0
ghcn$LC4[is.na(ghcn$LC4)]<-0
ghcn$LC6[is.na(ghcn$LC6)]<-0

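#Quick check (commented out): confirm no NAs remain in the cleaned fields
#sapply(ghcn@data[c("LC1","LC3","LC4","LC6","CANHEIGHT")], function(x) sum(is.na(x)))
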
#Use file.path to construct paths for OS-independent code? !!!
dates<-readLines(file.path(path,infile2))
#dates <-readLines(paste(path,"/",infile2, sep=""))
LST_dates <-readLines(paste(path,"/",infile3, sep=""))
models <-readLines(paste(path,"/",infile4, sep=""))

##Extracting the variable values from the raster files

lines<-read.table(paste(path,"/",inlistf,sep=""), sep=" ")   #Column 1 contains the names of the raster files
inlistvar<-lines[,1]
inlistvar<-paste(path,"/",as.character(inlistvar),sep="")
covar_names<-as.character(lines[,2])                         #Column 2 contains short names for the covariates

s_raster<- stack(inlistvar)                                  #Creating a stack of raster images from the list of variables.
layerNames(s_raster)<-covar_names                            #Assigning names to the raster layers
projection(s_raster)<-CRS

#stat_val<- extract(s_raster, ghcn3)       #Extracting values from the raster stack for every point location in the coords data frame.
pos<-match("ASPECT",layerNames(s_raster))  #Find the layer named "ASPECT"
r1<-raster(s_raster,layer=pos)             #Select layer from stack
pos<-match("slope",layerNames(s_raster))   #Find the layer named "slope"
r2<-raster(s_raster,layer=pos)             #Select layer from stack
N<-cos(r1*pi/180)                          #Northness raster
E<-sin(r1*pi/180)                          #Eastness raster
Nw<-sin(r2*pi/180)*cos(r1*pi/180)          #Slope-weighted northness raster
Ew<-sin(r2*pi/180)*sin(r1*pi/180)          #Slope-weighted eastness raster

pos<-match("LC1",layerNames(s_raster)) #Find column with name "value"
135
LC1<-raster(s_raster,layer=pos)             #Select layer from stack
136
s_raster<-dropLayer(s_raster,pos)
137
LC1[is.na(LC1)]<-0                      #NA must be set to zero.
138
pos<-match("LC3",layerNames(s_raster)) #Find column with name "value"
139
LC3<-raster(s_raster,layer=pos)             #Select layer from stack
140
s_raster<-dropLayer(s_raster,pos)
141
LC3[is.na(LC3)]<-0
142

    
143
#Modification added to account for other land-cover classes

pos<-match("LC4",layerNames(s_raster))     #Find the layer named "LC4"
LC4<-raster(s_raster,layer=pos)            #Select layer from stack
s_raster<-dropLayer(s_raster,pos)
LC4[is.na(LC4)]<-0

pos<-match("LC6",layerNames(s_raster))     #Find the layer named "LC6"
LC6<-raster(s_raster,layer=pos)            #Select layer from stack
s_raster<-dropLayer(s_raster,pos)
LC6[is.na(LC6)]<-0

LC_s<-stack(LC1,LC3,LC4,LC6)
layerNames(LC_s)<-c("LC1_forest","LC3_grass","LC4_crop","LC6_urban")
#plot(LC_s)

pos<-match("CANHEIGHT",layerNames(s_raster)) #Find column with name "value"
160
CANHEIGHT<-raster(s_raster,layer=pos)             #Select layer from stack
161
s_raster<-dropLayer(s_raster,pos)
162
CANHEIGHT[is.na(CANHEIGHT)]<-0
163
pos<-match("ELEV_SRTM",layerNames(s_raster)) #Find column with name "ELEV_SRTM"
164
ELEV_SRTM<-raster(s_raster,layer=pos)             #Select layer from stack on 10/30
165
s_raster<-dropLayer(s_raster,pos)
166
ELEV_SRTM[ELEV_SRTM <0]<-NA
167

    
168
xy<-coordinates(r1)                    #Get x and y projected coordinates...
xy_latlon<-project(xy, CRS, inv=TRUE)  #Find lat/long for the projected coordinates (i.e. pixels...)
lon<-raster(xy_latlon)                 #Turn the matrix into a raster object: ncol=ncol(r1), nrow=nrow(r1)
ncol(lon)<-ncol(r1)
nrow(lon)<-nrow(r1)
extent(lon)<-extent(r1)
projection(lon)<-CRS                   #At this stage this is still an empty raster with 536 rows and 745 columns
lat<-lon
values(lon)<-xy_latlon[,1]
values(lat)<-xy_latlon[,2]

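#Sanity check (commented out): lon/lat ranges should roughly span the Oregon study area
#c(cellStats(lon,"min"),cellStats(lon,"max")); c(cellStats(lat,"min"),cellStats(lat,"max"))
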
r<-stack(N,E,Nw,Ew,lon,lat,LC1,LC3,LC4,LC6,CANHEIGHT,ELEV_SRTM)
rnames<-c("Northness","Eastness","Northness_w","Eastness_w","lon","lat","LC1","LC3","LC4","LC6","CANHEIGHT","ELEV_SRTM")
layerNames(r)<-rnames
s_raster<-addLayer(s_raster, r)

#s_sgdf<-as(s_raster,"SpatialGridDataFrame") #Conversion to spatial grid data frame

####### Preparing the LST stack of monthly climatologies...

#l=list.files(pattern="mean_month.*rescaled.rst")
l <-readLines(paste(path,"/",infile6, sep=""))
molst<-stack(l)          #Creating a raster stack...
#setwd(old)
molst<-molst-273.15      #K->C: LST stack of monthly averages converted to Celsius
idx <- seq(as.Date('2010-01-15'), as.Date('2010-12-15'), 'month')
molst <- setZ(molst, idx)          #Attach mid-month dates as the z (time) dimension
layerNames(molst) <- month.abb

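#Example (commented out): pull a single month's climatology layer by name
#jul_lst<-raster(molst, layer=match("Jul", layerNames(molst)))
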
######  Preparing tables for model assessment: specific diagnostics/metrics

#Model assessment: specific diagnostics/metrics
results_m1<- matrix(1,1,nmodels+3)
results_m2<- matrix(1,1,nmodels+3)
results_m3<- matrix(1,1,nmodels+3)
#results_RMSE_f<- matrix(1,length(models)+3)

#Model assessment: general diagnostics/metrics
results_RMSE <- matrix(1,1,nmodels+4)
results_MAE <- matrix(1,1,nmodels+4)
results_ME <- matrix(1,1,nmodels+4)       #There are 8+1 models
results_R2 <- matrix(1,1,nmodels+4)       #Coef. of determination for the validation dataset

results_RMSE_f<- matrix(1,1,nmodels+4)    #RMSE fit, RMSE for the training dataset
results_MAE_f <- matrix(1,1,nmodels+4)
results_R2_f<-matrix(1,1,nmodels+4)

######## Preparing monthly averages from the PostgreSQL database

# Do this work outside of (before) this function
# to avoid making a copy of the data frame inside the function call
date1<-ISOdate(data3$year,data3$month,data3$day)  #Creating a date object from 3 separate columns
date2<-as.POSIXlt(as.Date(date1))
data3$date<-date2
d<-subset(data3,year>=2000 & mflag=="0")          #Selecting the 2000-2010 dataset with good quality flags: 193 stations
#May need some screening??? i.e. range of temp and elevation...
d1<-aggregate(value~station+month, data=d, mean)  #Calculate the monthly mean for every station in OR
id<-as.data.frame(unique(d1$station))             #Unique stations in OR for 2000-2010: 193, though 7 lack monthly averages

dst<-merge(d1, stat_loc, by.x="station", by.y="STAT_ID")   #Inner join; all columns are retained

#This allows changing only one column name in the data.frame
pos<-match("value",names(dst))       #Find the column named "value"
names(dst)[pos]<-c("TMax")
dst$TMax<-dst$TMax/10                #TMax is the average monthly max temperature; GHCN stores it in tenths of deg C
#dstjan=dst[dst$month==9,]           #dst contains the monthly averages of tmax for every station over 2000-2010

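#Check (commented out): stations dropped by the inner join (present in d1 but not in stat_loc)
#setdiff(unique(d1$station), stat_loc$STAT_ID)
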
#Extracting covariates from the stack for the monthly dataset...
coords<- dst[c('lon','lat')]                    #Define coordinates in a data frame
coordinates(dst)<-coords                        #Assign coordinates to the data frame
proj4string(dst)<-CRS_locs_WGS84                #Assign the coordinate reference system in PROJ4 format
dst_month<-spTransform(dst,CRS(CRS_interp))     #Project from WGS84 to the interpolation coord. system

stations_val<-extract(s_raster,dst_month)       #Extraction of the information at the station locations
stations_val<-as.data.frame(stations_val)
dst_extract<-cbind(dst_month,stations_val)
dst<-dst_extract

#Now clean and screen the monthly values
dst_all<-dst
dst<-subset(dst,dst$TMax>-15 & dst$TMax<40)     #Drop unrealistic monthly means (deg C)
dst<-subset(dst,dst$ELEV_SRTM>0)                #This will drop two stations...or 24 rows

######### Preparing daily values for training and testing

#Screening for bad values: value is tmax (in tenths of deg C) in this case
#ghcn$value<-as.numeric(ghcn$value)
ghcn_all<-ghcn
ghcn_test<-subset(ghcn,ghcn$value>-150 & ghcn$value<400)
ghcn_test2<-subset(ghcn_test,ghcn_test$ELEV_SRTM>0)
ghcn<-ghcn_test2
#coords<- ghcn[,c('x_OR83M','y_OR83M')]

##Sampling: selecting training and testing sites...

if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}
nel<-length(dates)
dates_list<-vector("list",nel)   #List of one-row data.frames

prop_range<-(seq(from=prop_min,to=prop_max,by=step))*100   #Hold-out proportions expressed in percent
sn<-length(dates)*nb_sample*length(prop_range)             #Total number of sampling runs

for(i in 1:length(dates)){
  d_tmp<-rep(dates[i],nb_sample*length(prop_range))   #Repeating the same date
  s_nb<-rep(1:nb_sample,length(prop_range))           #Number of random samples per proportion
  prop_tmp<-sort(rep(prop_range, nb_sample))
  tab_run_tmp<-cbind(d_tmp,s_nb,prop_tmp)
  dates_list[[i]]<-tab_run_tmp
}

sampling_dat<-as.data.frame(do.call(rbind,dates_list))
names(sampling_dat)<-c("date","run_samp","prop")

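#Illustration: with 365 dates, nb_sample=1 and a single 30% hold-out proportion, sampling_dat
#has 365 rows, one per date, with run_samp=1 and prop=30 (inspect with head(sampling_dat)).
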
for(i in 2:3){   #Convert run_samp and prop back to numeric (rbind/cbind coerced them)
  sampling_dat[,i]<-as.numeric(as.character(sampling_dat[,i]))
}

sampling_dat$date<- as.character(sampling_dat[,1])
#ghcn.subsets <-lapply(dates, function(d) subset(ghcn, date==d)) #This creates a list of 10 or 365 subsets based on the dates
ghcn.subsets <-lapply(as.character(sampling_dat$date), function(d) subset(ghcn, date==d)) #One subset of ghcn per sampling run, matched on its date

if (seed_number>0) {
  set.seed(seed_number)   #Using a seed number allows results based on random numbers to be compared...
}

sampling<-vector("list",length(ghcn.subsets))
sampling_station_id<-vector("list",length(ghcn.subsets))
for(i in 1:length(ghcn.subsets)){
  n<-nrow(ghcn.subsets[[i]])
  prop<-(sampling_dat$prop[i])/100
  ns<-n-round(n*prop)   #Size of the training sample, e.g. 70% of the rows when prop is 0.3
  nv<-n-ns              #Size of the validation sample: prop of the rows
  ind.training <- sample(nrow(ghcn.subsets[[i]]), size=ns, replace=FALSE) #Index positions of the training rows, drawn at random
  ind.testing <- setdiff(1:nrow(ghcn.subsets[[i]]), ind.training)
  #Find the corresponding station IDs
  data_sampled<-ghcn.subsets[[i]][ind.training,]   #The randomly sampled training stations
  station_id.training<-data_sampled$station        #IDs of the randomly sampled training stations (115)
  #Save the information
  sampling[[i]]<-ind.training
  sampling_station_id[[i]]<- station_id.training
}

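#Check (commented out): the training fraction should be close to 1-prop for each subset
#length(sampling[[1]])/nrow(ghcn.subsets[[1]])
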
## Use the same samples across the year...
if (constant==1){
  sampled<-sampling[[1]]
  data_sampled<-ghcn.subsets[[1]][sampled,]   #The stations sampled on the first date
  station_sampled<-data_sampled$station       #IDs of the stations sampled on the first date (115)
  list_const_sampling<-vector("list",sn)
  list_const_sampling_station_id<-vector("list",sn)
  for(i in 1:sn){
    station_id.training<-intersect(station_sampled,ghcn.subsets[[i]]$station) #Keep only the sampled stations reporting on date i
    ind.training<-match(station_id.training,ghcn.subsets[[i]]$station)
    list_const_sampling[[i]]<-ind.training
    list_const_sampling_station_id[[i]]<-station_id.training
  }
  sampling<-list_const_sampling
  sampling_station_id<-list_const_sampling_station_id
}

######## Prediction for the range of dates

#Run the predictions in parallel: one call to runGAMCAI() per date/sample/proportion combination
#gam_CAI_mod<-mclapply(1:length(dates), runGAMCAI,mc.preschedule=FALSE,mc.cores = 8)
gam_CAI_mod<-mclapply(1:length(ghcn.subsets), runGAMCAI,mc.preschedule=FALSE,mc.cores = 9)
#gam_CAI_mod<-mclapply(1:2, runGAMCAI,mc.preschedule=FALSE,mc.cores = 2) #Quick test on the first two runs

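#Each element of gam_CAI_mod is the object returned by runGAMCAI() for one date/sample/proportion
#run; the code below assumes its third slot holds the accuracy-metric table used to build tb.
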
tb<-gam_CAI_mod[[1]][[3]][0,]   #Empty data frame with the metric-table structure, usable for rbinding...
tb_tmp<-gam_CAI_mod             #Copy

for (i in 1:length(tb_tmp)){
  tmp<-tb_tmp[[i]][[3]]
  tb<-rbind(tb,tmp)
}
rm(tb_tmp)

for(i in 4:(nmodels+4)){   #Convert the model result columns of tb to numeric
  tb[,i]<-as.numeric(as.character(tb[,i]))
}

metrics<-as.character(unique(tb$metric))   #Names of the accuracy metrics (RMSE, MAE, etc.)
tb_metric_list<-vector("list",length(metrics))

for(i in 1:length(metrics)){   #Reorganizing the information in terms of metrics
  metric_name<-paste("tb_",metrics[i],sep="")
  tb_metric<-subset(tb, metric==metrics[i])
  tb_metric<-cbind(tb_metric,sampling_dat[,2:3])
  assign(metric_name,tb_metric)
  tb_metric_list[[i]]<-tb_metric
}
mod_labels<-rep("mod",nmodels+1)
index<-as.character(1:(nmodels+1))
mod_labels<-paste(mod_labels,index,sep="")   #Column labels "mod1" through "mod10"

tb_diagnostic<-do.call(rbind,tb_metric_list)   #Produce a data.frame from the list...
tb_diagnostic[["prop"]]<-as.factor(tb_diagnostic[["prop"]])

t<-melt(tb_diagnostic,
        measure=mod_labels,
        id=c("dates","metric","prop"),
        na.rm=F)
avg_tb<-cast(t,metric+prop~variable,mean)      #Mean of each metric by model and hold-out proportion
median_tb<-cast(t,metric+prop~variable,median) #Median of each metric by model and hold-out proportion
avg_tb[["prop"]]<-as.numeric(as.character(avg_tb[["prop"]]))
avg_RMSE<-subset(avg_tb,metric=="RMSE")

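#Example (commented out): averaged RMSE per model at the 30% hold-out proportion
#avg_RMSE[avg_RMSE$prop==30,]
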
# Save before plotting
#sampling_obj<-list(sampling_dat=sampling_dat,training=sampling)
#sampling_obj<-list(sampling_dat=sampling_dat,training=sampling, tb=tb_diagnostic)
sampling_obj<-list(sampling_dat=sampling_dat,training=sampling, training_id=sampling_station_id, tb=tb_diagnostic)

write.table(avg_tb, file= paste(path,"/","results2_fusion_Assessment_measure_avg_",out_prefix,".txt",sep=""), sep=",")
write.table(median_tb, file= paste(path,"/","results2_fusion_Assessment_measure_median_",out_prefix,".txt",sep=""), sep=",")
write.table(tb_diagnostic, file= paste(path,"/","results2_fusion_Assessment_measure",out_prefix,".txt",sep=""), sep=",")
write.table(tb, file= paste(path,"/","results2_fusion_Assessment_measure_all",out_prefix,".txt",sep=""), sep=",")

save(sampling_obj, file= paste(path,"/","results2_CAI_sampling_obj",out_prefix,".RData",sep=""))
save(gam_CAI_mod,file= paste(path,"/","results2_CAI_Assessment_measure_all",out_prefix,".RData",sep=""))

#New combined object used since November 2012
gam_CAI_mod_obj<-list(gam_CAI_mod=gam_CAI_mod,sampling_obj=sampling_obj)
save(gam_CAI_mod_obj,file= paste(path,"/","results_mod_obj_",out_prefix,".RData",sep=""))

#### END OF SCRIPT