Revision 9d52d7e0
Added by Adam Wilson about 12 years ago
climate/procedures/Pleiades.R

 ## get all unique dates
 alldates=unique(fs$dateid)
 
+
+## get MODLAND tile information
+tb=read.table("http://landweb.nascom.nasa.gov/developers/sn_tiles/sn_bound_10deg.txt",skip=6,nrows=648,header=T)
+tb$tile=paste("h",sprintf("%02d",tb$ih),"v",sprintf("%02d",tb$iv),sep="")
+### use MODIS tile as ROI
+#modt=readOGR("modgrid","modis_sinusoidal_grid_world",)
+#modt@data[,colnames(tb)[3:6]]=tb[match(paste(modt$h,modt$v),paste(tb$ih,tb$iv)),3:6]
+#write.csv(modt@data,file="modistile.csv")
+
+
|
 ## write it out
-save(fs,file="allfiles.Rdata")
-save(alldates,file="alldates.Rdata")
+save(fs,tb,file="allfiles.Rdata")
+#save(alldates,file="alldates.Rdata")
 
-notdonedates=alldates
-save(notdonedates,file="notdonedates.Rdata")
+## identify which have been completed
+outdir="2_daily"
+done=alldates%in%substr(list.files(outdir),5,12)
+table(done)
+notdone=alldates[!done]
 
+#notdone=alldates[1:4]
+
+save(notdone,file="notdone.Rdata")
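Note: the completed-date check assumes a fixed naming scheme in the output directory, since substr(...,5,12) pulls characters 5-12 of each filename as the date id. A minimal sketch with hypothetical filenames (the real pattern in 2_daily is not shown in this revision):

    ## hypothetical filenames whose characters 5-12 hold the 8-digit date
    f=c("MOD_20090101.nc","MOD_20090102.nc")
    substr(f,5,12)                      # "20090101" "20090102"
    dates=c("20090101","20090103")
    dates%in%substr(f,5,12)             # TRUE FALSE: second date not done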
 
-## output ROI
-#get bounding box of region in m
-#ge=SpatialPoints(data.frame(lon=c(-125,-115),lat=c(40,47)))
-#projection(ge)=CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
-#ge2=spTransform(ge, CRS(" +proj=sinu +lon_0=0 +x_0=0 +y_0=0"))
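Note: the tile bounds already loaded into tb offer a shapefile-free alternative to the ROI code removed above. A sketch, assuming sn_bound_10deg.txt provides lon_min/lon_max/lat_min/lat_max columns, using the same lon -125..-115, lat 40..47 box as the removed code:

    ## MODIS tiles whose bounding boxes intersect the region of interest
    sel=tb$lon_min<= -115 & tb$lon_max>= -125 & tb$lat_min<=47 & tb$lat_max>=40
    tb$tile[sel]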
 
 ## vars
 vars=as.data.frame(matrix(c(
...
 
 
 ### Submission script
-cat("
-#PBS -S /bin/csh
-#PBS -N cfd
-# This example uses the Harpertown nodes
-# User job can access ~7.6 GB of memory per Harpertown node.
-# A memory intensive job that needs more than ~0.9 GB
-# per process should use less than 8 cores per node
-# to allow more memory per MPI process. This example
-# asks for 64 nodes and 4 MPI processes per node.
-# This request implies 64x4 = 256 MPI processes for the job.
-#PBS -l select=64:ncpus=8:mpiprocs=4:model=har
-#PBS -l walltime=4:00:00
+
+cat(paste("
+#PBS -S /bin/sh
+#PBS -J 700-899
+###PBS -J 1-",length(notdone),"
+#PBS -l walltime=0:10:00
+#PBS -l ncpus=100
 #PBS -j oe
-#PBS -W group_list=a0801
+#PBS -o log/log_^array_index^
 #PBS -m e
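Note: the commented ###PBS -J line shows the intended templating: paste() splices length(notdone) into the directive so the array gets one subjob per missing date. A sketch of what it expands to (assuming 200 dates remain):

    paste("#PBS -J 1-",length(notdone),sep="")    # e.g. "#PBS -J 1-200"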
-
-# Load some modules
-module load gcc
-module load hdf5
-module load netcdf/4.1.3/gcc/mpt
-module load mpi
-module load tcl-tk/8.5.11
-module load udunits/2.1.19
-module load szip/2.1/gcc
-module load R
-module load git
-
-# By default, PBS executes your job from your home directory.
-# However, you can use the environment variable
-# PBS_O_WORKDIR to change to the directory where
-# you submitted your job.
-
-cd $PBS_O_WORKDIR
-
-# use of dplace to pin processes to processors may improve performance
-# Here you request to pin processes to processors 2, 3, 6, 7 of each node.
-# This helps for using the Harpertown nodes, but not for Nehalem-EP or
-# Westmere-EP nodes
-
-# The resource request of select=64 and mpiprocs=4 implies
-# that you want to have 256 MPI processes in total.
-# If this is correct, you can omit the -np 256 for mpiexec
-# that you might have used before.
-
-mpiexec dplace -s1 -c2,3,6,7 ./grinder < run_input > output
-
-# It is a good practice to write stderr and stdout to a file (ex: output)
-# Otherwise, they will be written to the PBS stderr and stdout in /PBS/spool,
-# which has limited amount of space. When /PBS/spool is filled up, any job
-# that tries to write to /PBS/spool will die.
-
-# -end of script-
+#PBS -M adam.wilson@yale.edu
+#PBS -N MOD06
+
+## cd to working directory
+cd /nobackupp1/awilso10/mod06
+
+## set some memory limits
+# ulimit -d 1500000 -m 1500000 -v 1500000 #limit memory usage
+source /usr/local/lib/global.profile
+## export a few important variables
+export PATH=$PATH:/nobackupp1/awilso10/bin:/nobackupp1/awilso10/software/bin
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/nobackupp1/awilso10/software/lib
+export MRTDATADIR=/nobackupp1/awilso10/software/heg/data
+export PGSHOME=/nobackupp1/awilso10/software/heg
+export MRTBINDIR=/nobackup1/awilso10/software/TOOLKIT_MTD
+export R_LIBS=\"/u/awilso10/R/x86_64-unknown-linux-gnu-library/2.15/\"
+## load modules
+module load gcc mpi-sgi/mpt.2.06r6 hdf4 udunits R
+## Run the script!
+Rscript --verbose --vanilla /u/awilso10/environmental-layers/climate/procedures/MOD06_L2_data_compile_Pleiades.r i=${PBS_ARRAY_INDEX}
+rm -r $TMPDIR
+exit 0
+",sep=""),file="MOD06_process")
+
+### Check the file
+system("cat MOD06_process")
+#system("chmod +x MOD06_process")
+
+## Submit it!
+#system("qsub -q devel MOD06_process")
+system("qsub MOD06_process")
+
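Note: qsub prints the job id on submission, so it could be captured instead of hard-coding 391843[] in the qstat calls below (a sketch):

    jobid=system("qsub MOD06_process",intern=TRUE)   # e.g. "391843[].server"
    system(paste("qstat -t",jobid))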
+## check progress
+system("qstat -u awilso10")
+system("qstat -t 391843[]")
+system("qstat -f 391843[2]")
+
+#system("qstat devel ")
+#system("qstat | grep awilso10")
+
+#print(paste(max(0,length(system("qstat",intern=T))-2)," processes running"))
+# system("ssh c0-8.farm.caes.ucdavis.edu")
+# system("qalter -p +1024 25964") #decrease priority of job to run extraction below.
+system("cat log/InterpScript.o55934.2")
+
+## check log
+system(paste("cat",list.files("log",pattern="InterpScript",full=T)[100]))
+#system(paste("cat",list.files("log",pattern="InterpScript",full=T)[13]," | grep \"Temporary Directory\""))
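Note: the same list.files() call can drive a scan of every log at once rather than cat-ing them one by one (a sketch, assuming failed subjobs print lines containing "Error"):

    logs=list.files("log",pattern="InterpScript",full=T)
    failed=sapply(logs,function(f) any(grepl("Error",readLines(f))))
    logs[failed]                        # logs from subjobs that errored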
Successfully running MOD06 processing on Pleiades as an array job (though each array submission is limited to fewer than 365 subjobs, so another way to submit the full set will be needed).
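One possible workaround for the per-array limit (a sketch, not part of this revision): split the missing dates into blocks below the limit and write one #PBS -J directive, and hence one array job, per block:

    ## hypothetical: one array job per block of at most 300 subjobs
    starts=seq(1,length(notdone),by=300)
    for(s in starts){
      e=min(s+299,length(notdone))
      cat(paste("#PBS -J ",s,"-",e,"\n",sep=""))   # directive for this block
    }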