Revision 827b4b87
Added by Adam Wilson about 12 years ago
climate/procedures/HelloRmpi.r

# Echo out library paths
.libPaths()

# Load the R MPI package if it is not already loaded
if (!is.loaded("mpi_initialize")) {
  library("Rmpi")
}

# Echo out libraries loaded
library()

# Echo out what is loaded
search()

# Spawn as many slaves as possible
mpi.spawn.Rslaves()

# In case R exits unexpectedly, have it automatically clean up
# resources taken up by Rmpi (slaves, memory, etc...)
.Last <- function(){
  if (is.loaded("mpi_initialize")){
    if (mpi.comm.size(1) > 0){
      print("Please use mpi.close.Rslaves() to close slaves.")
      mpi.close.Rslaves()
    }
    print("Please use mpi.quit() to quit R")
    .Call("mpi_finalize")
  }
}

# Tell all slaves to return a message identifying themselves
mpi.remote.exec(paste("I am",mpi.comm.rank(),"of",mpi.comm.size()))

# Tell all slaves to close down, and exit the program
mpi.close.Rslaves()
mpi.quit()
climate/procedures/HelloRmpi.r.Rout

R version 2.15.1 (2012-06-22) -- "Roasted Marshmallows"
Copyright (C) 2012 The R Foundation for Statistical Computing
ISBN 3-900051-07-0
Platform: x86_64-unknown-linux-gnu (64-bit)

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> # Echo out library paths
> .libPaths()
[1] "/home1/awilso10/R/x86_64-unknown-linux-gnu-library/2.15"
[2] "/nasa/R/2.15.1/lib64/R/library"
>
> # Load the R MPI package if it is not already loaded
> if (!is.loaded("mpi_initialize")) {
+ library("Rmpi")
+ }
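The transcript above ends at the Rmpi load, consistent with the failure noted in the commit message below. It has the form produced by R CMD BATCH, which writes script.Rout next to the script. A sketch of how such a run might be launched (module names borrowed from installRmpi.r below; the exact launch command used for this revision is not recorded):

module load R mpi-sgi/mpt.2.06a67
mpirun -np 1 R CMD BATCH HelloRmpi.r   # output captured in HelloRmpi.r.Rout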
climate/procedures/MOD06_L2_process.r

 ## load libraries
-require(sp)
 require(rgdal)
 require(reshape)
-require(ncdf4)
+#require(ncdf4)
 require(geosphere)
 require(raster)
 require(spgrass6)
+## packages for parallelization
+#library(foreach)
+#library(doMPI)
 library(multicore)

-## number of cores to use
+## register cluster and number of cores to use
 ncores=as.numeric(system("echo $NCORES",intern=T))
+#cl=startMPIcluster(20,verbose=F)
+#registerDoMPI(cl)

 ## specify some working directories
 setwd("/nobackupp1/awilso10/mod06")
...
 ##mod06(date,tile)

 ## run it for all dates
-mclapply(notdone,mod06,tile,mc.cores=ncores/2) # use ncores/2 because system() commands can add second process for each spawned R
+mclapply(notdone,mod06,tile,mc.cores=ncores) # use ncores/2 because system() commands can add second process for each spawned R

+#foreach(i=notdone[1:3],.packages=(.packages())) %dopar% mod06(i,tile)
+
+#foreach(i=1:20) %dopar% print(i)


 ################################################################################
climate/procedures/MOD06_process

#PBS -S /bin/bash
#PBS -l select=1:ncpus=16:model=san
###PBS -l select=4:ncpus=8:model=neh
##PBS -l select=1:ncpus=12:model=wes
####### old: select=48:ncpus=8:mpiprocs=8:model=neh
#PBS -l walltime=2:00:00
#PBS -j oe
#PBS -m e
#PBS -V
####PBS -W group_list=s1007
#PBS -q devel
#PBS -o log/log_^array_index^
#PBS -o log/log_DataCompile
#PBS -M adam.wilson@yale.edu
#PBS -N MOD06

#source /usr/share/modules/init/bash

## cd to working directory
cd /nobackupp1/awilso10/mod06

## set some memory limits
# ulimit -d 1500000 -m 1500000 -v 1500000 #limit memory usage
source /u/awilso10/.bashrc
source /usr/local/lib/global.profile
## export a few important variables
export NCORES=16 # use to limit mclapply() to set number of cores, should be select*ncpus above
export R_LIBS="/u/awilso10/R/x86_64-unknown-linux-gnu-library/2.15/"
## load modules
module load gcc hdf4 udunits R nco mpi-intel #mpi-sgi/mpt.2.06r6
## Run the script!
## current version not parallelizing across nodes!
TMPDIR=$TMPDIR Rscript --verbose --vanilla /u/awilso10/environmental-layers/climate/procedures/MOD06_L2_process.r
exit 0
exit 0
climate/procedures/Pleiades.R

 cat(paste("
 #PBS -S /bin/bash
-##PBS -l select=1:ncpus=16:model=san
+#PBS -l select=1:ncpus=16:model=san
 ###PBS -l select=4:ncpus=8:model=neh
-#PBS -l select=1:ncpus=12:model=wes
+##PBS -l select=1:ncpus=12:model=wes
 ####### old: select=48:ncpus=8:mpiprocs=8:model=neh
-#PBS -l walltime=10:00:00
+#PBS -l walltime=2:00:00
 #PBS -j oe
 #PBS -m e
 #PBS -V
 ####PBS -W group_list=s1007
-###PBS -q devel
+#PBS -q devel
 #PBS -o log/log_^array_index^
 #PBS -o log/log_DataCompile
 #PBS -M adam.wilson@yale.edu
 #PBS -N MOD06

-source /usr/share/modules/init/bash
+#source /usr/share/modules/init/bash

 ## cd to working directory
 cd /nobackupp1/awilso10/mod06

 ## set some memory limits
 # ulimit -d 1500000 -m 1500000 -v 1500000 #limit memory usage
-source /usr/local/lib/global.profile
 source /u/awilso10/.bashrc
-source /u/awilso10/moduleload
+source /usr/local/lib/global.profile
 ## export a few important variables
-export NCORES=24 # use to limit mclapply() to set number of cores, should be select*ncpus above
-export PATH=$PATH:/nobackupp1/awilso10/bin:/nobackupp1/awilso10/software/bin
-export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/nobackupp1/awilso10/software/lib
+export NCORES=16 # use to limit mclapply() to set number of cores, should be select*ncpus above
 export R_LIBS=\"/u/awilso10/R/x86_64-unknown-linux-gnu-library/2.15/\"
-export TMPDIR=/nobackupp1/awilso10/mod06/tmp
-## HEG related variables
-export MRTDATADIR=/nobackupp1/awilso10/software/heg/data
-export PGSHOME=/nobackupp1/awilso10/software/heg/TOOLKIT_MTD
-export HEGUSER=ME
 ## load modules
-module load gcc mpi-sgi/mpt.2.06r6 hdf4 udunits R nco
+module load gcc hdf4 udunits R nco mpi-intel #mpi-sgi/mpt.2.06r6
 ## Run the script!
 ## current version not parallelizing across nodes!
 TMPDIR=$TMPDIR Rscript --verbose --vanilla /u/awilso10/environmental-layers/climate/procedures/MOD06_L2_process.r
 exit 0
 exit 0
+
 ",sep=""),file="MOD06_process")

 ### Check the file
...
 system("/u/scicon/tools/bin/node_stats.sh")

 ## Submit it (and keep the pid)!
-pid=system("qsub MOD06_process",intern=T); pid; pid=strsplit(pid,split="[.]")[[1]][1]
-
-#system("qsub MOD06_process")
+system("qsub MOD06_process")

 ## work in interactive mode
-# system("qsub -I -l walltime=1:00:00 -lselect=2:ncpus=16:model=san -q devel")
+# system("qsub -I -l walltime=2:00:00 -lselect=2:ncpus=16:model=san -q devel")
+# mpirun -np 1 -r ssh R --no-save

 ## check progress
 system("qstat -u awilso10")
climate/procedures/installRmpi.r

module load R
module load mpi-sgi/mpt.2.06a67
export MPIHOME=/nasa/sgi/mpt/2.06a67/
export R_LIBS="/u/awilso10/R/x86_64-unknown-linux-gnu-library/2.15/"

CC=mpicc R CMD INSTALL --no-test-load --configure-args="--with-Rmpi-type=OPENMPI --with-Rmpi-libpath=$MPIHOME/lib --with-Rmpi-include=$MPIHOME/include" ~/Rmpi_0.6-1.tar.gz

mpirun -np 1 R --no-save

/u/scicon/tools/bin/qps
MOD06_L2 is now running properly, but Rmpi is not running, probably due to an incompatibility between Rmpi and the version of sgi-mpi available on Pleiades.
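A minimal sanity check may help isolate the mismatch described above; note that installRmpi.r configures Rmpi with --with-Rmpi-type=OPENMPI while loading the SGI MPT module, which could be related. The sketch below is not part of this revision (the filename checkRmpi.r is hypothetical); it uses standard Rmpi calls to confirm that the library initializes under the currently loaded MPI before attempting mpi.spawn.Rslaves():

## checkRmpi.r -- run as: mpirun -np 1 Rscript checkRmpi.r
## If library() fails to load, or the universe size is 0, the installed
## Rmpi was likely built against a different MPI than the loaded module.
library(Rmpi)
cat("MPI universe size:", mpi.universe.size(), "\n")
cat("Running on:", mpi.get.processor.name(), "\n")
mpi.quit()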