#!/bin/ksh
#set -xv
######################################################################
#
# Purpose: prepare a run directory for the SPOC coupled test case
# (two executables, ocean + atmos) and launch or submit them on the
# architecture selected in the user's section below.
#
host=$(uname -n)
user=$(whoami)
#
## - Define paths
srcdir=$(pwd)
datadir=$srcdir/data_spoc
casename=$(basename "$srcdir")
#
## - Name of the executables
exe1=ocean
exe2=atmos
#
############### User's section #######################################
#
## - Define architecture and coupler
arch=pgi_openmpi_openmp_linux # training, belenos, nemo_lenovo, mac
                              # kraken, gfortran_openmpi_openmp_linux
                              # pgi_openmpi_openmp_linux,
                              # pgi20.4_openmpi_openmp_linux (not work with 4.0)
                              # gnu1020_openmpi_openmp_linux (not work with 4.0)
#
# - Define number of processes to run each executable
nproc_exe1=4
nproc_exe2=4
#
############### End of user's section ################################
#
# - Define rundir (one working directory per case/process-count combo)
rundir=${srcdir}/work_${casename}_${nproc_exe1}_${nproc_exe2}
#
echo '*****************************************************************'
# NOTE(review): $run is never assigned anywhere in this script, so this
# prints an empty field after the colon — TODO confirm intended.
echo '*** '"$casename"' : '"$run"
echo ''
echo 'Rundir :' "$rundir"
echo 'Architecture :' "$arch"
echo 'Host : '"$host"
echo 'User : '"$user"
echo ''
echo "$exe1"' runs on '"$nproc_exe1" 'processes'
echo "$exe2"' runs on '"$nproc_exe2" 'processes'
echo ''
######################################################################
### 1. Create rundir and copy everything needed
#
# ${rundir:?} aborts instead of expanding to "" (which would make this
# an 'rm -fr /'-style disaster if rundir were ever unset).
\rm -fr "${rundir:?}"
mkdir -p "$rundir"
cp -f "$datadir"/*nc "$rundir"/.
cp -f "$srcdir/$exe1" "$rundir"/.
cp -f "$srcdir/$exe2" "$rundir"/.
cp -f "$datadir/namcouple" "$rundir"/.
# Abort if cd fails, otherwise the run would start in the wrong directory.
cd "$rundir" || exit 1
######################################################################
### 2. Definition of mpirun command and batch script
#
# Workstation architectures: select the matching mpirun binary.
# Cluster architectures (belenos, nemo_lenovo, kraken): write a SLURM
# batch script into $rundir instead (submitted in section 3).
# [[ ]] is used instead of [ ] so an empty $arch cannot break the test.
if [[ $arch == training ]]; then
   MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
elif [[ $arch == gfortran_openmpi_openmp_linux ]]; then
   MPIRUN=/usr/lib64/openmpi/bin/mpirun
elif [[ $arch == pgi_openmpi_openmp_linux ]]; then
   MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
elif [[ $arch == gnu1020_openmpi_openmp_linux ]]; then
   MPIRUN=/usr/local/openmpi/4.1.0_gcc1020/bin/mpirun
elif [[ $arch == "pgi20.4_openmpi_openmp_linux" ]]; then
   MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
elif [[ $arch == belenos ]]; then
   # Total MPI tasks for the two coupled executables.
   (( nproc = nproc_exe1 + nproc_exe2 ))
   # Unquoted EOF: $vars below are expanded NOW, so the generated batch
   # script contains the concrete paths and process counts.
   cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal256
#SBATCH --time=00:10:00
#SBATCH --job-name=spoc          # job name
#SBATCH -N 1                     # number of nodes
#SBATCH -n $nproc                # number of procs
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
ulimit -s unlimited
cd $rundir
module load intelmpi/2018.5.274
module load intel/2018.5.274
module load netcdf-fortran/4.5.2_V2
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF
#
elif [[ $arch == nemo_lenovo ]]; then
   MPIRUN=mpirun
   (( nproc = nproc_exe1 + nproc_exe2 ))
   cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
# Nom du job
#SBATCH --job-name spoc
# Temps limite du job
#SBATCH --time=00:10:00
#SBATCH --partition debug
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Nombre de noeuds et de processus
#SBATCH --nodes=1 --ntasks-per-node=$nproc
#SBATCH --distribution cyclic
cd $rundir
ulimit -s unlimited
#SPOC module purge
#SPOC module -s load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
time $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF
elif [[ $arch == kraken ]]; then
   (( nproc = nproc_exe1 + nproc_exe2 ))
   cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
#SBATCH --partition prod
# Nom du job
#SBATCH --job-name spoc
# Temps limite du job
#SBATCH --time=00:10:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Nombre de noeuds et de processus
#SBATCH --nodes=1 --ntasks-per-node=$nproc
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/phdf5/1.8.20_impi

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

fi
145 | |
---|
######################################################################
### 3. Model execution or batch submission
#
# Workstation architectures run interactively with the MPIRUN selected
# in section 2; cluster architectures submit the batch script written
# there; 'mac' runs interactively with whatever mpirun is on PATH.
if [[ $arch == training || $arch == gfortran_openmpi_openmp_linux || \
      $arch == gnu1020_openmpi_openmp_linux || $arch == pgi_openmpi_openmp_linux || \
      $arch == "pgi20.4_openmpi_openmp_linux" ]]; then
  export OMP_NUM_THREADS=1
  echo 'Executing the model using '"$MPIRUN"
  # MPMD launch: two executables inside one MPI world (OASIS coupling).
  "$MPIRUN" -oversubscribe -np "$nproc_exe1" ./"$exe1" : -np "$nproc_exe2" ./"$exe2"
elif [[ $arch == belenos || $arch == nemo_lenovo || $arch == kraken ]]; then
  # These three branches were identical; merged into one.
  echo 'Submitting the job to queue using sbatch'
  sbatch "$rundir/run_$casename.$arch"
  squeue -u "$user"
elif [[ $arch == mac ]]; then
  echo 'Executing the model using mpirun'
  ulimit -s unlimited
  mpirun --oversubscribe -np "$nproc_exe1" ./"$exe1" : -np "$nproc_exe2" ./"$exe2"
fi
echo "$casename" 'is executed or submitted to queue.'
echo 'Results are found in rundir : '"$rundir"
#
######################################################################