#!/bin/ksh
#set -x

host=`uname -n`
user=`whoami`

## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis3
casename=`basename $srcdir`

## - Define case
if [ $# -eq 0 ] ; then
    echo "Default usage: ./run_testinterp.sh 2_1_1 (i.e. nnodes=2, nprocs=1, nthreads=1)"
    echo "nnodes: total number of nodes for the run"
    echo "nprocs: number of MPI tasks per node"
    echo "nthreads: number of OpenMP threads per MPI task"
    n_p_t=2_1_1
    nnode=2
    mpiprocs=1
    threads=1
else
    n_p_t=$1
    nargs=`echo $n_p_t | awk -F _ '{print NF}'`
    if [ $nargs -ne 3 ] ; then
        echo "You can run this script without argument (the defaults nnodes=2, nprocs=1, nthreads=1 will be used)"
        echo "or as ./run_testinterp.sh nnodes_nprocs_nthreads where:"
        echo "nnodes: total number of nodes for the run"
        echo "nprocs: number of MPI tasks per node"
        echo "nthreads: number of OpenMP threads per MPI task"
        exit 1
    else
        nnode=`echo $n_p_t | awk -F _ '{print $1}'`
        mpiprocs=`echo $n_p_t | awk -F _ '{print $2}'`
        threads=`echo $n_p_t | awk -F _ '{print $3}'`
    fi
fi
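
# For example, "./run_testinterp.sh 3_2_4" is split on "_" by the awk calls
# above and yields nnode=3, mpiprocs=2 and threads=4, i.e. 3 nodes with
# 2 MPI tasks each and 4 OpenMP threads per task.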

######################################################################
## - User's section
# Some example namcouples are given in data_oasis3.
# Warning: if you add any extra lines to one of the example namcouples, you will
# have to change the definitions of SRC_GRID_TYPE, SRC_GRID_PERIOD and
# SRC_GRID_OVERLAP in this script (see the "Grid source characteristics" section below)
## - Source grid (you have the choice between bggd, ssea, icos)
##   bggd is a structured (LR) atmosphere grid
##   ssea is a gaussian reduced (D) atmosphere grid: no conserv2nd remapping
##   icos is an unstructured (U) atmosphere grid: no bili, bicu nor conserv2nd remapping
SRC_GRID=bggd # bggd, ssea, icos
##
## - Target grid (the only grid supported in this environment is nogt)
##   nogt is a structured (LR) ocean grid
TGT_GRID=nogt
##
## - Remapping (see the restrictions above)
remap=conserv1st # distwgt, bicu, bili, conserv1st, conserv2nd

## - Check that the source grid type and the remapping are compatible
if [ ${SRC_GRID} == "ssea" ]; then
    if [ ${remap} == "conserv2nd" ]; then
        echo "Impossible to perform conserv2nd remapping from the gaussian reduced grid ssea"
        exit 1
    fi
fi
if [ ${SRC_GRID} == "icos" ]; then
    if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
        echo "Impossible to perform ${remap} remapping from the unstructured grid icos"
        exit 1
    fi
fi
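
# For reference, the (source grid, remapping) combinations allowed by the
# restrictions above are:
#   bggd: distwgt, bicu, bili, conserv1st, conserv2nd
#   ssea: distwgt, bicu, bili, conserv1st
#   icos: distwgt, conserv1st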

arch=kraken_intel_impi_openmp # nemo_lenovo_intel_impi, nemo_lenovo_intel_impi_openmp or beaufix_intel_impi_openmp
                              # kraken_intel_impi, kraken_intel_impi_openmp, training_computer
                              # linux_gfortran_openmpi_openmp, linux_gfortran_openmpi
                              # linux_pgi_openmpi_openmp, linux_pgi_openmpi
# For arch=beaufix_intel_impi_openmp you must put in your .bashrc:
#module load intel
#module load intelmpi
#module load netcdf
#module load hdf5/1.8.16_par_thrsaf

rundir=$srcdir/${casename}_${SRC_GRID}_${TGT_GRID}_${remap}/rundir_${nnode}_${mpiprocs}_${threads}

## - End of user's section
######################################################################

## - Zero-padded copies of the run parameters (ksh typeset -Z)
typeset -Z4 nodes
nodes=$nnode
typeset -Z2 mpiprocesses
mpiprocesses=$mpiprocs
typeset -Z2 nthreads
nthreads=$threads
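# Illustrative note: typeset -Zn right-justifies and zero-fills to width n,
# so nnode=2 gives nodes=0002 and threads=1 gives nthreads=01 (nthreads is
# used in the beaufix job name below).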

## - Name of the executables
exe1=model1
exe2=model2

## - Define the number of processes to run each executable
(( nproc = $nnode * $mpiprocs ))
(( nproc_exe2 = $nproc / 2 ))
(( nproc_exe1 = $nproc - $nproc_exe2 ))
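# Example: nnode=2 and mpiprocs=3 give nproc=6, split as nproc_exe2=3 and
# nproc_exe1=3; for an odd total the integer division gives exe1 the extra process.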

echo ''
echo '*****************************************************************'
echo '*** '$casename
echo ''
echo "Running test_interpolation with nnodes=$nnode nprocs=$mpiprocs nthreads=$threads"
echo '*****************************************************************'
echo 'Source grid  : '$SRC_GRID
echo 'Target grid  : '$TGT_GRID
echo 'Rundir       : '$rundir
echo 'Architecture : '$arch
echo 'Host         : '$host
echo 'User         : '$user
echo 'Grids        : '$SRC_GRID' --> '$TGT_GRID
echo 'Remap        : '$remap
echo 'Threads      : '$threads
echo ''
echo $exe1' runs on '$nproc_exe1' processes'
echo $exe2' runs on '$nproc_exe2' processes'
echo ''
echo ''

## - Copy everything needed into rundir
mkdir -p $rundir
\rm -fr $rundir/*

ln -sf $datadir/grids.nc $rundir/grids.nc
ln -sf $datadir/masks.nc $rundir/masks.nc
ln -sf $datadir/areas.nc $rundir/areas.nc

ln -sf $srcdir/$exe1 $rundir/.
ln -sf $srcdir/$exe2 $rundir/.

cp -f $datadir/namcouple_${SRC_GRID}_${TGT_GRID}_${remap} $rundir/namcouple

## - Grid source characteristics
# If you add any extra lines to the example namcouples, you will have to
# adjust the three hard-coded sed line numbers below
SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2`    # source grid type
SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`  # "P" for periodic, "R" for non-periodic
SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # number of overlapping grid points for periodic grids

echo "SRC_GRID_TYPE : $SRC_GRID_TYPE"
echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD"
echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"
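
# Quick sanity check (illustrative, assuming the layout of the sample namcouples
# shipped in data_oasis3): print the two namcouple lines parsed above, e.g.
#   sed -n '17p;20p' $rundir/namcouple
# Line 17 is expected to start with the periodicity flag and the overlap
# (something like "P 2 ..."), and the 2nd field of line 20 should be the
# source grid type (LR, D or U).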

## - Create name_grids.dat from the namcouple information
cat <<EOF > $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='$SRC_GRID'
cl_remap='$remap'
cl_type_src='$SRC_GRID_TYPE'
cl_period_src='$SRC_GRID_PERIOD'
il_overlap_src=$SRC_GRID_OVERLAP
\$end
\$grid_target_characteristics
cl_grd_tgt='$TGT_GRID'
\$end
EOF
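# With the defaults above (and assuming the sample bggd namcouple, for which
# line 17 would give period P with overlap 2 and line 20 grid type LR), the
# generated name_grids.dat would read:
#   $grid_source_characteristics
#   cl_grd_src='bggd'
#   cl_remap='conserv1st'
#   cl_type_src='LR'
#   cl_period_src='P'
#   il_overlap_src=2
#   $end
#   $grid_target_characteristics
#   cl_grd_tgt='nogt'
#   $end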
#
cd $rundir

######################################################################
## - Creation of the configuration scripts

###---------------------------------------------------------------------
### BEAUFIX
###---------------------------------------------------------------------
if [ $arch == beaufix_intel_impi_openmp ] ; then
    ncore_per_node=40
    (( cpus_per_task = $ncore_per_node * 2 / $mpiprocs ))
    timreq=12:00:00
    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal64
#SBATCH --job-name ${remap}_${nthreads}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task (= number of cores per MPI task)
#SBATCH -c $cpus_per_task
ulimit -s unlimited
# rundir must be in the TMPDIR
cd \$TMPDIR
cp $rundir/* \$TMPDIR
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

# Binding IntelMPI
MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39"
INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2
#
cp * $rundir

EOF
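
# Illustrative note: with threads=4 the loop written into the job script above
# builds KMP_AFFINITY="verbose,granularity=fine,proclist=[0,1,2,3],explicit",
# pinning the OpenMP threads to the listed logical CPUs.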

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi ]; then

    (( nproc = $nproc_exe1 + $nproc_exe2 ))

    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
##SBATCH --partition debug
# Job name
#SBATCH --job-name scrip
# Time limit for the job
#SBATCH --time=00:02:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI tasks per node
#SBATCH --nodes=$nnode --ntasks-per-node=$mpiprocs
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module -s load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP on a single node of the machine
###---------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

    timreq=00:03:00

    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=$threads

cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF
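
# Note (based on documented Intel MPI behaviour): I_MPI_PIN_DOMAIN=omp sizes
# each MPI task's pinning domain to OMP_NUM_THREADS logical CPUs, so the
# OpenMP threads of a task get their own cores; the commented "socket"
# alternative would pin each task to a whole socket instead.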

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi ]; then

    (( nproc = $nproc_exe1 + $nproc_exe2 ))

    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
# Job name
#SBATCH --job-name scrip
# Time limit for the job
#SBATCH --time=02:00:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI tasks per node
#SBATCH --nodes=$nnode --ntasks-per-node=$mpiprocs
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
#
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF


###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

    timreq=00:30:00

    cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=$threads

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

# Binding IntelMPI
MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35"
INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

fi

######################################################################
### - Execute the model

if [ ${arch} == training_computer ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == davinci_intel_impi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/opt/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=1
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == linux_pgi_openmpi ] || [ ${arch} == linux_pgi_openmpi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=1
    MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == beaufix_intel_impi_openmp ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ $arch == nemo_lenovo_intel_impi_openmp ] || [ $arch == nemo_lenovo_intel_impi ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ $arch == kraken_intel_impi_openmp ] || [ $arch == kraken_intel_impi ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
fi
echo $casename' has been executed or submitted to the queue.'
echo 'Results can be found in the rundir: '$rundir

######################################################################