#!/bin/ksh
#set -xv
######################################################################
# OASIS3-MCT_4.0 compiled with -qopenmp
############### User's section #######################################
##
# Index of the case${fich_i}.txt configuration file copied into the rundir
fich_i=$1
## - Define architecture
arch=machine_test # stiff (pgi 18.7 openmpi fedora 26), fundy (gfortran openmpi fedora 26),
                  # nemo_lenovo (intel impi), kraken (intel impi), beaufix (intel impi)
                  # machine_test (==tioman pgi 17.10 for the moment)
platform=$arch
MPILIB=MPI1 # MPI1
user=$(whoami)
#
## - Define paths
srcdir=$(pwd)
datadir=$srcdir/data_oasis3
casename=$(basename "$srcdir")
pathname=$(dirname "$srcdir")
configfilesdir=$pathname/bench_buildbot_since_2019
griddircom=$pathname/bench_buildbot_since_2019/common_data_oasis3
rmpdircom=$pathname/bench_buildbot_since_2019/common_rmp_files
echo "$arch"
echo "$casename"
#
# - Toy models: Define number of processes to run each executable
#   and name of executables
#
# name of the executables
exe1=model1
exe2=model2
exe3=model3
# number of processes for each executable
nproc_exe1=0
nproc_exe2=6
nproc_exe3=0
# namcouple used (modified in script run_examples_oa3-mct_buildbot:
# namcouple_ini=${namcouple_used})
namcouple_ini=namcouple_1
# Makefile_ini=${makefile_used}
Makefile_ini=Makefile_1
# Number of nodes (to be coherent with toy toy_interpolation)
nnodes=1
# Number of threads (to be coherent with toy toy_interpolation)
threads=1
#
# Run-directory name: the pattern is identical on every machine, only the
# filesystem root changes per architecture, so build the suffix once.
workname=work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
case $arch in
    # Stiff pgi 18.7
    linux_pgi_openmpi|linux_pgi_openmpi_openmp)
        MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
        rundir=/space/$USER/OA3_MCT_RES/$workname
        ;;
    # Fundy gfortran
    linux_gfortran_openmpi|linux_gfortran_openmpi_openmp)
        MPIRUN=/usr/lib64/openmpi/bin/mpirun
        rundir=/space/$USER/OA3_MCT_RES/$workname
        ;;
    # Nemo_lenovo (batch machine: mpirun comes from the job's modules)
    nemo_lenovo_intel_impi|nemo_lenovo_intel_impi_openmp)
        rundir=/scratch/globc/$USER/OA3_MCT_RES/$workname
        ;;
    # Kraken (batch machine: mpirun comes from the job's modules)
    kraken_intel_impi|kraken_intel_impi_openmp)
        rundir=/scratch/globc/$USER/OA3_MCT_RES/$workname
        ;;
    # Beaufix M-F
    beaufix_intel_impi|beaufix_intel_impi_openmp)
        rundir=/scratch/work/coquartl/OA3_MCT_RES/$workname
        ;;
    # Machine_test (for developments, here tioman pgi 18.7)
    machine_test)
        MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
        rundir=/space/$USER/OA3_MCT_RES/$workname
        ;;
esac

############### End of user's section ################################

# Print a short summary of the run configuration before staging files.
echo ''
echo '*****************************************************************'
echo "*** $casename : $run"
echo ''
echo "Rundir : $rundir"
echo "Architecture : $arch"
echo "User : $user"
echo ''
echo "$exe1 runs on $nproc_exe1 processes"
echo "$exe2 runs on $nproc_exe2 processes"
echo "$exe3 runs on $nproc_exe3 processes"
echo ''
echo ''
######################################################################
###
### 1. Copy source example directory containing everything needed
###    into rundir

cd "$srcdir"
mkdir -p "$rundir"

# Grid/mask/area inputs: the grids-writing toy uses its own *_nogt and *_bggd
# files; every other toy uses the common grids/masks/areas files.
if [ "$casename" = toy_grids_writing ]; then
    for gridfile in grids_nogt.nc masks_nogt.nc areas_nogt.nc grids_bggd.nc masks_bggd.nc areas_bggd.nc; do
        ln -sf "$griddircom/$gridfile" "$rundir"/.
    done
else
    for gridfile in grids.nc masks.nc areas.nc; do
        ln -sf "$griddircom/$gridfile" "$rundir"/.
    done
fi

# Configuration file selected by the script's first argument
cp "$configfilesdir/case${fich_i}.txt" "$rundir"/.

# Toy executables
cp -f "$srcdir/$exe1" "$rundir"/.
cp -f "$srcdir/$exe2" "$rundir"/.
cp -f "$srcdir/$exe3" "$rundir"/.

# namcouple used copied in script run_examples_oa3-mct_buildbot
cp -f "$datadir/namcouple" "$rundir/namcouple"
# Restart file copied into rundir for the toys that need one
case $casename in
    toy_multiple_fields_one_communication|toy_time_transformations|toy_restart_ACCUMUL_1_NOLAG|toy_restart_ACCUMUL_1_LAG|toy_restart_ACCUMUL_2_NOLAG|toy_restart_ACCUMUL_2_LAG)
        cp "$datadir/ocean.nc" "$rundir/ocean.nc"
        ;;
esac
119 | # |
---|
120 | # Copy necessary rmp files as a function of the toy |
---|
121 | # In toy_interpolation we calculate them and we need name_grids.dat file |
---|
122 | if [ $casename == toy_interpolation ]; then |
---|
123 | # If you add any additional lines in the namcouples given as examples you will have |
---|
124 | # to change the lines below |
---|
125 | # SRC_GRID : bggd, ssea, icos, nogt, nogt, nogt |
---|
126 | # TGT_GRID : nogt, nogt, nogt, bggd, ssea, icos |
---|
127 | # remap=gauswgt, distwgt, bicu, bili, conserv1st, conserv2nd |
---|
128 | conserv_order=" " |
---|
129 | remap=" " |
---|
130 | SRC_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f5` |
---|
131 | TGT_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f6` |
---|
132 | echo ${SRC_GRID} |
---|
133 | echo ${TGT_GRID} |
---|
134 | # Remapping (see restrictions below) |
---|
135 | remapping=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f1` |
---|
136 | if [ $remapping == BICUBIC ]; then |
---|
137 | remap=bicu |
---|
138 | fi |
---|
139 | if [ $remapping == DISTWGT ]; then |
---|
140 | remap=distwgt |
---|
141 | fi |
---|
142 | if [ $remapping == GAUSWGT ]; then |
---|
143 | remap=gauswgt |
---|
144 | fi |
---|
145 | if [ $remapping == BILINEAR ]; then |
---|
146 | remap=bili |
---|
147 | fi |
---|
148 | if [ $remapping == CONSERV ]; then |
---|
149 | conserv_order=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f7` |
---|
150 | if [ ${conserv_order} == FIRST ]; then |
---|
151 | remap=conserv1st |
---|
152 | else |
---|
153 | remap=conserv2nd |
---|
154 | fi |
---|
155 | fi |
---|
156 | echo "Remapping : $remap" |
---|
157 | # Verification source grid type and remapping |
---|
158 | if [ ${SRC_GRID} == "ssea" ]; then |
---|
159 | if [ ${remap} == "conserv2nd" ]; then |
---|
160 | echo "Impossible to perform conserv2nd remapping from gaussian reduced grid ssea" |
---|
161 | exit |
---|
162 | fi |
---|
163 | fi |
---|
164 | if [ ${SRC_GRID} == "icos" ]; then |
---|
165 | if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then |
---|
166 | echo "Impossible to perform ${remap} remapping from unstructured grid icos" |
---|
167 | exit |
---|
168 | fi |
---|
169 | fi |
---|
170 | SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # source grid type |
---|
171 | SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1` # "P" for periodic, "R" for non-periodic |
---|
172 | SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # Number of overlapping grid points for periodic grids |
---|
173 | echo "SRC_GRID_TYPE : $SRC_GRID_TYPE" |
---|
174 | echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD" |
---|
175 | echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP" |
---|
176 | ## - Create name_grids.dat from namcouple informations |
---|
177 | cat <<EOF >> $rundir/name_grids.dat |
---|
178 | \$grid_source_characteristics |
---|
179 | cl_grd_src='${SRC_GRID}' |
---|
180 | cl_remap='${remap}' |
---|
181 | cl_type_src='${SRC_GRID_TYPE}' |
---|
182 | cl_period_src='${SRC_GRID_PERIOD}' |
---|
183 | il_overlap_src=${SRC_GRID_OVERLAP} |
---|
184 | \$end |
---|
185 | \$grid_target_characteristics |
---|
186 | cl_grd_tgt='${TGT_GRID}' |
---|
187 | \$end |
---|
188 | EOF |
---|
# toy different from toy_interpolation
else
    # link_rmp NAME... : link each pre-computed remapping-weight file
    # rmp_<NAME>.nc from the common rmp directory into the rundir.
    link_rmp() {
        for rmpname in "$@"; do
            ln -sf "$rmpdircom/rmp_${rmpname}.nc" "$rundir"/.
        done
    }
    case $casename in
        toy_1f1grd_to_2f2grd)
            link_rmp bggd_to_nogt_BILINEAR nogt_to_bggd_BILINEAR \
                     lmdz_to_nogt_BILINEAR nogt_to_lmdz_BILINEAR
            ;;
        toy_configuration_components_B)
            link_rmp bggd_to_lmdz_BILINEAR lmdz_to_bggd_BILINEAR
            ;;
        toy_configuration_components_C)
            link_rmp icos_to_lmdz_DISTWGT lmdz_to_icos_DISTWGT
            ;;
        toy_configuration_components_G)
            link_rmp icos_to_lmdz_DISTWGT lmdz_to_icos_DISTWGT \
                     lmdz_to_ssea_BILINEAR ssea_to_lmdz_BILINEAR \
                     ssea_to_icos_DISTWGT icos_to_ssea_DISTWGT
            ;;
        # ABCG and load_balancing need the same full set of weight files
        toy_configuration_components_ABCG|toy_load_balancing)
            link_rmp icos_to_lmdz_DISTWGT lmdz_to_icos_DISTWGT \
                     lmdz_to_ssea_BILINEAR ssea_to_lmdz_BILINEAR \
                     ssea_to_icos_DISTWGT icos_to_ssea_DISTWGT \
                     bggd_to_nogt_BILINEAR nogt_to_bggd_BILINEAR \
                     bggd_to_lmdz_BILINEAR lmdz_to_bggd_BILINEAR
            ;;
        *)
            link_rmp bggd_to_nogt_BILINEAR nogt_to_bggd_BILINEAR
            ;;
    esac
# Endif toy_interpolation test
fi
#
cd "$rundir"
#
######################################################################
###
### 3. Creation of configuration scripts (not for Fedora 26 computers)
#
###---------------------------------------------------------------------
### NEMO_LENOVO_INTELMPI
###---------------------------------------------------------------------
if [ "$arch" = nemo_lenovo_intel_impi ]; then

cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
#Partition
#SBATCH --partition prod
# Nom du job
#SBATCH --job-name $casename
# Temps limite du job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Nombre de noeuds et de processus MPI
#SBATCH --nodes=1 --ntasks-per-node=24
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###----------------------------------------------------------------------------
### NEMO_LENOVO_INTELMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###----------------------------------------------------------------------------
elif [ "$arch" = nemo_lenovo_intel_impi_openmp ]; then

echo "We run on Nemo_lenovo OpenMP+MPI"
echo "Prescribe : N=2, nprocs/by node=2, ntasks/by proc=12"

cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
#Partition
#SBATCH --partition prod
# Nom du job
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=12

cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=12

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI
###---------------------------------------------------------------------
elif [ "$arch" = kraken_intel_impi ]; then

cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
#SBATCH --partition prod
# Nom du job
#SBATCH --job-name $casename
# Temps limite du job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Nombre de noeuds et de processus
#SBATCH --nodes=$nnodes --ntasks-per-node=36
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
#
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###------------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ "$arch" = kraken_intel_impi_openmp ]; then

cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash -l
#Partition
#SBATCH --partition prod
# Nom du job
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=18

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=18
export OMP_NUM_THREADS=18

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF


###---------------------------------------------------------------------
### BEAUFIX INTELIMPI
###---------------------------------------------------------------------
elif [ "$arch" = beaufix_intel_impi ]; then

nproc=$(( nproc_exe1 + nproc_exe2 + nproc_exe3 ))

# BUGFIX: the stderr line below used '-o' twice; the second occurrence must
# be '-e' or job stderr is lost and the stdout setting is overridden.
cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash
# Nom du job
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -p normal64 # partition/queue
#SBATCH -N $nnodes # number of nodes
#SBATCH -n $nproc # number of procs
#SBATCH -o job.out%j
#SBATCH -e job.err%j
#SBATCH --exclusive

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.4.0
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###------------------------------------------------------------------------
### BEAUFIX_INTELMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ "$arch" = beaufix_intel_impi_openmp ]; then

# BUGFIX: Slurm spells the option '--cpus-per-task'; the original wrote
# '--cpus_per_task', which sbatch rejects.
cat <<EOF > "$rundir/run_$casename.$arch"
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal64
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of threads per MPI task ombre de thread openmp par proc MPI = nombre de coeur par proc
#SBATCH --cpus-per-task=20

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.3.0
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=20
export OMP_NUM_THREADS=20
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2 : -np $nproc_exe3 ./$exe3
#

EOF

fi

######################################################################
###
### 4. Execute the model
#
# launch_local : run the toys interactively with $MPIRUN, skipping every
# executable whose process count is 0; run output is captured in runjob.err.
launch_local() {
    if [ "$nproc_exe2" = 0 ] && [ "$nproc_exe3" = 0 ]; then
        $MPIRUN -np "$nproc_exe1" ./"$exe1" > runjob.err
    elif [ "$nproc_exe1" = 0 ] && [ "$nproc_exe3" = 0 ]; then
        $MPIRUN -np "$nproc_exe2" ./"$exe2" > runjob.err
    elif [ "$nproc_exe3" = 0 ]; then
        $MPIRUN -np "$nproc_exe1" ./"$exe1" : -np "$nproc_exe2" ./"$exe2" > runjob.err
    else
        $MPIRUN -np "$nproc_exe1" ./"$exe1" : -np "$nproc_exe2" ./"$exe2" : -np "$nproc_exe3" ./"$exe3" > runjob.err
    fi
}
# Stiff and Fundy : direct interactive run
if [ "$arch" = linux_pgi_openmpi ] || [ "$arch" = linux_pgi_openmpi_openmp ] || [ "$arch" = linux_gfortran_openmpi ] || [ "$arch" = linux_gfortran_openmpi_openmp ]; then
    export OMP_NUM_THREADS=1
    # ulimit -s unlimited : already set by ". /usr/local/bin/setuprogenv.sh" in the .bashrc
    launch_local
elif [ "$arch" = nemo_lenovo_intel_impi ] || [ "$arch" = nemo_lenovo_intel_impi_openmp ] || [ "$arch" = kraken_intel_impi ] || [ "$arch" = kraken_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch "$rundir/run_$casename.$arch"
    squeue -u "$USER"
# NOTE(review): the original tested ${platform} for the beaufix_openmp case;
# platform is assigned from arch and never changed, so testing arch keeps
# the same behavior while staying consistent with every other branch.
elif [ "$arch" = beaufix_intel_impi ] || [ "$arch" = beaufix_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch "$rundir/run_$casename.$arch"
    squeue -u "$user"
elif [ "$arch" = machine_test ]; then
    export OMP_NUM_THREADS=1
    # ulimit -s unlimited : already set by ". /usr/local/bin/setuprogenv.sh" in the .bashrc
    launch_local
fi

echo "$casename" 'is executed or submitted to queue.'
echo 'Results are found in rundir : '"$rundir"

######################################################################