source: CONFIG_DEVT/IPSLCM6.5_work_ENSEMBLES/oasis3-mct/examples/test_interpolation/run_testinterp.sh @ 5863

Last change on this file since 5863 was 5725, checked in by aclsce, 3 years ago

Added new oasis3-MCT version to be used to handle ensemble simulations with XIOS.

#!/bin/ksh
#set -x

host=`uname -n`
user=`whoami`

## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis3
casename=`basename $srcdir`

## - Define case
if [ $# -eq 0 ] ; then
14   echo "Default usage: ./run_testinterp.sh 2_1_1 (i.e. nnodes=2, nprocs=1, nthreads=1)"
15   echo "nnodes: total number of nodes fr the run"
16   echo "nprocs: number of MPI tasks per node"
17   echo "nthreads: number of OpenMP threads per MPI task"
18   n_p_t=1
19   nnode=2
20   mpiprocs=1
21   threads=1
else
   n_p_t=$1
   nargs=`echo $n_p_t | awk -F _ '{print NF}'`
   if [ $nargs -ne 3 ] ; then
26       echo "You can run this script without argument (default nnodes=2, nprocs=1, nthreads=1 will be used)"
27       echo "or as ./run_testinterp.sh nnodes_ nprocs_nthreads where:"
28       echo "nnodes: total number of nodes fr the run"
29       echo "nprocs: number of MPI tasks per node"
30       echo "nthreads: number of OpenMP threads per MPI task"
       exit
   else
       nnode=`echo $n_p_t | awk -F _ '{print $1}'`
       mpiprocs=`echo $n_p_t | awk -F _ '{print $2}'`
       threads=`echo $n_p_t | awk -F _ '{print $3}'`
   fi
fi
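# Example (values are illustrative): "./run_testinterp.sh 4_2_4" requests nnode=4,
# mpiprocs=2 and threads=4, i.e. 8 MPI tasks split between the two models below.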

######################################################################
## - User's section
# Some examples of namcouples are given in data_oasis3
# Warning: if you add any extra lines in one of the namcouples given as examples, you will have to
# change the definition of SRC_GRID_TYPE, SRC_GRID_PERIOD and SRC_GRID_OVERLAP in this script (see the three sed commands below)
## - Source grids (you have the choice between bggd, ssea, icos)
## bggd is an atmosphere structured (LR) grid
## ssea is an atmosphere Gaussian reduced grid (D) : no conserv2nd remapping
## icos is an atmosphere unstructured grid (U) : no bili, bicu nor conserv2nd remapping
SRC_GRID=bggd # bggd, ssea, icos
##
## - Target grid (the only grid supported in this environment is nogt)
## nogt is an ocean structured grid (LR)
TGT_GRID=nogt
##
## - Remapping (see restrictions above)
remap=conserv1st # distwgt, bicu, bili, conserv1st, conserv2nd

## - Check that the source grid type and the chosen remapping are compatible
if [ ${SRC_GRID} == "ssea" ]; then
        if [ ${remap} == "conserv2nd" ]; then
                echo "Impossible to perform conserv2nd remapping from gaussian reduced grid ssea"
                exit
        fi
fi
if [ ${SRC_GRID} == "icos" ]; then
        if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
                echo "Impossible to perform ${remap} remapping from unstructured grid icos"
                exit
        fi
fi
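# Summary of the combinations allowed by the checks above:
#   bggd (LR) : distwgt, bicu, bili, conserv1st, conserv2nd
#   ssea (D)  : distwgt, bicu, bili, conserv1st
#   icos (U)  : distwgt, conserv1st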

arch=kraken_intel_impi_openmp  # nemo_lenovo_intel_impi, nemo_lenovo_intel_impi_openmp or beaufix_intel_impi_openmp
                               # kraken_intel_impi, kraken_intel_impi_openmp, training_computer
                               # linux_gfortran_openmpi_openmp, linux_gfortran_openmpi
                               # linux_pgi_openmpi_openmp, linux_pgi_openmpi
# For arch=beaufix_intel_impi_openmp you must put the following module loads in your .bashrc:
#module load intel
#module load intelmpi
#module load netcdf
#module load hdf5/1.8.16_par_thrsaf

rundir=$srcdir/${casename}_${SRC_GRID}_${TGT_GRID}_${remap}/rundir_${nnode}_${mpiprocs}_${threads}
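# With the defaults above this expands to something like (path shown for illustration):
#   <srcdir>/test_interpolation_bggd_nogt_conserv1st/rundir_2_1_1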

## - End of user's section
######################################################################

typeset -Z4 nodes
nodes=$nnode
typeset -Z2 mpiprocesses
mpiprocesses=$mpiprocs
typeset -Z2 nthreads
nthreads=$threads
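# typeset -Zn zero-pads the value to n digits (e.g. nnode=2 gives nodes=0002);
# nthreads is reused below in the beaufix job name.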

## - Name of the executables
exe1=model1
exe2=model2

## - Define number of processes to run each executable
(( nproc = $nnode * $mpiprocs ))
(( nproc_exe2 = $nproc / 2 ))
(( nproc_exe1 = $nproc - $nproc_exe2 ))
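# Example: with nnode=2 and mpiprocs=1, nproc=2 and each model gets 1 process;
# when nproc is odd, the integer division above gives the extra process to $exe1.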

echo ''
echo '*****************************************************************'
echo '*** '$casename
echo ''
echo "Running test_interpolation with nnodes=$nnode nprocs=$mpiprocs nthreads=$threads"
echo '*****************************************************************'
echo 'Source grid  : '$SRC_GRID
echo 'Target grid  : '$TGT_GRID
echo 'Rundir       : '$rundir
echo 'Architecture : '$arch
echo 'Host         : '$host
echo 'User         : '$user
echo 'Grids        : '$SRC_GRID'-->'$TGT_GRID
echo 'Remap        : '$remap
echo 'Threads      : '$threads
echo ''
echo $exe1' runs on '$nproc_exe1' processes'
echo $exe2' runs on '$nproc_exe2' processes'
echo ''
echo ''

## - Copy everything needed into rundir
\rm -fr $rundir/*
mkdir -p $rundir

ln -sf $datadir/grids.nc  $rundir/grids.nc
ln -sf $datadir/masks.nc  $rundir/masks.nc
ln -sf $datadir/areas.nc  $rundir/areas.nc

ln -sf $srcdir/$exe1 $rundir/.
ln -sf $srcdir/$exe2 $rundir/.

cp -f $datadir/namcouple_${SRC_GRID}_${TGT_GRID}_${remap} $rundir/namcouple

## - Grid source characteristics
# If you add any additional lines in the namcouples given as examples you will have
# to change the 3 lines below
SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # source grid type
SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1` # "P" for periodic, "R" for non-periodic
SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # Number of overlapping grid points for periodic grids
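# For illustration only (the exact content depends on the namcouple in data_oasis3):
# line 17 is typically the grid periodicity line of the coupling field, e.g. "P 2 P 0"
# (source period and overlap, then target period and overlap), and line 20 is the
# SCRIPR remapping line, e.g. "CONSERV LR SCALAR LATLON 1 FRACAREA FIRST",
# whose second field gives the source grid type (LR, D or U).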

echo "SRC_GRID_TYPE : $SRC_GRID_TYPE"
echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD"
echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"

## - Create name_grids.dat from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='$SRC_GRID'
cl_remap='$remap'
cl_type_src='$SRC_GRID_TYPE'
cl_period_src='$SRC_GRID_PERIOD'
il_overlap_src=$SRC_GRID_OVERLAP
\$end
\$grid_target_characteristics
cl_grd_tgt='$TGT_GRID'
\$end
EOF
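# With the default settings the generated name_grids.dat looks like the following
# (cl_type_src, cl_period_src and il_overlap_src shown with illustrative values):
# $grid_source_characteristics
# cl_grd_src='bggd'
# cl_remap='conserv1st'
# cl_type_src='LR'
# cl_period_src='P'
# il_overlap_src=2
# $end
# $grid_target_characteristics
# cl_grd_tgt='nogt'
# $end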
#
cd $rundir

######################################################################
## - Creation of configuration scripts

###---------------------------------------------------------------------
### BEAUFIX
###---------------------------------------------------------------------
if [ $arch == beaufix_intel_impi_openmp ] ; then
 ncore_per_node=40
 (( cpus_per_task = $ncore_per_node * 2 / $mpiprocs ))
 timreq=12:00:00
  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal64
#SBATCH --job-name ${remap}_${nthreads}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task (= number of cores per MPI process)
#SBATCH -c $cpus_per_task
ulimit -s unlimited
# rundir must be in the TMPDIR
cd \$TMPDIR
cp $rundir/* \$TMPDIR
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
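# The loop above builds an explicit proclist; with 4 OpenMP threads, for example,
# KMP_AFFINITY becomes "verbose,granularity=fine,proclist=[0,1,2,3],explicit".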
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

    # Binding IntelMPI
    MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39"
    INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
    I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2
#
cp * $rundir

EOF

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi ]; then

  (( nproc = $nproc_exe1 + $nproc_exe2 ))

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
##SBATCH --partition debug
# Job name
#SBATCH --job-name scrip
# Time limit for the job
#SBATCH --time=00:02:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI tasks per node
#SBATCH --nodes=$nnode --ntasks-per-node=$mpiprocs
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module -s load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP on one node of the machine
###---------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

  timreq=00:03:00

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=$threads

cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi ]; then

  (( nproc = $nproc_exe1 + $nproc_exe2 ))

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
# Job name
#SBATCH --job-name scrip
# Time limit for the job
#SBATCH --time=02:00:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI tasks per node
#SBATCH --nodes=$nnode --ntasks-per-node=$mpiprocs
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
#
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
#
EOF


###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

  timreq=00:30:00

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=$threads

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

    # Binding IntelMPI
    MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35"
    INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
    I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"
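    # Note: the three binding variables above are prepared but not passed to the
    # mpirun command below; the KMP_* and I_MPI_* exports above are what actually
    # control the pinning.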

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

fi

######################################################################
### - Execute the model

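# For training_computer, davinci and the linux_* architectures the models are launched
# directly with mpirun below; for the batch architectures (beaufix, nemo_lenovo, kraken)
# the job script generated above is submitted with sbatch.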
if [ ${arch} == training_computer ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == davinci_intel_impi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/opt/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=1
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == linux_pgi_openmpi ] || [ ${arch} == linux_pgi_openmpi_openmp ]; then
    export OASIS_OMP_NUM_THREADS=1
    MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == beaufix_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ $arch == nemo_lenovo_intel_impi_openmp ] || [ $arch == nemo_lenovo_intel_impi ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
elif [ $arch == kraken_intel_impi_openmp ] || [ $arch == kraken_intel_impi ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
fi
echo $casename' has been executed or submitted to the queue.'
echo 'Results are found in rundir : '$rundir

######################################################################
