source: CONFIG_DEVT/IPSLCM6.5_work_ENSEMBLES/oasis3-mct/examples/spoc/spoc_regridding/run_regrid.sh @ 5725

Last change on this file since 5725 was 5725, checked in by aclsce, 3 years ago

Added new oasis3-MCT version to be used to handle ensemble simulations with XIOS.

#!/bin/ksh
#set -x

host=`uname -n`
user=`whoami`

## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis
casename=`basename $srcdir`

## - Define case
if [ $# -eq 0 ] ; then
   echo "By default, i.e. without arguments, the source grid is bggd,"
   echo "the target grid is nogt and the remapping is 1st order conservative;"
   echo "2 nodes, 1 MPI task per node and 1 OpenMP thread per MPI task are used for the run."
   SRC_GRID=bggd
   TGT_GRID=nogt
   remap=conserv1st
   n_p_t=2_1_1
   nnode=2
   mpiprocs=1
   threads=1
elif [ $# -ne 4 ] ; then
25   echo "If you don't want to run the default case without arguments, "
26   echo "you must run the script with 4 arguments i.e. './run_testinterp.sh src tgt remap nnodes_nprocs_nthreads'"
27   echo "where 'src' is the source grid, 'tgt' the target grid and 'remap' the remapping,"
28   echo "'nnodes' the total number of nodes for the run, 'nprocs' the number of MPI tasks per node"
29   echo "and 'nthreads' the number of OpenMP threads per MPI task"
30   exit
else
   SRC_GRID=$1
   TGT_GRID=$2
   remap=$3
   n_p_t=$4
   nnode=`echo $n_p_t | awk -F _ '{print $1}'`
   mpiprocs=`echo $n_p_t | awk -F _ '{print $2}'`
   threads=`echo $n_p_t | awk -F _ '{print $3}'`
fi
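## Illustrative invocations (the second example is only an assumed valid combination of the
## grids and remappings documented below, not a requirement):
##   ./run_regrid.sh                           -> default: bggd -> nogt, conserv1st, 2 nodes, 1 task/node, 1 thread/task
##   ./run_regrid.sh icos nogt distwgt 2_2_4   -> icos -> nogt, nearest-neighbour, 2 nodes, 2 tasks/node, 4 threads/task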
##
## User's choice of computing architecture
arch=pgi20.4_openmpi_openmp_linux  # nemo_lenovo_intel_impi_openmp, kraken_intel_impi_openmp,
              # training_computer, gfortran_openmpi_openmp_linux, belenos, mac
              # pgi_openmpi_openmp_linux,
              # pgi20.4_openmpi_openmp_linux (does not work with 4.0)
              # gnu1020_openmpi_openmp_linux (does not work with 4.0)
##
######################################################################
## - Verification of source grid type and remapping
#
## - Source grid : bggd, ssea or icos
## bggd is an atmosphere structured (LR) grid
## ssea is an atmosphere gaussian reduced grid (D) : no 2nd order conservative remapping
## icos is an atmosphere unstructured grid (U) : no bilinear, no bicubic nor 2nd order conservative remapping
##
## - Target grid : nogt
## nogt is an ocean structured (LR) grid
##
## - Remapping : distwgt (nearest-neighbour), bili (bilinear), bicu (bicubic), conserv1st or conserv2nd (1st or 2nd order conservative remapping)
##
## Configuration files 'namcouple' are given in data_oasis
## Warning: if you add any extra lines in one of the namcouple files given as examples, you will have to
## change the definition of SRC_GRID_TYPE, SRC_GRID_PERIOD and SRC_GRID_OVERLAP in this script (see the 'Grid source characteristics' section below)
##
## - Verification of source grid type and remapping
if [ ${SRC_GRID} == "ssea" ]; then
        if [ ${remap} == "conserv2nd" ]; then
                echo "Impossible to perform conserv2nd remapping from the gaussian reduced grid ssea"
                exit
        fi
fi
if [ ${SRC_GRID} == "icos" ]; then
        if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
                echo "Impossible to perform ${remap} remapping from the unstructured grid icos"
                exit
        fi
fi
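## Summary of the restrictions enforced above (all other combinations of the grids and
## remappings listed earlier are let through unchanged):
##   bggd (LR) : distwgt, bili, bicu, conserv1st, conserv2nd
##   ssea (D)  : distwgt, bili, bicu, conserv1st (conserv2nd rejected)
##   icos (U)  : distwgt, conserv1st (bili, bicu and conserv2nd rejected)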
##
rundir=$srcdir/${casename}_${SRC_GRID}_${TGT_GRID}_${remap}_${nnode}_${mpiprocs}_${threads}
##
######################################################################
##
## - Name of the executables
exe1=model1
exe2=model2
##
## - Define number of processes to run each executable
(( nproc = $nnode * $mpiprocs ))
(( nproc_exe2 = $nproc / 2 ))
(( nproc_exe1 = $nproc - $nproc_exe2 ))
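## Worked example: with the default 2 nodes x 1 MPI task per node, nproc=2,
## so model2 runs on nproc/2=1 process and model1 on the remaining 1 process.
## For an odd total (e.g. 3 nodes x 1 task, nproc=3), model1 gets the extra process (2 vs 1).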

echo ''
echo '**************************************************************************************************************'
echo '*** '$casename
echo ''
echo "Running test_interpolation on $nnode nodes with $mpiprocs MPI tasks per node and $threads threads per MPI task"
echo '**************************************************************************************************************'
echo 'Source grid  : '$SRC_GRID
echo 'Target grid  : '$TGT_GRID
echo 'Rundir       : '$rundir
echo 'Architecture : '$arch
echo 'Host         : '$host
echo 'User         : '$user
echo 'Grids        : '$SRC_GRID'-->'$TGT_GRID
echo 'Remap        : '$remap
echo ''
echo $exe1' runs on '$nproc_exe1 'processes'
echo $exe2' runs on '$nproc_exe2 'processes'
echo ''
echo ''

## - Copy everything needed into rundir
\rm -fr $rundir/*
mkdir -p $rundir

ln -sf $datadir/grids.nc  $rundir/grids.nc
ln -sf $datadir/masks.nc  $rundir/masks.nc
ln -sf $datadir/areas.nc  $rundir/areas.nc
ln -sf $srcdir/$exe1 $rundir/.
ln -sf $srcdir/$exe2 $rundir/.
cp -f $datadir/namcouple_${SRC_GRID}_${TGT_GRID}_${remap} $rundir/namcouple

## - Grid source characteristics
# These are read from the namcouple file. If you use a namcouple other than the ones provided in data_oasis,
# you may have to change the 3 lines below.
SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # source grid type
SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1` # "P" for periodic, "R" for non-periodic
SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # Number of overlapping grid points for periodic grids
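## Illustrative example (the exact lines depend on the namcouple file actually used):
## if line 17 of the namcouple reads "P  2  P  0", the commands above give
## SRC_GRID_PERIOD=P and SRC_GRID_OVERLAP=2; if line 20 reads
## "CONSERV LR SCALAR LATLON 10 FRACAREA FIRST", then SRC_GRID_TYPE=LR.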

echo "SRC_GRID_TYPE    : $SRC_GRID_TYPE"
echo "SRC_GRID_PERIOD  : $SRC_GRID_PERIOD"
echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"

## - Create name_grids.dat, which will be read by the models, from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='$SRC_GRID'
cl_remap='$remap'
cl_type_src='$SRC_GRID_TYPE'
cl_period_src='$SRC_GRID_PERIOD'
il_overlap_src=$SRC_GRID_OVERLAP
\$end
\$grid_target_characteristics
cl_grd_tgt='$TGT_GRID'
\$end
EOF
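## For the default bggd -> nogt conserv1st case, the resulting name_grids.dat would look
## like the sketch below (type/period/overlap values are illustrative; they come from
## whatever the namcouple actually contains):
##   $grid_source_characteristics
##   cl_grd_src='bggd'
##   cl_remap='conserv1st'
##   cl_type_src='LR'
##   cl_period_src='P'
##   il_overlap_src=2
##   $end
##   $grid_target_characteristics
##   cl_grd_tgt='nogt'
##   $end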
#
cd $rundir

######################################################################
## - Creation of configuration scripts

###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
if [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
#SBATCH --job-name ${n_p_t}
#SBATCH --time=00:02:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
##SBATCH --cpus-per-task=24
cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

  timreq=00:30:00

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name ${n_p_t}
# Time limit for the job
#SBATCH --time=$timreq
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=$nnode
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=$mpiprocs
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=36

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
export I_MPI_WAIT_MODE=enable
(( map = $threads - 1 ))
affin="verbose,granularity=fine,proclist=[0"
for place in \$(seq \$map); do
  affin=\${affin}",\${place}"
  echo \$place
done
echo affin1 \$affin
affin=\${affin}"],explicit"
export KMP_AFFINITY=\$affin
echo KMP_AFFINITY \$KMP_AFFINITY
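# Example: with 4 OpenMP threads per task, the loop above produces
# KMP_AFFINITY=verbose,granularity=fine,proclist=[0,1,2,3],explicit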
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads

    # Binding IntelMPI
    MAP_CPU="0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35"
    INTELMPI_BINDING="-env I_MPI_PIN_PROCESSOR_LIST \${MAP_CPU}"
    I_IMPI_BINDING="-env I_MPI_PERHOST \${mpiprocs} \${INTELMPI_BINDING}"

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
EOF

elif [ $arch == belenos ] ; then
  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal256
#SBATCH --job-name ${remap}_${threads}
#SBATCH --time=02:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
#SBATCH -N $nnode
#SBATCH --ntasks-per-node=$mpiprocs
#
ulimit -s unlimited
cd $rundir
#
module load intelmpi/2018.5.274
module load intel/2018.5.274
module load netcdf-fortran/4.5.2_V2
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=$threads
export OMP_NUM_THREADS=$threads
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2
#
EOF

fi

######################################################################
### - Execute the model

if [ ${arch} == training_computer ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/intel/impi/2018.1.163/bin64/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gfortran_openmpi_openmp_linux ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ ${arch} == gnu1020_openmpi_openmp_linux ]; then
    export OASIS_OMP_NUM_THREADS=$threads
    MPIRUN=/usr/local/openmpi/4.1.0_gcc1020/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == pgi20.4_openmpi_openmp_linux ]; then
    MPIRUN=/usr/local/pgi/linux86-64/20.4/mpi/openmpi-3.1.3/bin/mpirun
    echo 'Executing the model using '$MPIRUN
    $MPIRUN -oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
elif [ $arch == nemo_lenovo_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
elif [ $arch == kraken_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
elif [ $arch == belenos ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
elif [ ${arch} == mac ]; then
    echo 'Executing the model using mpirun'
    mpirun --oversubscribe -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2
fi

echo $casename' has been executed or submitted to the queue.'
echo 'Results can be found in rundir: '$rundir

######################################################################
