source: CONFIG_DEVT/IPSLCM6.5_work_ENSEMBLES/oasis3-mct/examples/toy_configuration_components_C/run_buildbot @ 5725

Last change on this file since 5725 was 5725, checked in by aclsce, 3 years ago

Added new oasis3-MCT version to be used to handle ensembles simulations with XIOS.

  • Property svn:executable set to *
#!/bin/ksh
#set -xv
######################################################################
# OASIS3-MCT_4.0 compiled with -qopenmp
############### User's section #######################################
##
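# First (and only) command-line argument: buildbot case index, used below to
# copy case${fich_i}.txt from $configfilesdir into the run directory.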
fich_i=$1
## - Define architecture
arch=machine_test      # stiff (pgi 18.7 openmpi fedora 26), fundy (gfortran openmpi fedora 26),
                       # nemo_lenovo (intel impi), kraken (intel impi), beaufix (intel impi)
                       # machine_test (== tioman, pgi 17.10 for the moment)
platform=$arch
MPILIB=MPI1      # MPI1
user=`whoami`
#
## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis3
casename=`basename $srcdir`
pathname=`dirname $srcdir`
configfilesdir=$pathname/bench_buildbot_since_2019
griddircom=$pathname/bench_buildbot_since_2019/common_data_oasis3
rmpdircom=$pathname/bench_buildbot_since_2019/common_rmp_files
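# Illustrative layout assumed by the paths above (directory names taken from
# this script):
#   $pathname/
#       <this toy>/                 <- $srcdir: this script, the toy executables, data_oasis3/
#       bench_buildbot_since_2019/  <- $configfilesdir: case*.txt files
#           common_data_oasis3/     <- $griddircom: grids.nc, masks.nc, areas.nc
#           common_rmp_files/       <- $rmpdircom: precomputed rmp_*.nc weight files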
echo $arch
echo $casename
#
# - Toy models: define the number of processes to run each executable
#               and the names of the executables
#
# name of the executables
    exe1=model1
    exe2=model2
    exe3=model3
# number of processes for each executable
    nproc_exe1=0
    nproc_exe2=6
    nproc_exe3=0
# namcouple used (modified in script run_examples_oa3-mct_buildbot:
# namcouple_ini=${namcouple_used})
    namcouple_ini=namcouple_1
# Makefile_ini=${makefile_used}
    Makefile_ini=Makefile_1
# Number of nodes (to be consistent with the toy_interpolation toy)
    nnodes=1
# Number of threads (to be consistent with the toy_interpolation toy)
    threads=1
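# Optional sanity check (a minimal sketch, not part of the original workflow):
#   (( nproc_total = nproc_exe1 + nproc_exe2 + nproc_exe3 ))
#   if [ $nproc_total -eq 0 ]; then echo "No MPI process requested"; exit 1; fi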
#
# Stiff pgi 18.7
if [ ${arch} == linux_pgi_openmpi ] || [ ${arch} == linux_pgi_openmpi_openmp ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Fundy gfortran
elif [ ${arch} == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ]; then
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Nemo_lenovo
elif [ ${arch} == nemo_lenovo_intel_impi ] || [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then
    rundir=/scratch/globc/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Kraken
elif [ ${arch} == kraken_intel_impi ] || [ ${arch} == kraken_intel_impi_openmp ]; then
    rundir=/scratch/globc/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Beaufix M-F
elif [ ${arch} == beaufix_intel_impi ] || [ ${arch} == beaufix_intel_impi_openmp ]; then
    rundir=/scratch/work/coquartl/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Machine_test (for development, here tioman pgi 18.7)
elif [ ${arch} == machine_test ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
fi
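# The work directory pattern above is identical on every machine; a possible
# refactoring (sketch only, not applied here; scratch_root is a hypothetical
# per-machine variable) would build the common suffix once:
#   run_suffix=work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
#   rundir=$scratch_root/OA3_MCT_RES/$run_suffix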

############### End of user's section ################################

echo ''
echo '*****************************************************************'
echo '*** '$casename' : '$run
echo ''
echo 'Rundir       :' $rundir
echo 'Architecture :' $arch
echo 'User         : '$user
echo ''
echo $exe1' runs on '$nproc_exe1 'processes'
echo $exe2' runs on '$nproc_exe2 'processes'
echo $exe3' runs on '$nproc_exe3 'processes'
echo ''
echo ''
######################################################################
###
### 1. Copy the source example directory containing everything needed
###    into rundir

cd $srcdir
mkdir -p $rundir

if [ $casename == toy_grids_writing ]; then
    ln -sf $griddircom/grids_nogt.nc  $rundir/.
    ln -sf $griddircom/masks_nogt.nc  $rundir/.
    ln -sf $griddircom/areas_nogt.nc  $rundir/.
    ln -sf $griddircom/grids_bggd.nc  $rundir/.
    ln -sf $griddircom/masks_bggd.nc  $rundir/.
    ln -sf $griddircom/areas_bggd.nc  $rundir/.
else
    ln -sf $griddircom/grids.nc  $rundir/.
    ln -sf $griddircom/masks.nc  $rundir/.
    ln -sf $griddircom/areas.nc  $rundir/.
fi

cp $configfilesdir/case${fich_i}.txt $rundir/.

cp -f $srcdir/$exe1 $rundir/.
cp -f $srcdir/$exe2 $rundir/.
cp -f $srcdir/$exe3 $rundir/.

# The namcouple actually used is copied by the script run_examples_oa3-mct_buildbot
cp -f $datadir/namcouple $rundir/namcouple
# The restart file is copied into rundir for the toys that need one
if [ $casename == toy_multiple_fields_one_communication ] || [ $casename == toy_time_transformations ] || [ $casename == toy_restart_ACCUMUL_1_NOLAG ] || [ $casename == toy_restart_ACCUMUL_1_LAG ] || [ $casename == toy_restart_ACCUMUL_2_NOLAG ] || [ $casename == toy_restart_ACCUMUL_2_LAG ]; then
    cp $datadir/ocean.nc $rundir/ocean.nc
fi
#
# Copy the necessary rmp files depending on the toy.
# In toy_interpolation we calculate them, and we need the name_grids.dat file.
if [ $casename == toy_interpolation ]; then
# If you add lines to the example namcouples, you will have to adjust the
# line numbers used in the sed commands below.
# SRC_GRID : bggd, ssea, icos, nogt, nogt, nogt
# TGT_GRID : nogt, nogt, nogt, bggd, ssea, icos
# remap=gauswgt, distwgt, bicu, bili, conserv1st, conserv2nd
    conserv_order=" "
    remap=" "
    SRC_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f5`
    TGT_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f6`
    echo ${SRC_GRID}
    echo ${TGT_GRID}
# Remapping (see restrictions below)
    remapping=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`
    case $remapping in
        BICUBIC)  remap=bicu ;;
        DISTWGT)  remap=distwgt ;;
        GAUSWGT)  remap=gauswgt ;;
        BILINEAR) remap=bili ;;
        CONSERV)
            conserv_order=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f7`
            if [ ${conserv_order} == FIRST ]; then
                remap=conserv1st
            else
                remap=conserv2nd
            fi
            ;;
    esac
    echo "Remapping : $remap"
# Check that the requested remapping is feasible from the source grid
    if [ ${SRC_GRID} == "ssea" ]; then
      if [ ${remap} == "conserv2nd" ]; then
        echo "Impossible to perform conserv2nd remapping from gaussian reduced grid ssea"
        exit
      fi
    fi
    if [ ${SRC_GRID} == "icos" ]; then
      if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
        echo "Impossible to perform ${remap} remapping from unstructured grid icos"
        exit
      fi
    fi
    SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # source grid type
    SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1` # "P" for periodic, "R" for non-periodic
    SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # Number of overlapping grid points for periodic grids
    echo "SRC_GRID_TYPE : $SRC_GRID_TYPE"
    echo "SRC_GRID_PERIOD : $SRC_GRID_PERIOD"
    echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"
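# Hypothetical excerpt of the namcouple lines parsed above (the real content is
# copied from $datadir/namcouple; fields are counted after tr squeezes blanks):
#   line 16 (dims + grid names):  182 149 362 332 bggd nogt
#                                 -> fields 5/6 give SRC_GRID/TGT_GRID
#   line 17 (periodicity):        P  2  P  0
#                                 -> fields 1/2 give SRC_GRID_PERIOD/SRC_GRID_OVERLAP
#   line 20 (SCRIPR remapping):   CONSERV LR SCALAR LATLON 1 FRACAREA FIRST
#                                 -> fields 1/2/7 give remapping/grid type/order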
## - Create name_grids.dat from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='${SRC_GRID}'
cl_remap='${remap}'
cl_type_src='${SRC_GRID_TYPE}'
cl_period_src='${SRC_GRID_PERIOD}'
il_overlap_src=${SRC_GRID_OVERLAP}
\$end
\$grid_target_characteristics
cl_grd_tgt='${TGT_GRID}'
\$end
EOF
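# Note: '>>' appends, so rerunning in a pre-existing rundir would accumulate
# duplicate namelist blocks in name_grids.dat; '>' (truncate) would be safer
# if the work directory is ever reused.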
# Toys other than toy_interpolation
else
   if [ ${casename} == toy_1f1grd_to_2f2grd ]; then
      ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_nogt_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_nogt_to_lmdz_BILINEAR.nc $rundir/.
   elif [ ${casename} == toy_configuration_components_B ]; then
      ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
   elif [ ${casename} == toy_configuration_components_C ]; then
      ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
   elif [ ${casename} == toy_configuration_components_G ]; then
      ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
   elif [ ${casename} == toy_configuration_components_ABCG ]; then
      ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
   elif [ ${casename} == toy_load_balancing ]; then
      ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
      ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
   else
      ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
      ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
   fi
# End of the toy_interpolation test
fi
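# The repeated ln -sf blocks above could be factored with a small helper
# (sketch only; link_rmp is a hypothetical name, not used elsewhere):
#   link_rmp () {
#       for f in "$@"; do ln -sf $rmpdircom/rmp_${f}.nc $rundir/. ; done
#   }
#   e.g. for toy_configuration_components_C:
#       link_rmp icos_to_lmdz_DISTWGT lmdz_to_icos_DISTWGT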
#
cd $rundir
#
######################################################################
###
### 2. Create the batch configuration scripts (not for the Fedora 26 computers)
#
###---------------------------------------------------------------------
### NEMO_LENOVO_INTELMPI
###---------------------------------------------------------------------
if [ ${arch} == nemo_lenovo_intel_impi ]; then

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI processes
#SBATCH --nodes=1 --ntasks-per-node=24
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF
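# Note: the MPMD line above does not guard against nproc_exe*=0 (most MPI
# launchers reject "-np 0"); only the interactive branches at the end of this
# script skip executables with zero processes. The same caveat holds for the
# other generated job scripts below.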

###----------------------------------------------------------------------------
### NEMO_LENOVO_INTELMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###----------------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

  echo "We run on nemo_lenovo with OpenMP+MPI"
  echo "Prescribed: nodes=2, MPI tasks per node=2, OpenMP threads per task=12"

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=12

cd $rundir

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=12

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi ]; then

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of processes
#SBATCH --nodes=$nnodes --ntasks-per-node=36
#SBATCH --distribution cyclic

cd $rundir

ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###------------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=18

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi

export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=18
export OMP_NUM_THREADS=18

time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF

###---------------------------------------------------------------------
### BEAUFIX INTEL IMPI
###---------------------------------------------------------------------
elif [ $arch == beaufix_intel_impi ]; then

  (( nproc = $nproc_exe1 + $nproc_exe2 + $nproc_exe3 ))

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -p  normal64        # partition/queue
#SBATCH -N $nnodes          # number of nodes
#SBATCH -n $nproc           # number of procs
#SBATCH -o job.out%j
#SBATCH -e job.err%j
#SBATCH --exclusive

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.4.0
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###------------------------------------------------------------------------
### BEAUFIX_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ $arch == beaufix_intel_impi_openmp ]; then

  cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal64
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task (= number of cores per MPI task)
#SBATCH --cpus-per-task=20

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.3.0
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=20
export OMP_NUM_THREADS=20
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

fi

######################################################################
###
### 3. Execute the model
# Stiff, Fundy and machine_test: run interactively
if [ $arch == linux_pgi_openmpi ] || [ $arch == linux_pgi_openmpi_openmp ] || [ $arch == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ] || [ $arch == machine_test ]; then
    export OMP_NUM_THREADS=1
    # ulimit -s unlimited : already set by ". /usr/local/bin/setuprogenv.sh" in the .bashrc
    if [ $nproc_exe2 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 > runjob.err
    elif [ $nproc_exe1 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe2 ./$exe2 > runjob.err
    elif [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
    else
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3 > runjob.err
    fi
# Nemo_lenovo and Kraken: submit the generated job script
elif [ ${arch} == nemo_lenovo_intel_impi ] || [ ${arch} == nemo_lenovo_intel_impi_openmp ] || [ ${arch} == kraken_intel_impi ] || [ ${arch} == kraken_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
# Beaufix: submit the generated job script
elif [ $arch == beaufix_intel_impi ] || [ ${arch} == beaufix_intel_impi_openmp ]; then
    echo 'Submitting the job to queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
fi

echo $casename 'has been executed or submitted to the queue.'
echo 'Results are found in rundir : '$rundir

######################################################################
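# Typical invocation (illustrative; in practice this script is driven by
# run_examples_oa3-mct_buildbot):
#   ./run_buildbot 1    # copies case1.txt from $configfilesdir into the rundir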