#!/bin/ksh
#set -xv
######################################################################
# OASIS3-MCT_4.0 compiled with -qopenmp
############### User's section #######################################
##
fich_i=$1
#
## - Define architecture
arch=machine_test
# stiff (pgi 18.7 openmpi fedora 26), fundy (gfortran openmpi fedora 26),
# nemo_lenovo (intel impi), kraken (intel impi), beaufix (intel impi),
# machine_test (==tioman pgi 17.10 for the moment)
platform=$arch
MPILIB=MPI1
user=`whoami`
#
## - Define paths
srcdir=`pwd`
datadir=$srcdir/data_oasis3
casename=`basename $srcdir`
pathname=`dirname $srcdir`
configfilesdir=$pathname/bench_buildbot_since_2019
griddircom=$pathname/bench_buildbot_since_2019/common_data_oasis3
rmpdircom=$pathname/bench_buildbot_since_2019/common_rmp_files
echo $arch
echo $casename
#
## - Toy models: define the name of each executable and the number
##   of processes it runs on
#
# name of the executables
exe1=model1
exe2=model2
exe3=model3
# number of processes for each executable
nproc_exe1=0
nproc_exe2=6
nproc_exe3=0
# namcouple used (modified in script run_examples_oa3-mct_buildbot:
# namcouple_ini=${namcouple_used})
namcouple_ini=namcouple_1
# Makefile_ini=${makefile_used}
Makefile_ini=Makefile_1
# Number of nodes (must be coherent with toy toy_interpolation)
nnodes=1
# Number of threads (must be coherent with toy toy_interpolation)
threads=1
#
# Stiff pgi 18.7
if [ ${arch} == linux_pgi_openmpi ] || [ ${arch} == linux_pgi_openmpi_openmp ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Fundy gfortran
elif [ ${arch} == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ]; then
    MPIRUN=/usr/lib64/openmpi/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Nemo_lenovo
elif [ ${arch} == nemo_lenovo_intel_impi ] || [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then
    rundir=/scratch/globc/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Kraken
elif [ ${arch} == kraken_intel_impi ] || [ ${arch} == kraken_intel_impi_openmp ]; then
    rundir=/scratch/globc/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Beaufix M-F
elif [ ${arch} == beaufix_intel_impi ] || [ ${arch} == beaufix_intel_impi_openmp ]; then
    rundir=/scratch/work/coquartl/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
# Machine_test (for developments, here tioman pgi 18.7)
elif [ ${arch} == machine_test ]; then
    MPIRUN=/usr/local/pgi/linux86-64/18.7/mpi/openmpi-2.1.2/bin/mpirun
    rundir=/space/$USER/OA3_MCT_RES/work_${casename}_${namcouple_ini}_${Makefile_ini}_nodes_${nnodes}_threads_${threads}_${exe1}_${nproc_exe1}_${exe2}_${nproc_exe2}_${exe3}_${nproc_exe3}
fi
############### End of user's section ################################
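# For illustration only: with the user-section defaults above (namcouple_1,
# Makefile_1, 1 node, 1 thread, 0/6/0 processes), rundir expands on the
# /space machines to something like (the <casename> part depends on the
# directory this script is run from):
#   /space/$USER/OA3_MCT_RES/work_<casename>_namcouple_1_Makefile_1_nodes_1_threads_1_model1_0_model2_6_model3_0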
echo ''
echo '*****************************************************************'
echo '*** '$casename
echo ''
echo 'Rundir       :' $rundir
echo 'Architecture :' $arch
echo 'User         : '$user
echo ''
echo $exe1' runs on '$nproc_exe1' processes'
echo $exe2' runs on '$nproc_exe2' processes'
echo $exe3' runs on '$nproc_exe3' processes'
echo ''
echo ''
######################################################################
###
### 1. Copy source example directory containing everything needed
###    into rundir
#
cd $srcdir
mkdir -p $rundir
if [ $casename == toy_grids_writing ]; then
    ln -sf $griddircom/grids_nogt.nc $rundir/.
    ln -sf $griddircom/masks_nogt.nc $rundir/.
    ln -sf $griddircom/areas_nogt.nc $rundir/.
    ln -sf $griddircom/grids_bggd.nc $rundir/.
    ln -sf $griddircom/masks_bggd.nc $rundir/.
    ln -sf $griddircom/areas_bggd.nc $rundir/.
else
    ln -sf $griddircom/grids.nc $rundir/.
    ln -sf $griddircom/masks.nc $rundir/.
    ln -sf $griddircom/areas.nc $rundir/.
fi
cp $configfilesdir/case${fich_i}.txt $rundir/.
cp -f $srcdir/$exe1 $rundir/.
cp -f $srcdir/$exe2 $rundir/.
cp -f $srcdir/$exe3 $rundir/.
# the namcouple used is copied in by script run_examples_oa3-mct_buildbot
cp -f $datadir/namcouple $rundir/namcouple
# the restart file used is copied into rundir
if [ $casename == toy_multiple_fields_one_communication ] || [ $casename == toy_time_transformations ] || [ $casename == toy_restart_ACCUMUL_1_NOLAG ] || [ $casename == toy_restart_ACCUMUL_1_LAG ] || [ $casename == toy_restart_ACCUMUL_2_NOLAG ] || [ $casename == toy_restart_ACCUMUL_2_LAG ]; then
    cp $datadir/ocean.nc $rundir/ocean.nc
fi
#
# Copy the necessary rmp files as a function of the toy.
# In toy_interpolation we calculate them, and we need the name_grids.dat file.
if [ $casename == toy_interpolation ]; then
    # If you add any lines to the namcouple files given as examples,
    # you will have to change the line numbers used below.
    # SRC_GRID : bggd, ssea, icos, nogt, nogt, nogt
    # TGT_GRID : nogt, nogt, nogt, bggd, ssea, icos
    # remap    : gauswgt, distwgt, bicu, bili, conserv1st, conserv2nd
    conserv_order=" "
    remap=" "
    SRC_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f5`
    TGT_GRID=`sed -n 16p $rundir/namcouple | tr -s ' ' | cut -d" " -f6`
    echo ${SRC_GRID}
    echo ${TGT_GRID}
    # Remapping (see restrictions below)
    remapping=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`
    if [ $remapping == BICUBIC ]; then
        remap=bicu
    fi
    if [ $remapping == DISTWGT ]; then
        remap=distwgt
    fi
    if [ $remapping == GAUSWGT ]; then
        remap=gauswgt
    fi
    if [ $remapping == BILINEAR ]; then
        remap=bili
    fi
    if [ $remapping == CONSERV ]; then
        conserv_order=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f7`
        if [ ${conserv_order} == FIRST ]; then
            remap=conserv1st
        else
            remap=conserv2nd
        fi
    fi
    echo "Remapping : $remap"
    # Verification of source grid type against remapping
    if [ ${SRC_GRID} == "ssea" ]; then
        if [ ${remap} == "conserv2nd" ]; then
            echo "Impossible to perform conserv2nd remapping from gaussian reduced grid ssea"
            exit
        fi
    fi
    if [ ${SRC_GRID} == "icos" ]; then
        if [ ${remap} == "conserv2nd" ] || [ ${remap} == "bicu" ] || [ ${remap} == "bili" ]; then
            echo "Impossible to perform ${remap} remapping from unstructured grid icos"
            exit
        fi
    fi
    SRC_GRID_TYPE=`sed -n 20p $rundir/namcouple | tr -s ' ' | cut -d" " -f2`    # source grid type
    SRC_GRID_PERIOD=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f1`  # "P" for periodic, "R" for non-periodic
    SRC_GRID_OVERLAP=`sed -n 17p $rundir/namcouple | tr -s ' ' | cut -d" " -f2` # number of overlapping grid points for periodic grids
    echo "SRC_GRID_TYPE    : $SRC_GRID_TYPE"
    echo "SRC_GRID_PERIOD  : $SRC_GRID_PERIOD"
    echo "SRC_GRID_OVERLAP : $SRC_GRID_OVERLAP"
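    # For illustration, a purely hypothetical namcouple excerpt showing the
    # field positions the sed/cut commands above rely on (the real file was
    # copied from $datadir; check your own namcouple before trusting this):
    #   line 16:  96 72 182 149 bggd nogt LAG=+3600  -> fields 5/6 : SRC/TGT grid prefixes
    #   line 17:  P  2  P  0                         -> fields 1/2 : source period / overlap
    #   line 20:  BILINEAR LR SCALAR LATLON 1        -> fields 1/2 : remapping / source grid type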
    ## - Create name_grids.dat from the namcouple information
cat <<EOF >> $rundir/name_grids.dat
\$grid_source_characteristics
cl_grd_src='${SRC_GRID}'
cl_remap='${remap}'
cl_type_src='${SRC_GRID_TYPE}'
cl_period_src='${SRC_GRID_PERIOD}'
il_overlap_src=${SRC_GRID_OVERLAP}
\$end
\$grid_target_characteristics
cl_grd_tgt='${TGT_GRID}'
\$end
EOF
# toys other than toy_interpolation
else
    if [ ${casename} == toy_1f1grd_to_2f2grd ]; then
        ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_nogt_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_nogt_to_lmdz_BILINEAR.nc $rundir/.
    elif [ ${casename} == toy_configuration_components_B ]; then
        ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
    elif [ ${casename} == toy_configuration_components_C ]; then
        ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
    elif [ ${casename} == toy_configuration_components_G ]; then
        ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
    elif [ ${casename} == toy_configuration_components_ABCG ]; then
        ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
    elif [ ${casename} == toy_load_balancing ]; then
        ln -sf $rmpdircom/rmp_icos_to_lmdz_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_ssea_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_ssea_to_icos_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_icos_to_ssea_DISTWGT.nc $rundir/.
        ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_bggd_to_lmdz_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_lmdz_to_bggd_BILINEAR.nc $rundir/.
    else
        ln -sf $rmpdircom/rmp_bggd_to_nogt_BILINEAR.nc $rundir/.
        ln -sf $rmpdircom/rmp_nogt_to_bggd_BILINEAR.nc $rundir/.
    fi
# Endif toy_interpolation test
fi
#
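# Note on naming: the precomputed weight files linked above follow the OASIS
# convention rmp_<source_grid>_to_<target_grid>_<REMAPPING>.nc; if a weight
# file required by the namcouple is missing, OASIS3-MCT recomputes the
# weights at run time instead of reusing a precomputed file.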
cd $rundir
#
######################################################################
###
### 3. Creation of configuration scripts (not for Fedora 26 computers)
#
###---------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI
###---------------------------------------------------------------------
if [ ${arch} == nemo_lenovo_intel_impi ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of MPI processes
#SBATCH --nodes=1 --ntasks-per-node=24
#SBATCH --distribution cyclic

cd $rundir
ulimit -s unlimited
module purge
module load compiler/intel/2015.2.164 mkl/2015.2.164 mpi/intelmpi/5.0.3.048
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###----------------------------------------------------------------------------
### NEMO_LENOVO_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###----------------------------------------------------------------------------
elif [ ${arch} == nemo_lenovo_intel_impi_openmp ]; then

echo "We run on Nemo_lenovo with OpenMP+MPI"
echo "Prescribed layout : nodes=2, MPI tasks per node=2, OpenMP threads per task=12"

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=12

cd $rundir
export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=12
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF

###---------------------------------------------------------------------
### KRAKEN_INTEL_IMPI
###---------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes and of processes
#SBATCH --nodes=$nnodes --ntasks-per-node=36
#SBATCH --distribution cyclic

cd $rundir
ulimit -s unlimited
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF
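# Sanity check for the hybrid layouts (simple arithmetic): total cores =
# nodes x ntasks-per-node x cpus-per-task, so kraken_intel_impi_openmp below
# requests 2 x 2 x 18 = 72 cores for 4 MPI tasks of 18 OpenMP threads each,
# and nemo_lenovo_intel_impi_openmp above requests 2 x 2 x 12 = 48 cores.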
###------------------------------------------------------------------------
### KRAKEN_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ ${arch} == kraken_intel_impi_openmp ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash -l
# Partition
#SBATCH --partition prod
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=00:30:00
#SBATCH --output=$rundir/$casename.o
#SBATCH --error=$rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task
#SBATCH --cpus-per-task=18

cd $rundir
module purge
module load compiler/intel/18.0.1.163
module load mpi/intelmpi/2018.1.163
module load lib/netcdf-fortran/4.4.4_impi
module load lib/netcdf-c/4.6.1_impi
export KMP_STACKSIZE=1GB
export I_MPI_PIN_DOMAIN=omp
#export I_MPI_PIN_DOMAIN=socket
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=18
export OMP_NUM_THREADS=18
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
EOF

###---------------------------------------------------------------------
### BEAUFIX_INTEL_IMPI
###---------------------------------------------------------------------
elif [ $arch == beaufix_intel_impi ]; then

(( nproc = $nproc_exe1 + $nproc_exe2 + $nproc_exe3 ))

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
# Job name
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -p normal64    # partition/queue
#SBATCH -N $nnodes     # number of nodes
#SBATCH -n $nproc      # number of procs
#SBATCH -o job.out%j
#SBATCH -e job.err%j
#SBATCH --exclusive

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.4.0
#
time mpirun -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF

###------------------------------------------------------------------------
### BEAUFIX_INTEL_IMPI_OPENMP : hybrid OpenMP+MPI run (toy_interpolation)
###------------------------------------------------------------------------
elif [ $arch == beaufix_intel_impi_openmp ]; then

cat <<EOF > $rundir/run_$casename.$arch
#!/bin/bash
#SBATCH --exclusive
#SBATCH --partition=normal64
#SBATCH --job-name $casename
# Time limit for the job
#SBATCH --time=01:00:00
#SBATCH -o $rundir/$casename.o
#SBATCH -e $rundir/$casename.e
# Number of nodes
#SBATCH --nodes=2
# Number of MPI tasks per node
#SBATCH --ntasks-per-node=2
# Number of OpenMP threads per MPI task (= number of cores per task)
#SBATCH --cpus-per-task=20

ulimit -s unlimited
cd $rundir
module load intel/16.1.150
module load intelmpi/5.1.2.150
module load netcdf/4.3.0
#
export KMP_STACKSIZE=1GB
export I_MPI_WAIT_MODE=enable
export KMP_AFFINITY=verbose,granularity=fine,compact
export OASIS_OMP_NUM_THREADS=20
export OMP_NUM_THREADS=20
#
time mpirun -np ${nproc_exe1} ./$exe1 : -np ${nproc_exe2} ./$exe2 : -np $nproc_exe3 ./$exe3
#
EOF
fi

######################################################################
###
### 4. Execute the model
#
# Stiff and Fundy
if [ $arch == linux_pgi_openmpi ] || [ $arch == linux_pgi_openmpi_openmp ] || [ $arch == linux_gfortran_openmpi ] || [ ${arch} == linux_gfortran_openmpi_openmp ]; then
    export OMP_NUM_THREADS=1
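    # With the default process counts above (0/6/0), for example, the case
    # analysis below reduces the launch to a single executable:
    #   $MPIRUN -np 6 ./model2 > runjob.err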
    # ulimit -s unlimited : already set, because .bashrc sources /usr/local/bin/setuprogenv.sh
    if [ $nproc_exe2 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 > runjob.err
    elif [ $nproc_exe1 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe2 ./$exe2 > runjob.err
    elif [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
    else
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3 > runjob.err
    fi
# Nemo_lenovo and Kraken
elif [ ${arch} == nemo_lenovo_intel_impi ] || [ ${arch} == nemo_lenovo_intel_impi_openmp ] || [ ${arch} == kraken_intel_impi ] || [ ${arch} == kraken_intel_impi_openmp ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $USER
# Beaufix
elif [ $arch == beaufix_intel_impi ] || [ ${arch} == beaufix_intel_impi_openmp ]; then
    echo 'Submitting the job to the queue using sbatch'
    sbatch $rundir/run_$casename.$arch
    squeue -u $user
# Machine_test
elif [ $arch == machine_test ]; then
    export OMP_NUM_THREADS=1
    # ulimit -s unlimited : already set, because .bashrc sources /usr/local/bin/setuprogenv.sh
    if [ $nproc_exe2 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 > runjob.err
    elif [ $nproc_exe1 == 0 ] && [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe2 ./$exe2 > runjob.err
    elif [ $nproc_exe3 == 0 ]; then
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 > runjob.err
    else
        $MPIRUN -np $nproc_exe1 ./$exe1 : -np $nproc_exe2 ./$exe2 : -np $nproc_exe3 ./$exe3 > runjob.err
    fi
fi
echo $casename' has been executed or submitted to the queue.'
echo 'Results are found in rundir : '$rundir
######################################################################
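# Where to look afterwards: on the interactive machines the run writes
# runjob.err in $rundir; on the batch machines the SLURM job writes
# $casename.o / $casename.e (job.out%j / job.err%j for beaufix_intel_impi)
# in $rundir, since sbatch is invoked from there.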