batch-X64_BELENOS_INTEL_IMPI in utils/CI/sette/BATCH_TEMPLATE – NEMO

source: utils/CI/sette/BATCH_TEMPLATE/batch-X64_BELENOS_INTEL_IMPI @ 15769

Last change on this file since 15769 was 14948, checked in by gsamson, 3 years ago: update batch templates in SETTE in agreement with Mercator ARCH files

File size: 5.0 KB
#!/usr/bin/env bash

#SBATCH -J sette
#SBATCH -o sette.%j.out
#SBATCH -e sette.%j.err
#SBATCH --export=ALL
#SBATCH --parsable
#SBATCH --exclusive
#SBATCH -N 1

#SBATCH -p normal256
#SBATCH --time=01:00:00
##SBATCH --time=00:15:00

#SBATCH -A smer
##SBATCH -A cmems

#SBATCH --qos=normal
##SBATCH --qos=coper

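# Submission sketch (illustrative only; the job file name below is hypothetical):
# with --parsable set above, sbatch prints just the job id, so a wrapper script
# such as SETTE's fcm_job.sh can capture and monitor it, e.g.
#   jobid=$(sbatch sette_batch_job.sh)
#   squeue -j "${jobid}"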
# Test-specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file).
#
  echo " ";
  OCORES=NPROCS
  XCORES=NXIOPROCS
  O_PER_NODE=32
  X_PER_NODE=8
  if [ $XCORES -le $X_PER_NODE ]; then X_PER_NODE=$XCORES; fi
  if [ $OCORES -le $O_PER_NODE ]; then O_PER_NODE=$OCORES; fi
  export SETTE_DIR=DEF_SETTE_DIR

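# Illustration of the substitution above (a sketch only; the core counts and path
# are example values, the real ones are chosen by fcm_job.sh): the NPROCS, NXIOPROCS
# and DEF_* placeholders are replaced with sed along the lines of
#   sed -e 's/NPROCS/28/' -e 's/NXIOPROCS/4/' \
#       -e 's,DEF_SETTE_DIR,/path/to/sette,' batch-X64_BELENOS_INTEL_IMPI > batch_job.sh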
###############################################################
#
#
# load sette functions (only post_test_tidyup needed)
#
  . ${SETTE_DIR}/all_functions.sh
###############################################################
#
# modules to load
module purge
module load gcc/9.2.0 intel/2018.5.274 intelmpi/2018.5.274 phdf5/1.8.18 netcdf_par/4.7.1_V2
#module load xios-2.5_rev1903
export XIOS_DIR="/home/ext/mr/smer/samsong/SRC/XIOS/trunk/BEL_INTEL18_r2134"
export XIOS_BIN="${XIOS_DIR}/bin"
export XIOS_BIN_DIR=${XIOS_BIN}
export XIOS_INC="${XIOS_DIR}/inc"
export XIOS_INC_DIR=${XIOS_INC}
export XIOS_LIB="${XIOS_DIR}/lib"
export XIOS_LIB_DIR=${XIOS_LIB}
export XIO_HOME=${XIOS_DIR}

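# Optional sanity check (a sketch, not required by SETTE): confirm that the XIOS
# installation referenced above provides the detached server binary that the MPMD
# branch further down copies into the run directory, e.g.
#   [ -x "${XIOS_BIN}/xios_server.exe" ] || echo "WARNING: no xios_server.exe in ${XIOS_BIN}"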
# Do not remove or change the following line
# BODY
#
# Test-specific settings. Do not hand edit these lines; the fcm_job.sh script will set these
# (via sed operating on this template job file). Note that the number of compute nodes required
# is also set by fcm_job.sh in the batch headers above.
#
# These variables are needed by the post_test_tidyup function in all_functions.sh
#
  export INPUT_DIR=DEF_INPUT_DIR
  export CONFIG_DIR=DEF_CONFIG_DIR
  export TOOLS_DIR=DEF_TOOLS_DIR
  export NEMO_VALIDATION_DIR=DEF_NEMO_VALIDATION
  export NEW_CONF=DEF_NEW_CONF
  export CMP_NAM=DEF_CMP_NAM
  export TEST_NAME=DEF_TEST_NAME
  export EXE_DIR=DEF_EXE_DIR
  ulimit -a
  ulimit -s unlimited
#
# end of set up
###############################################################
#
# change to the working directory
#
  cd $EXE_DIR

  echo Running on host `hostname`
  echo Time is `date`
  echo Directory is `pwd`
#
#  Run the parallel MPI executable
#

  # Comm/Fabric
  # -----------
  export DAPL_ACK_RETRY=7
  export DAPL_ACK_TIMER=20
  export DAPL_IB_SL=0
  export DAPL_UCM_CQ_SIZE=8192
  export DAPL_UCM_DREQ_RETRY=4
  export DAPL_UCM_QP_SIZE=8192
  export DAPL_UCM_REP_TIME=8000
  export DAPL_UCM_RTU_TIME=8000
  export DAPL_UCM_WAIT_TIME=10000
  export I_MPI_CHECK_DAPL_PROVIDER_COMPATIBILITY=0
  export I_MPI_CHECK_DAPL_PROVIDER_MISMATCH=none
  export I_MPI_DAPL_RDMA_MIXED=enable
  export I_MPI_DAPL_SCALABLE_PROGRESS=1
  export I_MPI_DAPL_TRANSLATION_CACHE=1
  export I_MPI_DAPL_UD_DIRECT_COPY_THRESHOLD=65536
  export I_MPI_DAPL_UD=on
  export I_MPI_FABRICS=shm:dapl
  export I_MPI_DAPL_PROVIDER=ofa-v2-mlx5_0-1u
  export I_MPI_FALLBACK=disable
  export I_MPI_FALLBACK_DEVICE=disable
  export I_MPI_DYNAMIC_CONNECTION=1
  export I_MPI_FAST_COLLECTIVES=1
  export I_MPI_LARGE_SCALE_THRESHOLD=8192
  # File system
  # -----------
  export I_MPI_EXTRA_FILESYSTEM_LIST=lustre
  export I_MPI_EXTRA_FILESYSTEM=on
  # Slurm
  # -----
  export I_MPI_HYDRA_BOOTSTRAP=slurm
  export I_MPI_SLURM_EXT=0
  # Force kill job
  # --------------
  export I_MPI_JOB_SIGNAL_PROPAGATION=on
  export I_MPI_JOB_ABORT_SIGNAL=9
  # Extra
  # -----
  export I_MPI_LIBRARY_KIND=release_mt
  export EC_MPI_ATEXIT=0
  export EC_PROFILE_HEAP=0
  # Process placement (cyclic)
  # --------------------------
  export I_MPI_JOB_RESPECT_PROCESS_PLACEMENT=off
  export I_MPI_PERHOST=1
  # Process pinning
  # ---------------
  export I_MPI_PIN=enable
  export I_MPI_PIN_PROCESSOR_LIST="allcores:map=scatter" # map=spread

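  # Debugging hint (sketch only): to check where Intel MPI actually places and pins
  # the ranks, raise the library's verbosity before the launch below, e.g.
  #   export I_MPI_DEBUG=4   # prints the pinning map at start-up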
  if [ $XCORES -gt 0 ]; then
#
#  Run MPMD case
#
     # XIOS will run on a separate node, so it will run in the parallel queue
     if [ ! -f ./xios_server.exe ] && [ -f ${XIO_HOME}/bin/xios_server.exe ]; then
        cp ${XIO_HOME}/bin/xios_server.exe .
     fi
     if [ ! -f ./xios_server.exe ]; then
        echo "./xios_server.exe not found"
        echo "run aborted"
        exit
     fi

#    cat > mpmd.conf <<EOF
#0-$((OCORES-1)) ./nemo
#${OCORES}-39 ./xios_server.exe
#EOF
    cat > mpmd.conf <<EOF
-n ${OCORES} ./nemo
-n ${XCORES} ./xios_server.exe
EOF

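#    For illustration only (28 and 4 are example values; the real counts come from
#    the NPROCS/NXIOPROCS substitution at the top of this file): with OCORES=28 and
#    XCORES=4 the generated mpmd.conf reads
#       -n 28 ./nemo
#       -n 4  ./xios_server.exe
#    and mpiexec.hydra -configfile launches both executables as a single MPMD job.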
#     echo time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n 40 --multi-prog ./mpmd.conf
#          time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n 40 --multi-prog ./mpmd.conf
     echo time mpiexec.hydra -configfile ./mpmd.conf
          time mpiexec.hydra -configfile ./mpmd.conf
#
  else
#
# Run SPMD case
#
#    echo time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n ${OCORES} ./nemo
#         time srun --cpu_bind=cores --mpi=pmi2 -m cyclic -n ${OCORES} ./nemo
     echo time mpiexec.hydra -n ${OCORES} ./nemo
          time mpiexec.hydra -n ${OCORES} ./nemo
  fi
#

#
  post_test_tidyup
# END_BODY
# Do not remove or change the previous line
  exit
