#####################################################
# Author : Simona Flavoni for NEMO
# Contact : sflod@locean-ipsl.upmc.fr
#
# ----------------------------------------------------------------------
# NEMO/SETTE , NEMO Consortium (2010)
# Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
# ----------------------------------------------------------------------
#
# Some scripts called by sette.sh
# prepare_job.sh : creates the job script for running job
######################################################
#set -vx
# POSIX mode: stricter, more portable shell behaviour for the whole script.
set -o posix
#set -u
#set -e
#+
#
# ================
# prepare_job.sh
# ================
#
# -------------------------------------------------
# script that creates the job script for NEMO tests
# -------------------------------------------------
#
# SYNOPSIS
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE
#
#
# DESCRIPTION
# ===========
#
# Part of the SETTE package to run tests for NEMO
#
# prepare the script $JOB_FILE to run the tests
#
# EXAMPLES
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG $JOB_FILE
#
#  prepare the $JOB_FILE for execution
#
#
# TODO
# ====
#
# option debug
#
#
# EVOLUTIONS
# ==========
#
# $Id: prepare_job.sh 3050 2011-11-07 14:11:34Z acc $
#
#
#
# * creation
#
#-
#
# Usage/help text. Both lines live in a single variable: the original
# script assigned "usage" twice, so the first (the actual Usage line)
# was silently overwritten and never printed.
usage=" Usage : ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE NUM_XIO_SERVERS
 example : ./prepare_job.sh input_ORCA2_ICE_PISCES.cfg 8 SHORT no/yes \$JOB_FILE 0 2"

# NOTE(review): the script also reads a 7th positional argument
# (NEMO_VALID) further down; minargcount is kept at 6 for backward
# compatibility -- confirm whether sette.sh always passes 7 arguments.
minargcount=6
if [ ${#} -lt ${minargcount} ]
then
    echo "not enough arguments for prepare_job.sh script"
    echo "control number of argument of prepare_job.sh in sette.sh"
    echo "${usage}"
    exit 1
fi
unset minargcount

# Make sure the SETTE log file exists before anything appends to it.
if [ ! -f "${SETTE_DIR}/output.sette" ] ; then
    touch "${SETTE_DIR}/output.sette"
fi
#
# set and export TEST_NAME. It will be used within the post_test_tidyup function
# NOTE(review): no export is actually performed here -- confirm whether
# the caller relies on these being plain shell variables only.
#
INPUTARFILE=$1   # file listing input tarballs: "<tarfile> <dir>" per line
NB_PROC=$2       # number of ocean MPI tasks
TEST_NAME=$3     # short name of the test (e.g. SHORT, LONG)
MPI_FLAG=$4      # "yes"/"no": whether to run under MPI
JOB_FILE=$5      # path of the batch job script to build
NXIO_PROC=$6     # number of detached XIOS server tasks
NEMO_VALID=$7    # validation directory passed through to the job template
# Total MPI tasks (ocean + XIOS). Operands default to 0 so the arithmetic
# cannot raise a syntax error when an argument is missing.
TOTAL_NPROCS=$(( ${NB_PROC:-0} + ${NXIO_PROC:-0} ))
# export EXE_DIR. This directory is used to execute model
# NOTE(review): EXE_DIR is expected to arrive already set from the
# calling environment (sette.sh) -- nothing is exported here; confirm.
#
# Record the context of this run in the SETTE log file.
echo "date: $(date)" >> "${SETTE_DIR}/output.sette"
echo "" >> "${SETTE_DIR}/output.sette"
echo "running config: ${NEW_CONF}" >> "${SETTE_DIR}/output.sette"
echo "" >> "${SETTE_DIR}/output.sette"
echo "list of cpp_keys: " >> "${SETTE_DIR}/output.sette"
# 'more' is an interactive pager; cat is the right tool for appending a
# file's contents to a log.
cat "${CONFIG_DIR}/${NEW_CONF}/cpp_${NEW_CONF}.fcm" >> "${SETTE_DIR}/output.sette"
echo "" >> "${SETTE_DIR}/output.sette"
echo "compiling with: ${CMP_NAM}" >> "${SETTE_DIR}/output.sette"
echo "" >> "${SETTE_DIR}/output.sette"
echo "executing script : \"fcm_job $@\" " >> "${SETTE_DIR}/output.sette"
echo " " >> "${SETTE_DIR}/output.sette"
################################################################
# SET INPUT
# Fetch and unpack the input tarfile(s) if needed. Each line of
# ${SETTE_DIR}/${INPUTARFILE} names a tarfile and the directory
# (relative to ${FORCING_DIR}) it unpacks into.
if [ "$(wc -w < ${SETTE_DIR}/$INPUTARFILE)" -ne 0 ] ; then
    echo "looking for input files in ${SETTE_DIR}/$INPUTARFILE " >> ${SETTE_DIR}/output.sette
    # number of tarfiles: NBTAR
    NBTAR=$(wc -l < ${SETTE_DIR}/$INPUTARFILE)
    echo "NB of tarfiles ${NBTAR} " >> ${SETTE_DIR}/output.sette
    # loop on tarfiles: read file name and directory from each line
    while read tar_file dir_conf_forc
    do
        echo looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc}
        echo looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc} >> ${SETTE_DIR}/output.sette
        if [ -f ${FORCING_DIR}/${tar_file} ] && [ -d ${FORCING_DIR}/${dir_conf_forc} ] ; then
            # tarfile already extracted: fall through and only refresh links
            echo "Tarfile and input dir ar there, only check the links" >> ${SETTE_DIR}/output.sette
        else
            # a listed tarfile that cannot be found at all is fatal
            if [ ! -f ${FORCING_DIR}/${tar_file} ] ; then
                echo "tarfile ${FORCING_DIR}/${tar_file} cannot be found we stop " ; exit 2
            fi
            echo "mkdir ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
            mkdir ${FORCING_DIR}/${dir_conf_forc}
            cd ${FORCING_DIR}/${dir_conf_forc}
            echo " extract from tarfile ${FORCING_DIR}/${tar_file} in ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
            # substitution left unquoted on purpose: gunzip needs one
            # argument per .gz file found
            tar xvof ${FORCING_DIR}/${tar_file} ; gunzip -f $(find . -name "*.gz")
        fi
        # link every forcing file into EXE_DIR unless already present there
        cd ${FORCING_DIR}/${dir_conf_forc}
        for fida in *
        do
            [ -f ${EXE_DIR}/${fida} ] || ln -s ${FORCING_DIR}/${dir_conf_forc}/${fida} ${EXE_DIR}/${fida}
        done
    done < ${SETTE_DIR}/$INPUTARFILE
else
    echo "no input file to be searched "
fi
################################################################
################################################################
# RUN OPA
# Move into the execution directory and abort early if the model
# executable is missing.
cd "${EXE_DIR}"
if [ ! -r "${EXE_DIR}/nemo" ]
then
    echo "executable nemo does not exist"
    echo "executable nemo does not exist, exit" >> "${SETTE_DIR}/output.sette"
    exit 1
fi
# example for NOCS ClusterVision system using SLURM batch submission (requires ${SETTE_DIR}/sette_batch_template file)
#
# if [ ${MPI_FLAG} == "no" ] ; then
#
# Compute the batch geometry for the machine selected by ${COMPILER}.
# Results are returned through global variables consumed by the template
# edits below: NB_NODES always; QUEUE, SELECT, NB_PROC_NODE and
# GROUP_IDRIS on some machines only. Wrapped in a function (called once,
# immediately below) so the per-machine arithmetic can be exercised in
# isolation.
compute_nb_nodes() {
    case ${COMPILER} in
        X64_MOBILIS)
            # 16 cores per node: round (ocean + XIOS) tasks up to whole nodes
            NB_REM=$( echo $NB_PROC $NXIO_PROC | awk '{print ( $1 + $2 ) % 16}')
            if [ ${NB_REM} -eq 0 ] ; then
                # number of processes required is an integer multiple of 16
                NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1 + $2 ) / 16}')
            else
                # not an integer multiple of 16: round up the node count
                NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{printf("%d",($1 + $2 ) / 16 + 1 )}')
            fi
            ;;
        XC_ARCHER_INTEL)
            # ocean cores are packed 24 to a node
            NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 24 ) }')
            if [ ${NB_REM} -eq 0 ] ; then
                # ocean tasks fill whole nodes exactly
                NB_NODES=$( echo $NB_PROC | awk '{print ($1) / 24}')
            else
                # round up the number of ocean nodes required
                NB_NODES=$( echo $NB_PROC | awk '{printf("%d",($1) / 24 + 1 )}')
            fi
            # xios cores are sparsely packed at 4 to a node but can not
            # share nodes with the ocean cores.
            # BUG FIX: the original piped a single field into awk but read
            # field $2, so NB_REM was always 0 and the round-up branch
            # below was unreachable.
            NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
            if [ ${NB_REM} -eq 0 ] ; then
                # xios tasks fill whole nodes exactly
                NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{print ($1 + ( $2 / 4 ))}')
            else
                # round up; printf %d truncates the fractional node count to
                # an integer (the original plain print produced e.g. "2.5")
                NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{printf("%d",$1 + ( $2 / 4 ) + 1)}')
            fi
            ;;
        XC40_METO*) # Setup for Met Office XC40 with any compiler
            # ocean cores are packed 32 to a node.
            # If we need more than one node then we have to use the parallel
            # queue and XIOS must have a node to itself.
            NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 32 ) }')
            if [ ${NB_REM} -eq 0 ] ; then
                # number of processes required is an integer multiple of 32
                NB_NODES=$( echo $NB_PROC | awk '{print ($1) / 32}')
            else
                # round up the number of nodes required
                NB_NODES=$( echo $NB_PROC | awk '{printf("%d",($1) / 32 + 1 )}')
            fi
            # xios cores are sparsely packed at 4 to a node
            if [ $NXIO_PROC -eq 0 ] ; then
                NB_XNODES=0
            else
                NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
                if [ ${NB_REM} -eq 0 ] ; then
                    # NOTE(review): this branch adds one extra node even though
                    # NXIO_PROC is an exact multiple of 4 -- possibly deliberate
                    # (XIOS gets a node to itself); confirm before changing.
                    NB_XNODES=$( echo $NXIO_PROC | awk '{print (( $1 / 4 ) + 1)}')
                else
                    # round up the number of nodes required
                    NB_XNODES=$( echo $NXIO_PROC | awk '{printf("%d",($1) / 4 + 1) }')
                fi
            fi
            if [ ${NB_XNODES} -ge 1 ] ; then
                NB_NODES=$((NB_NODES+NB_XNODES))
            fi
            echo NB_XNODES=${NB_XNODES}
            echo Total NB_NODES=${NB_NODES}
            QUEUE=normal
            SELECT="select=$NB_NODES"
            module unload cray-snplauncher # make sure snplauncher module is not loaded
            ;;
        X64_JEANZAY*) # Setup for Jean-Zay: project group is chars 2-4 of the login
            export GROUP_IDRIS=$(echo ${USER} | cut -c 2-4)
            ;;
        openmpi_KARA_MERCATOR*) # Mercator local cluster: 32 cores per node
            NB_PROC_NODE=32
            NB_NODES=$(awk -v NB_PROC="$TOTAL_NPROCS" -v NB_PROC_NODE="$NB_PROC_NODE" 'BEGIN{print int((NB_PROC-1)/NB_PROC_NODE)+1 }')
            if [ ${NB_PROC} -le 128 ] ; then QUEUE=multi; fi
            ;;
        ifort_beaufix_sette*) # Mercator Beaufix: 40 cores per node
            NB_PROC_NODE=40
            NB_NODES=$(awk -v NB_PROC="$TOTAL_NPROCS" -v NB_PROC_NODE="$NB_PROC_NODE" 'BEGIN{print int((NB_PROC-1)/NB_PROC_NODE)+1 }')
            ;;
        *)
            # default: one MPI task per node
            NB_NODES=${NB_PROC}
            ;;
    esac
}
compute_nb_nodes
#
# Pass settings into job file by using sed to edit predefined strings
# (sed reads the template directly; the original piped it through cat).
#
sed -e"s/\(=\| \)NODES/\1${NB_NODES}/" \
    -e"s/TOTAL_NPROCS/${TOTAL_NPROCS}/" \
    -e"s/NPROCS/${NB_PROC}/" \
    -e"s/NXIOPROCS/${NXIO_PROC}/" \
    -e"s:DEF_SETTE_DIR:${SETTE_DIR}:" -e"s:DEF_INPUT_DIR:${INPUT_DIR}:" \
    -e"s:DEF_EXE_DIR:${EXE_DIR}:" \
    -e"s:DEF_CONFIG_DIR:${CONFIG_DIR}:" \
    -e"s:DEF_TOOLS_DIR:${TOOLS_DIR}:" \
    -e"s:MPI_FLAG:${MPI_FLAG}:" \
    -e"s:DEF_NEMO_VALIDATION:${NEMO_VALID}:" -e"s:DEF_NEW_CONF:${NEW_CONF}:" \
    -e"s:DEF_CMP_NAM:${CMP_NAM}:" -e"s:DEF_TEST_NAME:${TEST_NAME}:" \
    ${SETTE_DIR}/job_batch_template > run_sette_test.job
# Second, machine-specific pass over the rendered job script: substitute
# the placeholders that only exist on some batch systems.
case ${COMPILER} in
    openmpi_KARA_MERCATOR_XIOS )
        sed -e"s/NPROC_NODE/${NB_PROC_NODE}/" \
            -e"s:QUEUE:${QUEUE}:" run_sette_test.job > run_sette_test1.job
        mv run_sette_test1.job run_sette_test.job
        ;;
    XC40_METO*)
        sed -e"s/SELECT/${SELECT}/" run_sette_test.job > run_sette_test1.job
        mv run_sette_test1.job run_sette_test.job
        ;;
    X64_JEANZAY*)
        sed -e"s/GROUP_IDRIS/${GROUP_IDRIS}/" run_sette_test.job > run_sette_test1.job
        mv run_sette_test1.job run_sette_test.job
        ;;
esac
#
# create the unique submission job script
#
# Append the rendered run_sette_test.job to ${JOB_FILE}. On the first
# call the whole file is taken as-is; on later calls everything after the
# "# BODY" marker of the new fragment replaces the "# END_BODY" marker of
# the accumulated file (the fragment carries a fresh "# END_BODY" of its
# own). Wrapped in a function (called once, immediately below) so the
# merge logic can be exercised in isolation.
assemble_job_file() {
    if [ ! -f "$JOB_FILE" ] ; then
        mv run_sette_test.job "$JOB_FILE"
    else
        # line number of the terminating marker in the accumulated file;
        # keep everything before it
        e=$(grep -n "# END_BODY" "${JOB_FILE}" | cut -d : -f 1)
        e=$(($e - 1))
        head -n "$e" "$JOB_FILE" > "${JOB_FILE}_new"
        mv "${JOB_FILE}_new" "${JOB_FILE}"
        # number of lines after the "# BODY" marker of the new fragment
        l=$(wc -l < run_sette_test.job)
        b=$(grep -n "# BODY" run_sette_test.job | cut -d : -f 1)
        t=$(($l - $b))
        tail -n "$t" run_sette_test.job >> "$JOB_FILE"
    fi
}
assemble_job_file
# Make the assembled job script executable and report completion.
chmod a+x "$JOB_FILE" ; echo "$JOB_FILE is ready"

#fi