#####################################################
# Author : Simona Flavoni for NEMO
# Contact : sflod@locean-ipsl.upmc.fr
#
# ----------------------------------------------------------------------
# NEMO/SETTE , NEMO Consortium (2010)
# Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt)
# ----------------------------------------------------------------------
#
# Some scripts called by sette.sh
# prepare_job.sh : creates the job script for running job
######################################################
---|
#set -vx
# Run bash in POSIX mode for stricter, more portable behaviour.
set -o posix
#set -u
#set -e
#+
---|
#
# ================
# prepare_job.sh
# ================
#
# -------------------------------------------------
# script that creates the job script for NEMO tests
# -------------------------------------------------
#
# SYNOPSIS
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE
#
#
# DESCRIPTION
# ===========
#
#   Part of the SETTE package to run tests for NEMO
#
#   prepare the script $JOB_FILE to run the tests
#
# EXAMPLES
# ========
#
# ::
#
#  $ ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG $JOB_FILE
#
#  prepare the $JOB_FILE for execution
#
#
# TODO
# ====
#
#   option debug
#
#
# EVOLUTIONS
# ==========
#
# $Id: prepare_job.sh 3050 2011-11-07 14:11:34Z acc $
#
#
#
#   * creation
#
#-
#

---|
# Usage message for the script.
# BUG FIX: the second usage= assignment used to overwrite the first, so the
# "Usage :" line was never displayed; both lines are now kept in one string.
# The literal $JOB_FILE in the example is escaped so it is shown verbatim
# instead of expanding to an (empty) variable.
usage=" Usage : ./prepare_job.sh INPUT_FILE_CONFIG_NAME NUMBER_PROC TEST_NAME MPI_FLAG JOB_FILE NUM_XIO_SERVERS
 example : ./prepare_job.sh input_ORCA2_ICE_PISCES.cfg 8 SHORT no/yes \$JOB_FILE 0 2"

# Six arguments are mandatory; a 7th (NEMO validation directory) is also
# read below but was never included in this count -- kept at 6 so existing
# callers passing 6 arguments still work.
minargcount=6
if [ ${#} -lt ${minargcount} ]
then
   echo "not enough arguments for prepare_job.sh script"
   echo "control number of argument of prepare_job.sh in sette.sh"
   echo "${usage}"
   exit 1
fi
unset minargcount
---|
# Make sure the shared SETTE log file exists before anything appends to it.
[ -f "${SETTE_DIR}/output.sette" ] || touch "${SETTE_DIR}/output.sette"

#
# set and export TEST_NAME. It will be used within the post_test_tidyup function
# NOTE(review): no explicit `export` is visible here -- presumably the job
# template propagates these values; confirm against sette.sh.
#
INPUTARFILE=$1   # file listing the input tarfiles for this configuration
NB_PROC=$2       # number of ocean (NEMO) processes
TEST_NAME=$3     # test identifier (e.g. SHORT, LONG)
MPI_FLAG=$4      # "yes"/"no": run under MPI or not
JOB_FILE=$5      # batch job script to create or extend
NXIO_PROC=$6     # number of detached XIOS server processes
NEMO_VALID=$7    # NEMO validation directory
---|
[3520] | 97 | |
---|
| 98 | # export EXE_DIR. This directory is used to execute model |
---|
| 99 | # |
---|
| 100 | # |
---|
| 101 | # |
---|
| 102 | echo "date: `date`" >> ${SETTE_DIR}/output.sette |
---|
| 103 | echo "" >> ${SETTE_DIR}/output.sette |
---|
| 104 | echo "running config: ${NEW_CONF}" >> ${SETTE_DIR}/output.sette |
---|
| 105 | echo "" >> ${SETTE_DIR}/output.sette |
---|
| 106 | echo "list of cpp_keys: " >> ${SETTE_DIR}/output.sette |
---|
[7715] | 107 | echo "`more ${CONFIG_DIR}/${NEW_CONF}/cpp_${NEW_CONF}.fcm`" >> ${SETTE_DIR}/output.sette |
---|
[3520] | 108 | echo "" >> ${SETTE_DIR}/output.sette |
---|
| 109 | echo "compiling with: ${CMP_NAM}" >> ${SETTE_DIR}/output.sette |
---|
| 110 | echo "" >> ${SETTE_DIR}/output.sette |
---|
| 111 | echo "executing script : \"fcm_job $@\" " >> ${SETTE_DIR}/output.sette |
---|
| 112 | echo " " >> ${SETTE_DIR}/output.sette |
---|
| 113 | |
---|
################################################################
# SET INPUT
# get the input tarfile(s) if needed
# The INPUTARFILE lists, one per line: <tarfile name> <forcing sub-directory>
if [ "$(wc -w < ${SETTE_DIR}/$INPUTARFILE)" -ne 0 ] ; then
   echo "looking for input files in ${SETTE_DIR}/$INPUTARFILE " >> ${SETTE_DIR}/output.sette
   # number of tarfiles: NBTAR (informational only)
   NBTAR=$(wc -l < ${SETTE_DIR}/$INPUTARFILE)
   echo "NB of tarfiles ${NBTAR} " >> ${SETTE_DIR}/output.sette
   # loop on tarfiles: read file name and directory
   while read tar_file dir_conf_forc
   do
      echo looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc} ; echo looking for tarfile ${tar_file} and directory ${FORCING_DIR}/${dir_conf_forc} >> ${SETTE_DIR}/output.sette
      if [ -f ${FORCING_DIR}/${tar_file} ] && [ -d ${FORCING_DIR}/${dir_conf_forc} ] ; then
         # Tarfile and input dir already unpacked: only (re)check the links below
         echo "Tarfile and input dir ar there, only check the links" >> ${SETTE_DIR}/output.sette
      else
         # stop if the tarfile itself is missing
         if [ ! -f ${FORCING_DIR}/${tar_file} ] ; then
            echo "tarfile ${FORCING_DIR}/${tar_file} cannot be found we stop " ; exit 2
         fi

         echo "mkdir ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
         mkdir ${FORCING_DIR}/${dir_conf_forc}
         cd ${FORCING_DIR}/${dir_conf_forc}
         echo " extract from tarfile ${FORCING_DIR}/${tar_file} in ${FORCING_DIR}/${dir_conf_forc}" >> ${SETTE_DIR}/output.sette
         tar xvof ${FORCING_DIR}/${tar_file}
         # BUG FIX: the old `gunzip -f \`find . -name "*.gz"\`` made gunzip
         # read stdin when the archive contained no .gz member; -exec is a
         # safe no-op in that case.
         find . -name "*.gz" -exec gunzip -f {} +
      fi
      # link every forcing file into the run directory (skip existing ones)
      cd ${FORCING_DIR}/${dir_conf_forc}
      for fida in *
      do
         [ -f ${EXE_DIR}/${fida} ] || ln -s ${FORCING_DIR}/${dir_conf_forc}/${fida} ${EXE_DIR}/${fida}
      done
   done < ${SETTE_DIR}/$INPUTARFILE

else
   echo "no input file to be searched "
fi
################################################################
---|

################################################################
# RUN OPA
# Move to the execution directory and abort early if the model
# executable has not been built.
cd ${EXE_DIR}
if [ ! -r ${EXE_DIR}/nemo ]
then
   echo "executable nemo does not exist"
   echo "executable nemo does not exist, exit" >> ${SETTE_DIR}/output.sette
   exit 1
fi
---|

# example for NOCS ClusterVision system using SLURM batch submission (requires ${SETTE_DIR}/sette_batch_template file)
#
# Compute the number of batch nodes (NB_NODES and friends) needed for this
# run; the packing rules are machine dependent, hence the per-COMPILER case.
# if [ ${MPI_FLAG} == "no" ] ; then
case ${COMPILER} in
   X64_MOBILIS)
      # ocean + XIOS processes share nodes of 16 cores
      NB_REM=$( echo $NB_PROC $NXIO_PROC | awk '{print ( $1 + $2 ) % 16}')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 16
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1 + $2 ) / 16}')
      else
         # not an integer multiple of 16: round up the number of nodes required
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{printf("%d",($1 + $2 ) / 16 + 1 )}')
      fi
      ;;
   XC_ARCHER_INTEL)
      # ocean cores are packed 24 to a node
      NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 24 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 24
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1) / 24}')
      else
         # not an integer multiple of 24: round up the number of nodes required
         NB_NODES=$( echo $NB_PROC | awk '{printf("%d",($1) / 24 + 1 )}')
      fi
      # xios cores are sparsely packed at 4 to a node
      # but can not share nodes with the ocean cores
      # BUG FIX: only one field is piped into awk, so the remainder must use
      # $1 -- the old '$2 % 4' always evaluated to 0 (empty field).
      NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of xios processes is an integer multiple of 4
         NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{print ($1 + ( $2 / 4 ))}')
      else
         # not an integer multiple of 4: round up the number of nodes required
         NB_NODES=$( echo $NB_NODES $NXIO_PROC | awk '{print ($1 + ( $2 / 4 ) + 1)}')
      fi
      ;;
   XC40_METO*) #Setup for Met Office XC40 with any compiler
      # ocean cores are packed 32 to a node
      # If we need more than one node then have to use parallel queue and XIOS must have a node to itself
      NB_REM=$( echo $NB_PROC | awk '{print ( $1 % 32 ) }')
      if [ ${NB_REM} == 0 ] ; then
         # number of processes required is an integer multiple of 32
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{print ($1) / 32}')
      else
         # not an integer multiple of 32: round up the number of nodes required
         NB_NODES=$( echo $NB_PROC $NXIO_PROC | awk '{printf("%d",($1) / 32 + 1 )}')
      fi
      # xios cores are sparsely packed at 4 to a node
      if [ $NXIO_PROC == 0 ] ; then
         NB_XNODES=0
      else
         NB_REM=$( echo $NXIO_PROC | awk '{print ( $1 % 4 ) }')
         if [ ${NB_REM} == 0 ] ; then
            # NOTE(review): this exact-multiple branch still adds +1, unlike
            # the analogous branches above -- looks intentional for the Met
            # Office queue setup but should be confirmed; left unchanged.
            NB_XNODES=$( echo $NXIO_PROC | awk '{print (( $1 / 4 ) + 1)}')
         else
            # not an integer multiple of 4: round up the number of nodes required
            NB_XNODES=$( echo $NXIO_PROC | awk '{printf("%d",($1) / 4 + 1) }')
         fi
      fi
      if [ ${NB_XNODES} -ge 1 ] ; then
         NB_NODES=$((NB_NODES+NB_XNODES))
      fi
      echo NB_XNODES=${NB_XNODES}
      echo Total NB_NODES=${NB_NODES}
      QUEUE=normal
      SELECT="select=$NB_NODES"
      module unload cray-snplauncher #Make sure snplauncher module is not loaded
      ;;
   X64_JEANZAY*) #Setup for Jean-Zay
      export GROUP_IDRIS=$(echo ${USER} | cut -c 2-4)
      ;;
   openmpi_KARA_MERCATOR*)
      NB_PROC_NODE=32
      # BUG FIX: the node size must be passed to awk with -v; inside single
      # quotes the shell does not expand $NB_PROC_NODE and awk treated it as
      # a field reference ($0), so NB_NODES always came out as 1.
      NB_NODES=$( echo $NB_PROC | awk -v nppn=${NB_PROC_NODE} '{print ( $1 - $1 % nppn ) / nppn }' )
      if [ ${NB_PROC} -le 128 ] ; then
         QUEUE=multi
      fi
      ;;
   ifort_beaufix_sette*)
      NB_PROC_NODE=40
      # same -v fix as for KARA above
      NB_NODES=$( echo $NB_PROC | awk -v nppn=${NB_PROC_NODE} '{print ( $1 - $1 % nppn ) / nppn }' )
      ;;
   *)
      # default: one process per node
      NB_NODES=${NB_PROC}
      ;;
esac
---|
| 271 | # |
---|
| 272 | # Pass settings into job file by using sed to edit predefined strings |
---|
| 273 | # |
---|
[4373] | 274 | TOTAL_NPROCS=$(( $NB_PROC + $NXIO_PROC )) |
---|
[12798] | 275 | cat ${SETTE_DIR}/job_batch_template | sed -e"s/\(=\| \)NODES/\1${NB_NODES}/" \ |
---|
[4373] | 276 | -e"s/TOTAL_NPROCS/${TOTAL_NPROCS}/" \ |
---|
[4245] | 277 | -e"s/NPROCS/${NB_PROC}/" \ |
---|
| 278 | -e"s/NXIOPROCS/${NXIO_PROC}/" \ |
---|
[3520] | 279 | -e"s:DEF_SETTE_DIR:${SETTE_DIR}:" -e"s:DEF_INPUT_DIR:${INPUT_DIR}:" \ |
---|
| 280 | -e"s:DEF_EXE_DIR:${EXE_DIR}:" \ |
---|
| 281 | -e"s:DEF_CONFIG_DIR:${CONFIG_DIR}:" \ |
---|
[8468] | 282 | -e"s:DEF_TOOLS_DIR:${TOOLS_DIR}:" \ |
---|
[3520] | 283 | -e"s:MPI_FLAG:${MPI_FLAG}:" \ |
---|
[11161] | 284 | -e"s:DEF_NEMO_VALIDATION:${NEMO_VALID}:" -e"s:DEF_NEW_CONF:${NEW_CONF}:" \ |
---|
[3520] | 285 | -e"s:DEF_CMP_NAM:${CMP_NAM}:" -e"s:DEF_TEST_NAME:${TEST_NAME}:" > run_sette_test.job |
---|

# Second pass: machine-specific substitutions in the generated job script.
# NOTE(review): this pattern is openmpi_KARA_MERCATOR_XIOS while the node
# computation above matches openmpi_KARA_MERCATOR* -- confirm the non-XIOS
# variant's template really has no NPROC_NODE/QUEUE placeholders.
case ${COMPILER} in
   openmpi_KARA_MERCATOR_XIOS )
      sed -e"s/NPROC_NODE/${NB_PROC_NODE}/" \
          -e"s:QUEUE:${QUEUE}:" run_sette_test.job > run_sette_test1.job
      mv run_sette_test1.job run_sette_test.job
      ;;
   XC40_METO*)
      sed -e"s/SELECT/${SELECT}/" run_sette_test.job > run_sette_test1.job
      mv run_sette_test1.job run_sette_test.job
      ;;
   X64_JEANZAY*)
      sed -e"s/GROUP_IDRIS/${GROUP_IDRIS}/" run_sette_test.job > run_sette_test1.job
      mv run_sette_test1.job run_sette_test.job
      ;;
esac
---|
#
# create the unique submission job script
#
# If $JOB_FILE does not exist yet this run's job becomes the job file;
# otherwise splice this run's body (everything after its "# BODY" marker)
# into the existing file just before its "# END_BODY" marker.
# NOTE(review): assumes each marker occurs exactly once in its file.
if [ ! -f "$JOB_FILE" ] ; then
   mv run_sette_test.job $JOB_FILE
else
   # line number of "# END_BODY" in the existing job file; keep what precedes it
   e=$(grep -n "# END_BODY" ${JOB_FILE} | cut -d : -f 1)
   e=$(($e - 1))
   head -$e $JOB_FILE > ${JOB_FILE}_new
   mv ${JOB_FILE}_new ${JOB_FILE}
   # append everything after "# BODY" of the new job
   # (wc -l < file avoids the old filename-stripping sed hack)
   l=$(wc -l < run_sette_test.job)
   b=$(grep -n "# BODY" run_sette_test.job | cut -d : -f 1)
   t=$(($l - $b))
   tail -$t run_sette_test.job >> $JOB_FILE
fi

chmod a+x $JOB_FILE ; echo "$JOB_FILE is ready"

#fi
---|