source: trunk/libIGCM/AA_job_debug @ 1572

Last change on this file: r1572, checked in by aclsce, 15 months ago

Added missing parenthesis around shell variable for jeanzay case.

#-Q- curie #!/bin/ksh
#-Q- curie ######################
#-Q- curie ## CURIE   TGCC/CEA ##
#-Q- curie ######################
#-Q- curie #MSUB -r ::Jobname::        # Job Name
#-Q- curie #MSUB -o Script_Output_::Jobname::.000001    # standard output
#-Q- curie #MSUB -e Script_Output_::Jobname::.000001    # error output
#-Q- curie #MSUB -eo
#-Q- curie #MSUB -n ::JobNumProcTot::  # Number of MPI tasks (SPMD case) or cores (MPMD case)
#-Q- curie #MSUB -c ::openMPthreads::  # Number of OpenMP threads. To specify only for SPMD
#-Q- curie #MSUB -x                    # Exclusive node. To specify only for MPMD, together with the option below
#-Q- curie #MSUB -E '--cpu_bind=none'
#-Q- curie ##MSUB -E '--distribution cyclic'
#-Q- curie #MSUB -T 86400              # Wall clock limit (seconds)
#-Q- curie #MSUB -q standard           # thin nodes
#-Q- curie ##MSUB -U high
#-Q- curie #MSUB -U medium
#-Q- curie ##MSUB -U low
#-Q- curie #MSUB -A ::default_project::
#-Q- curie #MSUB -E --no-requeue
#-Q- curie # Below: specific options that can be activated
#-Q- curie ##MSUB -q ivybridge         # Option for Airain
#-Q- curie
#-Q- curie BATCH_NUM_PROC_TOT=$BRIDGE_MSUB_NPROC
#-Q- curie set +x
#-Q- irene #!/bin/ksh
#-Q- irene ######################
#-Q- irene ## IRENE   TGCC/CEA ##
#-Q- irene ######################
#-Q- irene #MSUB -r ::Jobname::        # Job Name
#-Q- irene #MSUB -o Script_Output_::Jobname::.000001    # standard output
#-Q- irene #MSUB -e Script_Output_::Jobname::.000001    # error output
#-Q- irene #MSUB -eo
#-Q- irene #MSUB -n ::JobNumProcTot::  # Number of MPI tasks (SPMD case) or cores (MPMD case)
#-Q- irene #MSUB -c ::openMPthreads::  # Number of OpenMP threads. To specify only for SPMD
#-Q- irene #MSUB -x                    # Exclusive node. To specify only for MPMD, together with the option below
#-Q- irene #MSUB -E '--cpu_bind=none'
#-Q- irene #MSUB -T ::WallTime::       # Wall clock limit (seconds)
#-Q- irene #MSUB -A ::default_project::
#-Q- irene #MSUB -q skylake
#-Q- irene #MSUB -m store,work,scratch
#-Q- irene
#-Q- irene BATCH_NUM_PROC_TOT=$BRIDGE_MSUB_NPROC
#-Q- irene set +x
#-Q- irene-amd #!/bin/ksh
#-Q- irene-amd ##########################
#-Q- irene-amd ## IRENE-AMD   TGCC/CEA ##
#-Q- irene-amd ##########################
#-Q- irene-amd #MSUB -r ::Jobname::        # Job Name
#-Q- irene-amd #MSUB -o Script_Output_::Jobname::.000001    # standard output
#-Q- irene-amd #MSUB -e Script_Output_::Jobname::.000001    # error output
#-Q- irene-amd #MSUB -eo
#-Q- irene-amd #MSUB -n ::JobNumProcTot::  # Number of MPI tasks (SPMD case) or cores (MPMD case)
#-Q- irene-amd #MSUB -c ::openMPthreads::  # Number of OpenMP threads. To specify only for SPMD
#-Q- irene-amd #MSUB -x                    # Exclusive node. To specify only for MPMD, together with the option below
#-Q- irene-amd #MSUB -T ::WallTime::       # Wall clock limit (seconds)
#-Q- irene-amd #MSUB -A ::default_project::
#-Q- irene-amd #MSUB -q rome
#-Q- irene-amd #MSUB -m store,work,scratch
#-Q- irene-amd
#-Q- irene-amd BATCH_NUM_PROC_TOT=$BRIDGE_MSUB_NPROC
#-Q- irene-amd set +x
#-Q- jeanzay #!/bin/ksh
#-Q- jeanzay ######################
#-Q- jeanzay ## JEANZAY    IDRIS ##
#-Q- jeanzay ######################
#-Q- jeanzay #SBATCH --job-name=::Jobname::        # Job Name
#-Q- jeanzay #SBATCH --output=Script_Output_::Jobname::.000001    # standard output
#-Q- jeanzay #SBATCH --error=Script_Output_::Jobname::.000001     # error output
#-Q- jeanzay #SBATCH --ntasks=::JobNumProcTot::    # Number of MPI tasks
#-Q- jeanzay #SBATCH --cpus-per-task=::openMPthreads::  # Number of OpenMP threads
#-Q- jeanzay #SBATCH --hint=nomultithread          # 1 MPI process per physical core (no hyperthreading)
#-Q- jeanzay #SBATCH --time=::WallTime::           # Wall clock limit (minutes)
#-Q- jeanzay #SBATCH --account ::default_project::@cpu
#-Q- jeanzay
#-Q- jeanzay ##BATCH_NUM_PROC_TOT=$BRIDGE_SBATCH_NPROC
#-Q- jeanzay set +x
#-Q- ada #!/bin/ksh
#-Q- ada # ######################
#-Q- ada # ##  ADA       IDRIS ##
#-Q- ada # ######################
#-Q- ada # Job name
#-Q- ada # @ job_name = ::Jobname::
#-Q- ada # Standard output file name
#-Q- ada # @ output = Script_Output_::Jobname::.000001
#-Q- ada # Error output file name
#-Q- ada # @ error = Script_Output_::Jobname::.000001
#-Q- ada # Job type
#-Q- ada # @ job_type = parallel
#-Q- ada # Total number of tasks
#-Q- ada # @ total_tasks = ::JobNumProcTot::
#-Q- ada # Specific option for OpenMP parallelization: number of OpenMP threads per MPI task
#-Q- ada # @ parallel_threads = ::openMPthreads::
#-Q- ada # Memory: as_limit is set per process, allowing 3.5gb per core. With 4 threads per process, use at most as_limit=14gb
#-Q- ada # @ as_limit = 3.5gb
#-Q- ada # Maximum CPU time per task hh:mm:ss
#-Q- ada # @ wall_clock_limit = 1:00:00
#-Q- ada # @ environment = "BATCH_NUM_PROC_TOT=::JobNumProcTot::" ; wall_clock_limit=$(wall_clock_limit)
#-Q- ada # End of the header options
#-Q- ada # @ queue
#-Q- obelix ######################
#-Q- obelix ## OBELIX      LSCE ##
#-Q- obelix ######################
#-Q- obelix #PBS -N ::Jobname::
#-Q- obelix #PBS -m a
#-Q- obelix #PBS -j oe
#-Q- obelix #PBS -q mediump
#-Q- obelix #PBS -o Script_Output_::Jobname::.000001
#-Q- obelix #PBS -S /bin/ksh
#-Q- obelix #PBS -v BATCH_NUM_PROC_TOT=::JobNumProcTot::
#-Q- obelix #PBS -l nodes=1:ppn=::JobNumProcTot::
#-Q- mesoipsl #!/bin/ksh
#-Q- mesoipsl ######################
#-Q- mesoipsl ## MESO ESPRI IPSL  ##
#-Q- mesoipsl ######################
#-Q- mesoipsl #SBATCH --job-name=::Jobname::        # Job Name
#-Q- mesoipsl #SBATCH --output=Script_Output_::Jobname::.000001    # standard output
#-Q- mesoipsl #SBATCH --error=Script_Output_::Jobname::.000001     # error output
#-Q- mesoipsl #SBATCH --ntasks=::JobNumProcTot::    # Number of MPI tasks
#-Q- mesoipsl #SBATCH --cpus-per-task=::openMPthreads::  # Number of OpenMP threads
#-Q- mesoipsl #SBATCH --hint=nomultithread          # 1 MPI process per physical core (no hyperthreading)
#-Q- mesoipsl #SBATCH --time=30                     # Wall clock limit (minutes)
#-Q- mesoipsl set +x
#-Q- ifort_CICLAD ######################
#-Q- ifort_CICLAD ##   CICLAD    IPSL ##
#-Q- ifort_CICLAD ######################
#-Q- ifort_CICLAD #PBS -N ::Jobname::
#-Q- ifort_CICLAD #PBS -m a
#-Q- ifort_CICLAD #PBS -j oe
#-Q- ifort_CICLAD ###PBS -q h12   # Queue for 12 hours at ciclad only
#-Q- ifort_CICLAD #PBS -o Script_Output_::Jobname::.000001
#-Q- ifort_CICLAD #PBS -S /bin/ksh
#-Q- ifort_CICLAD #PBS -v BATCH_NUM_PROC_TOT=::JobNumProcTot::
#-Q- ifort_CICLAD #PBS -l nodes=1:ppn=::JobNumProcTot::
#-Q- ifort_CICLAD #PBS -l mem=6gb
#-Q- ifort_CICLAD #PBS -l vmem=30gb
#-Q- default #!/bin/ksh
#-Q- default ##################
#-Q- default ## DEFAULT HOST ##
#-Q- default ##################
#-Q- default # For MPI use, uncomment the next line:
#-Q- default #BATCH_NUM_PROC_TOT=::JobNumProcTot::

#**************************************************************
# Author: Sebastien Denvil
# Contact: Sebastien.Denvil__at__ipsl.jussieu.fr
# $Revision:: 1536                                     $ Revision of last commit
# $Author:: rpennel                                    $ Author of last commit
# $Date:: 2020-07-03 15:16:53 +0200 (Fri, 03 Jul 2020) $ Date of last commit
# IPSL (2006)
#  This software is governed by the CeCILL licence; see libIGCM/libIGCM_CeCILL.LIC
#
#**************************************************************

date

echo
echo "########################################"
echo "#    ANOTHER GREAT DEBUG SIMULATION    #"
echo "########################################"
echo

#D------------------------------------------------------
#D- ENVIRONMENT
#D-     - Loading environment needed to run
#D------------------------------------------------------

MODIPSL=::modipsl::
libIGCM=${MODIPSL}/libIGCM
SRF_comp=::SRF_comp::
OCE_comp=::OCE_comp::
CPL_comp=::CPL_comp::
CHM_comp=::CHM_comp::
JobName=::JOBNAME::
::SUBMIT_DIR::
ExecutionType=::EXECUTION_TYPE::
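# The ::...:: fields above are placeholders filled in when the job is generated
# from this template. ExecutionType selects the launch mode: the Jean-Zay block
# further down treats the value 2 as the MPMD + MPI/OpenMP case (other values
# are assumed to correspond to the plain SPMD case).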

#-Q- jeanzay . /gpfslocalsup/spack_soft/environment-modules/current/init/ksh
#-Q- mesoipsl . /etc/profile.d/modules.sh

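# Source the machine-specific environment file (::ENVFILE:: is substituted like
# the placeholders above), then list the modules it loaded.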
. ::ENVFILE::

module list

#D------------------------------------------------------
#D- EXECUTION
#D-     - Copy binary files from the modipsl directory
#D-     - Remove some output restart files
#D-     - Execution
#D------------------------------------------------------

cd ${SUBMIT_DIR}

while read EXEIN EXEOUT ; do
  eval EXEIN=${EXEIN}
  cp ${MODIPSL}/bin/${EXEIN} ${EXEOUT}
done < components_binary.txt
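# components_binary.txt is expected to hold one "source target" pair per line:
# the source binary is taken from ${MODIPSL}/bin and copied under the target
# name into ${SUBMIT_DIR} (a hypothetical line: "gcm.e gcm.e").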

if [ X${SRF_comp} = Xy ] ; then
  [ -f sechiba_rest_out.nc ] && rm -f sechiba_rest_out.nc
  [ -f stomate_rest_out.nc ] && rm -f stomate_rest_out.nc
  [ -f driver_rest_out.nc ] && rm -f driver_rest_out.nc
fi
if [ X${OCE_comp} = Xy ] ; then
  [ X${JobName} != X ] && rm -f ${JobName}*restart*.nc
fi

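# Coupled case: the first pass saves the coupling fields (sstoc, flxat, icbrg,
# icshf) as *_orig.nc copies; later passes restore them from those copies so
# that successive debug runs restart from the same input fields.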
if [ X${CPL_comp} = Xy ] ; then
  if [ -f sstoc_orig.nc ] ; then
    cp sstoc_orig.nc sstoc.nc
    cp flxat_orig.nc flxat.nc
    cp icbrg_orig.nc icbrg.nc
    cp icshf_orig.nc icshf.nc
  else
    cp sstoc.nc sstoc_orig.nc
    cp flxat.nc flxat_orig.nc
    cp icbrg.nc icbrg_orig.nc
    cp icshf.nc icshf_orig.nc
  fi
fi
if [ X${CHM_comp} = Xy ] ; then
  [ -f reprecipinsoil.dat ] && rm -f reprecipinsoil.dat
fi

#-Q- jeanzay # Case MPMD + MPI/OpenMP
#-Q- jeanzay if [ ${ExecutionType} -eq 2 ] ; then
#-Q- jeanzay    _bkIFS=$IFS;
#-Q- jeanzay    IFS=$'\n'; set -f
#-Q- jeanzay    listnodes=($(< <( scontrol show hostnames $SLURM_JOB_NODELIST )))
#-Q- jeanzay    IFS=$_bkIFS; set +f
#-Q- jeanzay    rm -f hostlist ; cp hostlist_template hostlist
#-Q- jeanzay    for nb_proc in `seq 0 $(($SLURM_JOB_NUM_NODES-1))`; do
#-Q- jeanzay        mv hostlist hostlist_tmp
#-Q- jeanzay        host_value=${listnodes[${nb_proc}]}
#-Q- jeanzay        sed -e "s/node_${nb_proc}_X/${host_value}/" hostlist_tmp > hostlist
#-Q- jeanzay    done
#-Q- jeanzay    export SLURM_HOSTFILE=./hostlist
#-Q- jeanzay fi
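#-Q- jeanzay # The loop above replaces each node_<i>_X placeholder of
#-Q- jeanzay # hostlist_template with the i-th host name returned by scontrol;
#-Q- jeanzay # SLURM_HOSTFILE then lets the launcher place the MPMD tasks on those nodes.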
#-Q- jeanzay # Workaround at Jean-Zay
#-Q- jeanzay source $I_MPI_ROOT/intel64/bin/mpivars.sh release_mt


::EXECUTION:: > out_execution 2>&1
RET=$?
if [ ${RET} -gt 0 ] ; then
    echo "Return code of executable: ${RET}"
fi
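# Note: a non-zero return code is only reported above; the job itself still
# exits normally, and the full model output is kept in out_execution.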

date
exit