Changeset 4934

Timestamp:  2014-12-01T11:36:36+01:00 (10 years ago)
Location:   branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM
Files:      22 edited, 5 copied
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/CONFIG/SHARED/namelist_ref (r4932 → r4934)

      ln_traadv_qck    = .false.  !  QUICKEST scheme
      ln_traadv_msc_ups= .false.  !  use upstream scheme within muscl
 +    ln_traadv_tvd_zts= .false.  !  TVD scheme with sub-timestepping of vertical tracer advection
   /
   !-----------------------------------------------------------------------
…
      ln_dynadv_cen2= .false.  !  flux form - 2nd order centered scheme
      ln_dynadv_ubs = .false.  !  flux form - 3rd order UBS scheme
 +    ln_dynzad_zts = .false.  !  Use (T) sub timestepping for vertical momentum advection
   /
   !-----------------------------------------------------------------------
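Both new switches default to .false. in the reference namelist. As a minimal sketch (not part of the changeset), a namtra_adv block that turns on the sub-timestepped tracer scheme and leaves every other scheme off would read as follows; the flag names are those read by tra_adv_init in traadv.F90 below, and exactly one scheme flag must be true to pass its "Choose ONE advection scheme" check:

   &namtra_adv
      ln_traadv_cen2    = .false.
      ln_traadv_tvd     = .false.
      ln_traadv_muscl   = .false.
      ln_traadv_muscl2  = .false.
      ln_traadv_ubs     = .false.
      ln_traadv_qck     = .false.
      ln_traadv_tvd_zts = .true.   ! TVD with sub-timestepped vertical advection (selects nadv = 7)
   /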
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/CONFIG/makenemo (r4148 → r4934)

  #
  # - NEW_CONF   : configuration to be created
 -# - REF_CONF   : reference configuration to build the new one
 +# - REF_CONF   : reference configuration to build the new one from
  # - CMP_NAM    : compiler name
  # - NBR_PRC    : number of processes used to compile
 +# - USP_CONF   : unsupported (external) configuration to build the new one from
  # - NEM_SUBDIR : NEMO subdirectory used (specified)
  #
…
  # - TOOLS_DIR  : "   "   "
  # - NEMO_DIR   : "   "   "
 +# - REMOTE_CTL : URL link to a remote resource list for an external configuration
 +#                which is not part of the reference suite
 +# - LOCAL_REF  : Nearest reference configuration to an external configuration
 +#                which is not part of the reference suite
 +#                (used to populate work directories if remote access is not available)
  #
  # EXAMPLES
…
  x_n="";
  x_r="";
 +x_u="";
  x_m="";
  x_t="";
…
  export AGRIFUSE=10
  declare -a TAB
 +declare -a REMOTE_CTL
 +declare -a LOCAL_REF
  list_key=0
  chk_key=1
…
  #-
  #- Choice of the options ---
 -while getopts :hd:n:r:m:j:e:s:v:t:k: V
 +while getopts :hd:n:r:u:m:j:e:s:v:t:k: V
  do
       case $V in
       (h) x_h=${OPTARG};
           echo "Usage   : "${b_n} \
 -            " [-h] [-n name] [-m arch] [-d "dir1 dir2"] [-r conf] [-s Path] [-e Path] [-j No] [-v No] [-k 0/1]";
 +            " [-h] [-n name] [-m arch] [-d "dir1 dir2"] [-r conf] [-u conf] [-s Path] [-e Path] [-j No] [-v No] [-k 0/1]";
           echo " -h           : help";
           echo " -h institute : specific help for consortium members";
…
           echo " -d dir       : choose NEMO sub-directories";
           echo " -r conf      : choose reference configuration";
 +         echo " -u conf      : choose an unsupported (external) configuration";
           echo " -s Path      : choose alternative location for NEMO main directory";
           echo " -e Path      : choose alternative location for MY_SRC directory";
…
           echo "Available configurations :"; cat ${CONFIG_DIR}/cfg.txt;
           echo "";
 +         echo "Available unsupported (external) configurations :"; cat ${CONFIG_DIR}/uspcfg.txt;
 +         echo "";
           echo "Example to remove bad configuration ";
           echo "./makenemo -n MY_CONFIG clean_config";
…
       (n)  x_n=${OPTARG};;
       (r)  x_r=${OPTARG};;
 +     (u)  x_u=${OPTARG};;
       (m)  x_m=${OPTARG};;
       (j)  x_j=${OPTARG};;
…
  NEM_SUBDIR=${x_d}
  REF_CONF=${x_r}
 +USP_CONF=${x_u}
  NEMO_TDIR=${x_t:-$NEMO_TDIR}
  export NEMO_TDIR=${NEMO_TDIR:-$CONFIG_DIR}
…
      echo "Available configurations :"
      cat ${CONFIG_DIR}/cfg.txt
 +    echo
 +    echo "Available unsupported (external) configurations :"
 +    cat ${CONFIG_DIR}/uspcfg.txt
      exit
  fi
…
  if [ ${#NEW_CONF} -eq 0 ] ; then
 -    if [ ${#NEM_SUBDIR} -eq 0 -a ${#REF_CONF} -eq 0 ]; then
 -        echo "You are installing a new configuration"
 +    if [ ${#NEM_SUBDIR} -eq 0 ] && [ ${#REF_CONF} -eq 0 ] && [ ${#USP_CONF} -eq 0 ] ; then
 +        echo "You are installing a new default (ORCA2_LIM) configuration"
          ind=0
          . ${COMPIL_DIR}/Fread_dir.sh OPA_SRC YES
…
          . ${COMPIL_DIR}/Fread_dir.sh OFF_SRC NO
          REF_CONF=ORCA2_LIM
 -    elif [ ${#NEM_SUBDIR} -gt 0 ] && [ ${#REF_CONF} -eq 0 ] ; then
 -        echo "You are installing a new configuration "
 +    elif [ ${#NEM_SUBDIR} -gt 0 ] && [ ${#REF_CONF} -eq 0 ] && [ ${#USP_CONF} -eq 0 ] ; then
 +        echo "You are installing a new configuration based on ORCA2_LIM"
          TAB=( ${NEM_SUBDIR} )
          REF_CONF=ORCA2_LIM
…
          echo "You are installing a new configuration based on ${REF_CONF}"
          . ${COMPIL_DIR}/Fcopy_dir.sh ${REF_CONF}
 +    elif [ ${#NEM_SUBDIR} -eq 0 ] && [ ${#USP_CONF} -gt 0 ]; then
 +        echo "You are installing a new configuration based on the unsupported (external) ${USP_CONF}"
 +        . ${COMPIL_DIR}/Fcopy_extdir.sh ${USP_CONF}
 +        #echo "TTT " ${TAB}
 +        #echo "RRR " ${REMOTE_CTL}
 +        #echo "LLL " ${LOCAL_REF}
      fi
      NEW_CONF=${x_n}
 -    . ${COMPIL_DIR}/Fmake_config.sh ${NEW_CONF} ${REF_CONF}
 +
 +    if [ ${#USP_CONF} -gt 0 ]; then
 +        . ${COMPIL_DIR}/Fmake_extconfig.sh ${NEW_CONF} ${LOCAL_REF}
 +        . ${COMPIL_DIR}/Ffetch_extdir.sh ${NEW_CONF} ${REMOTE_CTL}
 +    else
 +        . ${COMPIL_DIR}/Fmake_config.sh ${NEW_CONF} ${REF_CONF}
 +    fi
  else
      sed -e "/${NEW_CONF} /d" ${CONFIG_DIR}/cfg.txt > ${COMPIL_DIR}/cfg.tmp
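With the new -u option an external configuration listed in CONFIG/uspcfg.txt can be built much like a reference one; for example (the configuration and architecture names here are hypothetical):

   ./makenemo -u MY_EXT_CONF -m my_arch -j 4

As the header comments describe, Fcopy_extdir.sh resolves LOCAL_REF and REMOTE_CTL for the chosen entry, Fmake_extconfig.sh populates the work directory from the nearest reference configuration, and Ffetch_extdir.sh fetches the remaining files from the remote resource list.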
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/DYN/dynadv.F90 (r4624 → r4934)

   LOGICAL, PUBLIC ::   ln_dynadv_cen2   !: flux form - 2nd order centered scheme flag
   LOGICAL, PUBLIC ::   ln_dynadv_ubs    !: flux form - 3rd order UBS scheme flag
 + LOGICAL, PUBLIC ::   ln_dynzad_zts    !: vertical advection with sub-timestepping (requires vector form)

   INTEGER ::   nadv   ! choice of the formulation and scheme for the advection
…
         CALL dyn_keg     ( kt )    ! vector form : horizontal gradient of kinetic energy
         CALL dyn_zad     ( kt )    ! vector form : vertical advection
      CASE ( 1 )
 +       CALL dyn_keg     ( kt )    ! vector form : horizontal gradient of kinetic energy
 +       CALL dyn_zad_zts ( kt )    ! vector form : vertical advection with sub-timestepping
 +    CASE ( 2 )
         CALL dyn_adv_cen2( kt )    ! 2nd order centered scheme
 -    CASE ( 2 )
 +    CASE ( 3 )
         CALL dyn_adv_ubs ( kt )    ! 3rd order UBS scheme
      !
…
   INTEGER ::   ios   ! Local integer output status for namelist read
   !!
 - NAMELIST/namdyn_adv/ ln_dynadv_vec, ln_dynadv_cen2 , ln_dynadv_ubs
 + NAMELIST/namdyn_adv/ ln_dynadv_vec, ln_dynadv_cen2 , ln_dynadv_ubs, ln_dynzad_zts
   !!----------------------------------------------------------------------
…
      WRITE(numout,*) '      2nd order centred advection scheme      ln_dynadv_cen2 = ', ln_dynadv_cen2
      WRITE(numout,*) '      3rd order UBS advection scheme          ln_dynadv_ubs  = ', ln_dynadv_ubs
 +    WRITE(numout,*) '      Sub timestepping of vertical advection  ln_dynzad_zts  = ', ln_dynzad_zts
   ENDIF
…
   IF( ioptio /= 1 )   CALL ctl_stop( 'Choose ONE advection scheme in namelist namdyn_adv' )
 + IF( ln_dynzad_zts .AND. .NOT. ln_dynadv_vec )   &
 +    CALL ctl_stop( 'Sub timestepping of vertical advection requires vector form; set ln_dynadv_vec = .TRUE.' )

   !                             ! Set nadv
   IF( ln_dynadv_vec  )   nadv =  0
 - IF( ln_dynadv_cen2 )   nadv =  1
 - IF( ln_dynadv_ubs  )   nadv =  2
 + IF( ln_dynzad_zts  )   nadv =  1
 + IF( ln_dynadv_cen2 )   nadv =  2
 + IF( ln_dynadv_ubs  )   nadv =  3
   IF( lk_esopa       )   nadv = -1

   IF(lwp) THEN
      WRITE(numout,*)
      IF( nadv ==  0 )   WRITE(numout,*) '      vector form : keg + zad + vor is used'
 +    IF( nadv ==  1 )   WRITE(numout,*) '      vector form : keg + zad_zts + vor is used'
 -    IF( nadv ==  1 )   WRITE(numout,*) '      flux form   : 2nd order scheme is used'
 -    IF( nadv ==  2 )   WRITE(numout,*) '      flux form   : UBS scheme is used'
 +    IF( nadv ==  2 )   WRITE(numout,*) '      flux form   : 2nd order scheme is used'
 +    IF( nadv ==  3 )   WRITE(numout,*) '      flux form   : UBS scheme is used'
      IF( nadv == -1 )   WRITE(numout,*) '      esopa test: use all advection formulation'
   ENDIF
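A minimal sketch (not part of the changeset) of a namdyn_adv block that satisfies the new consistency check: the sub-timestepped vertical momentum advection only exists in the vector form, so ln_dynadv_vec must be set alongside it.

   &namdyn_adv
      ln_dynadv_vec  = .true.    ! vector form (required by ln_dynzad_zts)
      ln_dynadv_cen2 = .false.
      ln_dynadv_ubs  = .false.
      ln_dynzad_zts  = .true.    ! sub-timestep the vertical momentum advection
   /

With this combination nadv is set to 1 and the control print reports 'vector form : keg + zad_zts + vor is used'.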
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/DYN/dynzad.F90 (r4666 → r4934)

   PRIVATE

 - PUBLIC   dyn_zad       ! routine called by step.F90
 + PUBLIC   dyn_zad       ! routine called by dynadv.F90
 + PUBLIC   dyn_zad_zts   ! routine called by dynadv.F90

   !! * Substitutions
…
      DO jj = 2, jpj                   ! vertical fluxes
         DO ji = fs_2, jpi             ! vector opt.
 -          zww(ji,jj) = 0.25 * e1t(ji,jj) * e2t(ji,jj) * wn(ji,jj,jk)
 +          zww(ji,jj) = 0.25_wp * e1t(ji,jj) * e2t(ji,jj) * wn(ji,jj,jk)
         END DO
      END DO
…
      DO jj = 2, jpjm1                 ! Surface and bottom values set to zero
         DO ji = fs_2, fs_jpim1        ! vector opt.
 -          zwuw(ji,jj, 1:miku(ji,jj) ) = 0.e0
 -          zwvw(ji,jj, 1:mikv(ji,jj) ) = 0.e0
 -          zwuw(ji,jj,jpk) = 0.e0
 -          zwvw(ji,jj,jpk) = 0.e0
 +          zwuw(ji,jj, 1:miku(ji,jj) ) = 0._wp
 +          zwvw(ji,jj, 1:mikv(ji,jj) ) = 0._wp
 +          zwuw(ji,jj,jpk) = 0._wp
 +          zwvw(ji,jj,jpk) = 0._wp
         END DO
      END DO
…
   END SUBROUTINE dyn_zad

New routine, added in full:

   SUBROUTINE dyn_zad_zts ( kt )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE dynzad_zts  ***
      !!
      !! ** Purpose :   Compute the now vertical momentum advection trend and
      !!      add it to the general trend of momentum equation. This version
      !!      uses sub-timesteps for improved numerical stability with small
      !!      vertical grid sizes. This is especially relevant when using
      !!      embedded ice with thin surface boxes.
      !!
      !! ** Method  :   The now vertical advection of momentum is given by:
      !!         w dz(u) = ua + 1/(e1u*e2u*e3u) mk+1[ mi(e1t*e2t*wn) dk(un) ]
      !!         w dz(v) = va + 1/(e1v*e2v*e3v) mk+1[ mj(e1t*e2t*wn) dk(vn) ]
      !!      Add this trend to the general trend (ua,va):
      !!         (ua,va) = (ua,va) + w dz(u,v)
      !!
      !! ** Action  : - Update (ua,va) with the vert. momentum adv. trends
      !!              - Save the trends in (ztrdu,ztrdv) ('key_trddyn')
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in) ::   kt   ! ocean time-step index
      !
      INTEGER  ::   ji, jj, jk, jl   ! dummy loop indices
      INTEGER  ::   jnzts = 5        ! number of sub-timesteps for vertical advection
      INTEGER  ::   jtb, jtn, jta    ! sub timestep pointers for leap-frog/euler forward steps
      REAL(wp) ::   zua, zva         ! temporary scalars
      REAL(wp) ::   zr_rdt           ! temporary scalar
      REAL(wp) ::   z2dtzts          ! length of Euler forward sub-timestep for vertical advection
      REAL(wp) ::   zts              ! length of sub-timestep for vertical advection
      REAL(wp), POINTER, DIMENSION(:,:,:)   ::   zwuw, zwvw, zww
      REAL(wp), POINTER, DIMENSION(:,:,:)   ::   ztrdu, ztrdv
      REAL(wp), POINTER, DIMENSION(:,:,:,:) ::   zus, zvs
      !!----------------------------------------------------------------------
      !
      IF( nn_timing == 1 )   CALL timing_start('dyn_zad_zts')
      !
      CALL wrk_alloc( jpi,jpj,jpk, zwuw, zwvw, zww )
      CALL wrk_alloc( jpi,jpj,jpk,3, zus, zvs )
      !
      IF( kt == nit000 ) THEN
         IF(lwp) WRITE(numout,*)
         IF(lwp) WRITE(numout,*) 'dyn_zad_zts : Arakawa advection scheme with sub-timesteps'
      ENDIF

      IF( l_trddyn )   THEN         ! Save ua and va trends
         CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv )
         ztrdu(:,:,:) = ua(:,:,:)
         ztrdv(:,:,:) = va(:,:,:)
      ENDIF

      IF( neuler == 0 .AND. kt == nit000 ) THEN
         z2dtzts =         rdt / REAL( jnzts, wp )   ! = rdt (restart with Euler time stepping)
      ELSE
         z2dtzts = 2._wp * rdt / REAL( jnzts, wp )   ! = 2 rdt (leapfrog)
      ENDIF

      DO jk = 2, jpkm1              ! Calculate and store vertical fluxes
         DO jj = 2, jpj
            DO ji = fs_2, jpi       ! vector opt.
               zww(ji,jj,jk) = 0.25_wp * e1t(ji,jj) * e2t(ji,jj) * wn(ji,jj,jk)
            END DO
         END DO
      END DO

      DO jj = 2, jpjm1              ! Surface and bottom advective fluxes set to zero
         DO ji = fs_2, fs_jpim1     ! vector opt.
            zwuw(ji,jj, 1:miku(ji,jj) ) = 0._wp
            zwvw(ji,jj, 1:mikv(ji,jj) ) = 0._wp
            zwuw(ji,jj,jpk) = 0._wp
            zwvw(ji,jj,jpk) = 0._wp
         END DO
      END DO

      ! Start with before values and use sub timestepping to reach after values

      zus(:,:,:,1) = ub(:,:,:)
      zvs(:,:,:,1) = vb(:,:,:)

      DO jl = 1, jnzts              ! Start of sub timestepping loop

         IF( jl == 1 ) THEN                   ! Euler forward to kick things off
            jtb = 1   ;   jtn = 1   ;   jta = 2
            zts = z2dtzts
         ELSEIF( jl == 2 ) THEN               ! First leapfrog step
            jtb = 1   ;   jtn = 2   ;   jta = 3
            zts = 2._wp * z2dtzts
         ELSE                                 ! Shuffle pointers for subsequent leapfrog steps
            jtb = MOD(jtb,3) + 1
            jtn = MOD(jtn,3) + 1
            jta = MOD(jta,3) + 1
         ENDIF

         DO jk = 2, jpkm1           ! Vertical momentum advection at level w and u- and v- vertical
            DO jj = 2, jpjm1        ! vertical momentum advection at w-point
               DO ji = fs_2, fs_jpim1   ! vector opt.
                  zwuw(ji,jj,jk) = ( zww(ji+1,jj  ,jk) + zww(ji,jj,jk) ) * ( zus(ji,jj,jk-1,jtn)-zus(ji,jj,jk,jtn) )
                  zwvw(ji,jj,jk) = ( zww(ji  ,jj+1,jk) + zww(ji,jj,jk) ) * ( zvs(ji,jj,jk-1,jtn)-zvs(ji,jj,jk,jtn) )
               END DO
            END DO
         END DO
         DO jk = 1, jpkm1           ! Vertical momentum advection at u- and v-points
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1   ! vector opt.
                  !                       ! vertical momentum advective trends
                  zua = - ( zwuw(ji,jj,jk) + zwuw(ji,jj,jk+1) ) / ( e1u(ji,jj) * e2u(ji,jj) * fse3u(ji,jj,jk) )
                  zva = - ( zwvw(ji,jj,jk) + zwvw(ji,jj,jk+1) ) / ( e1v(ji,jj) * e2v(ji,jj) * fse3v(ji,jj,jk) )
                  zus(ji,jj,jk,jta) = zus(ji,jj,jk,jtb) + zua * zts
                  zvs(ji,jj,jk,jta) = zvs(ji,jj,jk,jtb) + zva * zts
               END DO
            END DO
         END DO

      END DO                        ! End of sub timestepping loop

      zr_rdt = 1._wp / ( REAL( jnzts, wp ) * z2dtzts )
      DO jk = 1, jpkm1              ! Recover trends over the outer timestep
         DO jj = 2, jpjm1
            DO ji = fs_2, fs_jpim1  ! vector opt.
               !                       ! vertical momentum advective trends
               !                       ! add the trends to the general momentum trends
               ua(ji,jj,jk) = ua(ji,jj,jk) + ( zus(ji,jj,jk,jta) - ub(ji,jj,jk)) * zr_rdt
               va(ji,jj,jk) = va(ji,jj,jk) + ( zvs(ji,jj,jk,jta) - vb(ji,jj,jk)) * zr_rdt
            END DO
         END DO
      END DO

      IF( l_trddyn ) THEN           ! save the vertical advection trends for diagnostic
         ztrdu(:,:,:) = ua(:,:,:) - ztrdu(:,:,:)
         ztrdv(:,:,:) = va(:,:,:) - ztrdv(:,:,:)
         CALL trd_mod(ztrdu, ztrdv, jpdyn_trd_zad, 'DYN', kt)
         CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv )
      ENDIF
      !                             ! Control print
      IF(ln_ctl)   CALL prt_ctl( tab3d_1=ua, clinfo1=' zad  - Ua: ', mask1=umask,   &
         &                       tab3d_2=va, clinfo2=       ' Va: ', mask2=vmask, clinfo3='dyn' )
      !
      CALL wrk_dealloc( jpi,jpj,jpk, zwuw, zwvw, zww )
      CALL wrk_dealloc( jpi,jpj,jpk,3, zus, zvs )
      !
      IF( nn_timing == 1 )   CALL timing_stop('dyn_zad_zts')
      !
   END SUBROUTINE dyn_zad_zts

   !!======================================================================
END MODULE dynzad
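The three-slot buffer rotation in the sub-timestepping loop is compact but easy to misread. Below is a minimal, self-contained sketch (not NEMO code; rdt and jnzts values are hypothetical) that replays the jtb/jtn/jta bookkeeping and checks that an Euler start followed by leap-frog sub-steps carries the "after" slot through the full 2*rdt window:

   PROGRAM zts_demo
      IMPLICIT NONE
      INTEGER, PARAMETER  :: wp = KIND(1.d0)
      INTEGER, PARAMETER  :: jnzts = 5          ! number of sub-timesteps, as in dyn_zad_zts
      REAL(wp), PARAMETER :: rdt = 3600._wp     ! hypothetical outer timestep (s)
      INTEGER  :: jl, jtb, jtn, jta
      REAL(wp) :: z2dtzts, zts
      REAL(wp) :: t(3)                          ! time reached by each buffer slot
      !
      z2dtzts = 2._wp * rdt / REAL( jnzts, wp ) ! leap-frog sub-step length (Euler-restart case uses rdt/jnzts)
      t(:) = 0._wp
      DO jl = 1, jnzts
         IF( jl == 1 ) THEN                     ! Euler forward to kick things off
            jtb = 1 ; jtn = 1 ; jta = 2 ; zts = z2dtzts
         ELSEIF( jl == 2 ) THEN                 ! first leap-frog step
            jtb = 1 ; jtn = 2 ; jta = 3 ; zts = 2._wp * z2dtzts
         ELSE                                   ! rotate before/now/after slots
            jtb = MOD(jtb,3) + 1 ; jtn = MOD(jtn,3) + 1 ; jta = MOD(jta,3) + 1
            zts = 2._wp * z2dtzts
         ENDIF
         t(jta) = t(jtb) + zts                  ! "after" = "before" advanced by zts
         PRINT '(A,I2,A,3I2,A,F8.1)', 'sub-step', jl, '  (jtb,jtn,jta) =', jtb, jtn, jta, &
            &                         '   time in after slot =', t(jta)
      END DO
      ! after jnzts sub-steps the after slot has reached jnzts*z2dtzts = 2*rdt
      PRINT '(A,F8.1,A,F8.1)', 'reached ', t(jta), '   expected ', 2._wp*rdt
   END PROGRAM zts_demo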
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icb_oce.F90 (r4153 → r4934)

   REAL(wp), PUBLIC, DIMENSION(:,:), ALLOCATABLE ::   ua_e, va_e
   REAL(wp), PUBLIC, DIMENSION(:,:), ALLOCATABLE ::   ssh_e
 - #if defined key_lim2 || defined key_lim3
 + #if defined key_lim2 || defined key_lim3 || defined key_cice
   REAL(wp), PUBLIC, DIMENSION(:,:), ALLOCATABLE ::   ui_e, vi_e
   #endif
…
   INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) ::   nicbflddest   !: nfold destination proc
   INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) ::   nicbfldproc   !: nfold destination proc
 + INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) ::   nicbfldnsend  !: nfold number of bergs to send to nfold neighbour
 + INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) ::   nicbfldexpect !: nfold expected number of bergs
 + INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) ::   nicbfldreq    !: nfold message handle (immediate send)

   REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:) ::   griddata  !: work array for icbrst
…
      icb_alloc = 0
 +    ALLOCATE( berg_grid, STAT=ill )
 +    icb_alloc = icb_alloc + ill
      ALLOCATE( berg_grid%calving    (jpi,jpj) , berg_grid%calving_hflx (jpi,jpj) ,   &
         &      berg_grid%stored_heat(jpi,jpj) , berg_grid%floating_melt(jpi,jpj) ,   &
…
      ALLOCATE( uo_e(0:jpi+1,0:jpj+1) , ua_e(0:jpi+1,0:jpj+1) ,   &
         &      vo_e(0:jpi+1,0:jpj+1) , va_e(0:jpi+1,0:jpj+1) ,   &
 - #if defined key_lim2 || defined key_lim3
 + #if defined key_lim2 || defined key_lim3 || defined key_cice
         &      ui_e(0:jpi+1,0:jpj+1) ,   &
         &      vi_e(0:jpi+1,0:jpj+1) ,   &
…
      icb_alloc = icb_alloc + ill

 -    ALLOCATE( nicbfldpts(jpi) , nicbflddest(jpi) , nicbfldproc(jpni) , STAT=ill)
 +    ALLOCATE( nicbfldpts(jpi) , nicbflddest(jpi) , nicbfldproc(jpni) ,   &
 +       &      nicbfldnsend(jpni), nicbfldexpect(jpni) , nicbfldreq(jpni), STAT=ill)
      icb_alloc = icb_alloc + ill
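One change here is easy to miss: berg_grid itself is now allocated, with its own STAT check, before its components are. In Fortran a POINTER (or ALLOCATABLE) derived-type scalar must be associated before any of its components may be allocated. A minimal sketch of the pattern, with a hypothetical type standing in for the NEMO one:

   PROGRAM alloc_order_demo
      IMPLICIT NONE
      ! hypothetical stand-in for NEMO's gridded iceberg type
      TYPE :: gridded
         REAL, ALLOCATABLE :: calving(:,:)
      END TYPE
      TYPE(gridded), POINTER :: berg_grid
      INTEGER :: ill
      !
      ALLOCATE( berg_grid, STAT=ill )                   ! container first ...
      IF( ill /= 0 ) STOP 'allocation of berg_grid failed'
      ALLOCATE( berg_grid%calving(10,10), STAT=ill )    ! ... then its components
      IF( ill /= 0 ) STOP 'allocation of berg_grid%calving failed'
      berg_grid%calving(:,:) = 0.
      PRINT *, 'allocated ', SIZE(berg_grid%calving), ' elements'
   END PROGRAM alloc_order_demo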
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icbdyn.F90 (r3614 → r4934)

   CONTAINS

 - SUBROUTINE icb_dyn( )
 + SUBROUTINE icb_dyn( kt )
   !!----------------------------------------------------------------------
   !!                  ***  ROUTINE icb_dyn  ***
…
   TYPE(iceberg), POINTER ::   berg
   TYPE(point)  , POINTER ::   pt
 + INTEGER ::   kt
   !!----------------------------------------------------------------------
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icbini.F90 (r4624 → r4934)

   DO ji = nicbdi, nicbei
      ii = nicbflddest(ji)
 -    DO jn = 1, jpni
 -       ! work along array until we find an empty slot
 -       IF( nicbfldproc(jn) == -1 ) THEN
 -          nicbfldproc(jn) = ii
 -          EXIT                             !!gm EXIT should be avoided: use DO WHILE expression instead
 -       ENDIF
 -       ! before we find an empty slot, we may find processor number is already here so we exit
 -       IF( nicbfldproc(jn) == ii ) EXIT
 -    END DO
 +    IF( ii .GT. 0 ) THEN     ! Needed because land suppression can mean
 +                             ! that unused points are not set in edge haloes
 +       DO jn = 1, jpni
 +          ! work along array until we find an empty slot
 +          IF( nicbfldproc(jn) == -1 ) THEN
 +             nicbfldproc(jn) = ii
 +             EXIT                          !!gm EXIT should be avoided: use DO WHILE expression instead
 +          ENDIF
 +          ! before we find an empty slot, we may find processor number is already here so we exit
 +          IF( nicbfldproc(jn) == ii ) EXIT
 +       END DO
 +    ENDIF
   END DO
…
      WRITE(numicb,*) 'north fold destination procs '
      WRITE(numicb,*) nicbflddest
 +    WRITE(numicb,*) 'north fold destination proclist '
 +    WRITE(numicb,*) nicbfldproc
   ENDIF
   CALL flush(numicb)
…
   ENDIF

 + ! IF( lk_lim3 .AND. ln_icebergs ) THEN
 + !    CALL ctl_stop( 'icb_nam: the use of ICB with LIM3 not allowed. ice thickness missing in ICB' )
 + ! ENDIF

   IF(lwp) THEN                  ! control print
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icblbc.F90 (r3614 → r4934)

The east-west and north-south berg-count exchanges now name the source processor explicitly in mpprecv:

            zwebergs(1) = ibergs_to_send_e
            CALL mppsend( 12, zwebergs(1), 1, ipe_E, iml_req1)
 -          CALL mpprecv( 11, zewbergs(2), 1 )
 +          CALL mpprecv( 11, zewbergs(2), 1, ipe_E )
            IF( l_isend ) CALL mpi_wait( iml_req1, iml_stat, iml_err )
            ibergs_rcvd_from_e = INT( zewbergs(2) )
…
            CALL mppsend( 11, zewbergs(1), 1, ipe_W, iml_req2)
            CALL mppsend( 12, zwebergs(1), 1, ipe_E, iml_req3)
 -          CALL mpprecv( 11, zewbergs(2), 1 )
 -          CALL mpprecv( 12, zwebergs(2), 1 )
 +          CALL mpprecv( 11, zewbergs(2), 1, ipe_E )
 +          CALL mpprecv( 12, zwebergs(2), 1, ipe_W )
            IF( l_isend ) CALL mpi_wait( iml_req2, iml_stat, iml_err )
            IF( l_isend ) CALL mpi_wait( iml_req3, iml_stat, iml_err )
…
            zewbergs(1) = ibergs_to_send_w
            CALL mppsend( 11, zewbergs(1), 1, ipe_W, iml_req4)
 -          CALL mpprecv( 12, zwebergs(2), 1 )
 +          CALL mpprecv( 12, zwebergs(2), 1, ipe_W )
            IF( l_isend ) CALL mpi_wait( iml_req4, iml_stat, iml_err )
            ibergs_rcvd_from_w = INT( zwebergs(2) )
…
            zsnbergs(1) = ibergs_to_send_n
            CALL mppsend( 16, zsnbergs(1), 1, ipe_N, iml_req1)
 -          CALL mpprecv( 15, znsbergs(2), 1 )
 +          CALL mpprecv( 15, znsbergs(2), 1, ipe_N )
            IF( l_isend ) CALL mpi_wait( iml_req1, iml_stat, iml_err )
            ibergs_rcvd_from_n = INT( znsbergs(2) )
…
            CALL mppsend( 15, znsbergs(1), 1, ipe_S, iml_req2)
            CALL mppsend( 16, zsnbergs(1), 1, ipe_N, iml_req3)
 -          CALL mpprecv( 15, znsbergs(2), 1 )
 -          CALL mpprecv( 16, zsnbergs(2), 1 )
 +          CALL mpprecv( 15, znsbergs(2), 1, ipe_N )
 +          CALL mpprecv( 16, zsnbergs(2), 1, ipe_S )
            IF( l_isend ) CALL mpi_wait( iml_req2, iml_stat, iml_err )
            IF( l_isend ) CALL mpi_wait( iml_req3, iml_stat, iml_err )
…
            znsbergs(1) = ibergs_to_send_s
            CALL mppsend( 15, znsbergs(1), 1, ipe_S, iml_req4)
 -          CALL mpprecv( 16, zsnbergs(2), 1 )
 +          CALL mpprecv( 16, zsnbergs(2), 1, ipe_S )
            IF( l_isend ) CALL mpi_wait( iml_req4, iml_stat, iml_err )
            ibergs_rcvd_from_s = INT( zsnbergs(2) )
…
The north-fold exchange is reworked into a multi-phase handshake:

   INTEGER ::   ifldproc, iproc, ipts
   INTEGER ::   iine, ijne
 + INTEGER ::   jjn
 - REAL(wp), DIMENSION(2) ::   zsbergs, znbergs
 + REAL(wp), DIMENSION(0:3) ::   zsbergs, znbergs
   INTEGER ::   iml_req1, iml_req2, iml_err
   INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   iml_stat
…
   ! its of fixed size, the first -1 marks end of list of processors
   !
 + nicbfldnsend(:) = 0
 + nicbfldexpect(:) = 0
 + nicbfldreq(:) = 0
 + !
 + ! Since each processor may be communicating with more than one northern
 + ! neighbour, cycle through the sends so that the receive order can be
 + ! controlled.
 + !
 + ! First compute how many icebergs each active neighbour should expect
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       nicbfldnsend(jn) = 0
 +
 +       ! Find number of bergs that need to be exchanged
 +       ! Pick out exchanges with processor ifldproc
 +       ! if ifldproc is this processor then don't send
 +       !
 +       IF( ASSOCIATED(first_berg) ) THEN
 +          this => first_berg
 +          DO WHILE (ASSOCIATED(this))
 +             pt => this%current_point
 +             iine = INT( pt%xi + 0.5 )
 +             ijne = INT( pt%yj + 0.5 )
 +             iproc = nicbflddest(mi1(iine))
 +             IF( ijne .GT. mjg(nicbej) ) THEN
 +                IF( iproc == ifldproc ) THEN
 +                   !
 +                   IF( iproc /= narea ) THEN
 +                      tmpberg => this
 +                      nicbfldnsend(jn) = nicbfldnsend(jn) + 1
 +                   ENDIF
 +                   !
 +                ENDIF
 +             ENDIF
 +             this => this%next
 +          END DO
 +       ENDIF
 +       !
 +    ENDIF
 +    !
 + END DO
 + !
 + ! Now tell each active neighbour how many icebergs to expect
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       IF( ifldproc == narea ) CYCLE
 +
 +       zsbergs(0) = narea
 +       zsbergs(1) = nicbfldnsend(jn)
 +       !IF ( nicbfldnsend(jn) .GT. 0) write(numicb,*) 'ICB sending ',nicbfldnsend(jn),' to ', ifldproc
 +       CALL mppsend( 21, zsbergs(0:1), 2, ifldproc-1, nicbfldreq(jn))
 +    ENDIF
 +    !
 + END DO
 + !
 + ! and receive the heads-up from active neighbours preparing to send
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       IF( ifldproc == narea ) CYCLE
 +
 +       CALL mpprecv( 21, znbergs(1:2), 2 )
 +       DO jjn = 1,jpni
 +          IF( nicbfldproc(jjn) .eq. INT(znbergs(1)) ) EXIT
 +       END DO
 +       IF( jjn .GT. jpni ) write(numicb,*) 'ICB ERROR'
 +       nicbfldexpect(jjn) = INT( znbergs(2) )
 +       !IF ( nicbfldexpect(jjn) .GT. 0) write(numicb,*) 'ICB expecting ',nicbfldexpect(jjn),' from ', nicbfldproc(jjn)
 +       !CALL FLUSH(numicb)
 +    ENDIF
 +    !
 + END DO
 + !
 + ! post the mpi waits if using immediate send protocol
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       IF( ifldproc == narea ) CYCLE
 +
 +       IF( l_isend ) CALL mpi_wait( nicbfldreq(jn), iml_stat, iml_err )
 +    ENDIF
 +    !
 + END DO
 +
 + !
 + ! Cycle through the icebergs again, this time packing and sending any
 + ! going through the north fold. They will be expected.
   DO jn = 1, jpni
      IF( nicbfldproc(jn) /= -1 ) THEN
…
         IF( ifldproc == narea ) CYCLE

 -       zsbergs(1) = ibergs_to_send
 -       CALL mppsend( 21, zsbergs(1), 1, ifldproc-1, iml_req1)
 -       CALL mpprecv( 21, znbergs(2), 1 )
 -       IF( l_isend ) CALL mpi_wait( iml_req1, iml_stat, iml_err )
 -       ibergs_to_rcv = INT( znbergs(2) )
 -
         ! send bergs

         IF( ibergs_to_send > 0 )  &
 -          CALL mppsend( 12, obuffer_f%data, ibergs_to_send*jp_buffer_width, ifldproc-1, iml_req2 )
 +          CALL mppsend( 12, obuffer_f%data, ibergs_to_send*jp_buffer_width, ifldproc-1, nicbfldreq(jn) )
 +       !
 +    ENDIF
 +    !
 + END DO
 + !
 + ! Now receive the expected number of bergs from the active neighbours
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       IF( ifldproc == narea ) CYCLE
 +       ibergs_to_rcv = nicbfldexpect(jn)
 +
         IF( ibergs_to_rcv > 0 ) THEN
            CALL icb_increase_ibuffer(ibuffer_f, ibergs_to_rcv)
 -          CALL mpprecv( 12, ibuffer_f%data, ibergs_to_rcv*jp_buffer_width )
 +          CALL mpprecv( 12, ibuffer_f%data, ibergs_to_rcv*jp_buffer_width, ifldproc-1 )
         ENDIF
 -       IF( ibergs_to_send > 0 .AND. l_isend ) CALL mpi_wait( iml_req2, iml_stat, iml_err )
 +       !
         DO jk = 1, ibergs_to_rcv
            IF( nn_verbose_level >= 4 ) THEN
…
            CALL icb_unpack_from_buffer(first_berg, ibuffer_f, jk )
         END DO
 +    ENDIF
 +    !
 + END DO
 + !
 + ! Finally post the mpi waits if using immediate send protocol
 + DO jn = 1, jpni
 +    IF( nicbfldproc(jn) /= -1 ) THEN
 +       ifldproc = nicbfldproc(jn)
 +       IF( ifldproc == narea ) CYCLE
 +
 +       IF( l_isend ) CALL mpi_wait( nicbfldreq(jn), iml_stat, iml_err )
      ENDIF
      !
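The reworked exchange is a two-phase protocol: every rank first announces, with an immediate send, how many bergs it will send (tagged with its own rank number, so the receiver can attribute counts when it talks to several northern neighbours), and only then exchanges the variable-sized payload buffers. A minimal, self-contained sketch (not NEMO code; it uses raw MPI rather than NEMO's mppsend/mpprecv wrappers, and the two-rank setup and buffer contents are hypothetical):

   ! run with: mpirun -np 2 ./handshake
   PROGRAM handshake
      USE mpi
      IMPLICIT NONE
      INTEGER :: ierr, rank, partner, req
      INTEGER :: nsend, nexpect
      INTEGER :: stat(MPI_STATUS_SIZE)
      REAL(8), ALLOCATABLE :: payload(:), inbox(:)
      !
      CALL MPI_Init(ierr)
      CALL MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
      partner = 1 - rank                    ! two-rank toy example
      nsend   = 3 + rank                    ! pretend the berg count differs per rank
      ALLOCATE( payload(nsend) ) ; payload(:) = REAL(rank,8)
      !
      ! Phase 1: announce the count with an immediate send, then receive the
      ! partner's count; the wait is posted after the receive so neither side
      ! blocks the other (this mirrors nicbfldnsend/nicbfldexpect/nicbfldreq).
      CALL MPI_Isend(nsend,   1, MPI_INTEGER, partner, 21, MPI_COMM_WORLD, req,  ierr)
      CALL MPI_Recv (nexpect, 1, MPI_INTEGER, partner, 21, MPI_COMM_WORLD, stat, ierr)
      CALL MPI_Wait (req, stat, ierr)
      !
      ! Phase 2: the payload receive can now be sized exactly before it is posted
      ALLOCATE( inbox(nexpect) )
      CALL MPI_Isend(payload, nsend,   MPI_DOUBLE_PRECISION, partner, 12, MPI_COMM_WORLD, req,  ierr)
      CALL MPI_Recv (inbox,   nexpect, MPI_DOUBLE_PRECISION, partner, 12, MPI_COMM_WORLD, stat, ierr)
      CALL MPI_Wait (req, stat, ierr)
      !
      PRINT '(A,I2,A,I2,A)', 'rank', rank, ' received', nexpect, ' values'
      CALL MPI_Finalize(ierr)
   END PROGRAM handshake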
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icbrst.F90 (r3614 → r4934)

   ! start and count arrays
   LOGICAL ::   ll_found_restart
 - CHARACTER(len=80)  ::   cl_filename
 + CHARACTER(len=256) ::   cl_filename
   CHARACTER(len=NF90_MAX_NAME) ::   cl_dname
   TYPE(iceberg) ::   localberg   ! NOT a pointer but an actual local variable
…
   INTEGER ::   jn   ! dummy loop index
   INTEGER ::   ix_dim, iy_dim, ik_dim, in_dim
 - CHARACTER(len=80)  ::   cl_filename
 + CHARACTER(len=256) ::   cl_filename
   TYPE(iceberg), POINTER ::   this
   TYPE(point)  , POINTER ::   pt
…
   IF (nret .ne. NF90_NOERR) CALL ctl_stop('icebergs, write_restart: nf_def_dim k failed')

 + ! global attributes
 + IF( lk_mpp ) THEN
 +    ! Set domain parameters (assume jpdom_local_full)
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_number_total'   , jpnij              )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_number'         , narea-1            )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_dimensions_ids' , (/1     , 2     /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_size_global'    , (/jpiglo, jpjglo/) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_size_local'     , (/jpi   , jpj   /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_position_first' , (/nimpp , njmpp /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_position_last'  , (/nimpp + jpi - 1 , njmpp + jpj - 1 /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_halo_size_start', (/nldi - 1        , nldj - 1        /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_halo_size_end'  , (/jpi - nlei      , jpj - nlej      /) )
 +    nret = NF90_PUT_ATT( ncid, NF90_GLOBAL, 'DOMAIN_type'           , 'BOX'              )
 + ENDIF
 +
   IF (associated(first_berg)) then
      nret = NF90_DEF_DIM(ncid, 'n', NF90_UNLIMITED, in_dim)
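The DOMAIN_* global attributes written here follow the naming convention NEMO uses for its per-processor ocean restart files, presumably so the standard rebuild tooling can stitch the distributed iceberg restarts back into a single global file; the "assume jpdom_local_full" comment points at the matching read-side convention.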
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icbstp.F90 (r4153 → r4934)

      !                          !==  For each berg, evolve  ==!
      !
 -    IF( ASSOCIATED(first_berg) )   CALL icb_dyn()        ! ice berg dynamics
 +    IF( ASSOCIATED(first_berg) )   CALL icb_dyn( kt )    ! ice berg dynamics

      IF( lk_mpp ) THEN   ;   CALL icb_lbc_mpp()           ! Send bergs to other PEs
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/ICB/icbutl.F90 (r4724 → r4934)

   va_e(:,:) = 0._wp ;   va_e(1:jpi, 1:jpj) = vtau(:,:) * vmask(:,:,1)   ! maybe mask useless because mask applied in sbcblk

 - CALL lbc_lnk_e( uo_e, 'U', -1._wp, 1, 1 )
 - CALL lbc_lnk_e( vo_e, 'V', -1._wp, 1, 1 )
 - CALL lbc_lnk_e( ff_e, 'F', +1._wp, 1, 1 )
 - CALL lbc_lnk_e( ua_e, 'U', -1._wp, 1, 1 )
 - CALL lbc_lnk_e( va_e, 'V', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( uo_e, 'U', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( vo_e, 'V', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( ff_e, 'F', +1._wp, 1, 1 )
 + CALL lbc_lnk_icb( ua_e, 'U', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( va_e, 'V', -1._wp, 1, 1 )

   #if defined key_lim2 || defined key_lim3
…
   vi_e(:,:) = 0._wp ;   vi_e(1:jpi, 1:jpj) = v_ice(:,:)

 - CALL lbc_lnk_e( ui_e, 'U', -1._wp, 1, 1 )
 - CALL lbc_lnk_e( vi_e, 'V', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( ui_e, 'U', -1._wp, 1, 1 )
 + CALL lbc_lnk_icb( vi_e, 'V', -1._wp, 1, 1 )
   #endif
…
   ssh_e(0,jpj+1)     = ssh_e(1,jpj)
   ssh_e(jpi+1,jpj+1) = ssh_e(jpi,jpj)
 - CALL lbc_lnk_e( ssh_e, 'T', +1._wp, 1, 1 )
 + CALL lbc_lnk_icb( ssh_e, 'T', +1._wp, 1, 1 )
   !
   END SUBROUTINE icb_utl_copy
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/LBC/lbclnk.F90 (r4328 → r4934)

(mpp branch)
   END INTERFACE

 + INTERFACE lbc_lnk_icb
 +    MODULE PROCEDURE mpp_lnk_2d_icb
 + END INTERFACE
 +
   PUBLIC lbc_lnk       ! ocean lateral boundary conditions
   PUBLIC lbc_lnk_e
   PUBLIC lbc_bdy_lnk   ! ocean lateral BDY boundary conditions
 + PUBLIC lbc_lnk_icb

   !!----------------------------------------------------------------------
…
(non-mpp branch)
   END INTERFACE

 + INTERFACE lbc_lnk_icb
 +    MODULE PROCEDURE lbc_lnk_2d_e
 + END INTERFACE
 +
   PUBLIC lbc_lnk       ! ocean/ice lateral boundary conditions
   PUBLIC lbc_lnk_e
   PUBLIC lbc_bdy_lnk   ! ocean lateral BDY boundary conditions
 + PUBLIC lbc_lnk_icb

   !!----------------------------------------------------------------------
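The point of the generic interface is that callers such as icbutl.F90 write one name and the build picks the backend: mpp_lnk_2d_icb under key_mpp_mpi, lbc_lnk_2d_e otherwise. A minimal, self-contained sketch of the pattern (not NEMO code; the backend body is a toy stand-in):

   MODULE lnk_demo
      IMPLICIT NONE
      INTERFACE lbc_lnk_icb
         MODULE PROCEDURE lnk_mpp     ! stand-in for mpp_lnk_2d_icb / lbc_lnk_2d_e
      END INTERFACE
   CONTAINS
      SUBROUTINE lnk_mpp( pt2d, cd_type, psgn, kri, krj )
         REAL,             INTENT(inout) :: pt2d(:,:)   ! field with extra halo
         CHARACTER(len=1), INTENT(in   ) :: cd_type     ! grid-point type: T, U, V, F, W
         REAL,             INTENT(in   ) :: psgn        ! sign change across north fold
         INTEGER,          INTENT(in   ) :: kri, krj    ! extra halo widths in i and j
         PRINT *, 'exchanging ', cd_type, '-point halo of widths', kri, krj
      END SUBROUTINE lnk_mpp
   END MODULE lnk_demo

   PROGRAM demo
      USE lnk_demo
      IMPLICIT NONE
      REAL :: ssh_e(0:11,0:11)                    ! extra-halo array, as in icb_oce
      ssh_e = 0.
      CALL lbc_lnk_icb( ssh_e, 'T', 1., 1, 1 )    ! same call pattern as icbutl.F90
   END PROGRAM demo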
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90 (r4924 → r4934)

   !!   mpp_lnk_3d_gather :  Message passing management for two 3D arrays
   !!   mpp_lnk_e      : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)
 + !!   mpp_lnk_icb    : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb)
   !!   mpprecv        :
   !!   mppsend        :   SUBROUTINE mpp_ini_znl
…
   !!   mpp_lbc_north  : north fold processors gathering
   !!   mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo
 + !!   mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs
   !!----------------------------------------------------------------------
   USE dom_oce        ! ocean space and time domain
…
   PUBLIC   mppsend, mpprecv      ! needed by TAM and ICB routines
   PUBLIC   mpp_lnk_bdy_2d, mpp_lnk_bdy_3d
 + PUBLIC   mpp_lbc_north_icb, mpp_lnk_2d_icb

   !! * Interfaces
…
   END SUBROUTINE DDPDD_MPI

Two new routines, added in full:

   SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, pr2dj)
      !!---------------------------------------------------------------------
      !!                   ***  routine mpp_lbc_north_icb  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!              in mpp configuration in case of jpni > 1 and for 2d
      !!              array with outer extra halo
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!              in i-direction require a specific treatment. We gather
      !!              the 4+2*jpr2dj northern lines of the global domain on 1
      !!              processor and apply lbc north-fold on this sub array.
      !!              Then we scatter the north fold array back to the processors.
      !!              This version accounts for an extra halo with icebergs.
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(:,:), INTENT(inout) ::   pt2d     ! 2D array with extra halo
      CHARACTER(len=1)        , INTENT(in   ) ::   cd_type  ! nature of pt3d grid-points
      !                                                     !   = T ,  U , V , F or W -points
      REAL(wp)                , INTENT(in   ) ::   psgn     ! = -1. the sign change across the
      !!                                                    ! north fold, =  1. otherwise
      INTEGER, OPTIONAL       , INTENT(in   ) ::   pr2dj
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ijpj, ij, iproc, ipr2dj
      !
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztab_e, znorthloc_e
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   znorthgloio_e
      !!----------------------------------------------------------------------
      !
      ijpj = 4
      IF( PRESENT(pr2dj) ) THEN           ! use of additional halos
         ipr2dj = pr2dj
      ELSE
         ipr2dj = 0
      ENDIF
      ALLOCATE( ztab_e(jpiglo,4+2*ipr2dj), znorthloc_e(jpi,4+2*ipr2dj), znorthgloio_e(jpi,4+2*ipr2dj,jpni) )
      !
      ztab_e(:,:) = 0.e0
      !
      ij = 0
      ! put in znorthloc_e the last 4 jlines of pt2d
      DO jj = nlcj - ijpj + 1 - ipr2dj, nlcj + ipr2dj
         ij = ij + 1
         DO ji = 1, jpi
            znorthloc_e(ji,ij) = pt2d(ji,jj)
         END DO
      END DO
      !
      itaille = jpi * ( ijpj + 2 * ipr2dj )
      CALL MPI_ALLGATHER( znorthloc_e(1,1)    , itaille, MPI_DOUBLE_PRECISION,    &
         &                znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )
      !
      DO jr = 1, ndim_rank_north         ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi  = nldit (iproc)
         ilei  = nleit (iproc)
         iilb  = nimppt(iproc)
         DO jj = 1, ijpj+2*ipr2dj
            DO ji = ildi, ilei
               ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr)
            END DO
         END DO
      END DO

      ! 2. North-Fold boundary conditions
      ! ----------------------------------
      CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = ipr2dj )

      ij = ipr2dj
      !! Scatter back to pt2d
      DO jj = nlcj - ijpj + 1, nlcj + ipr2dj
         ij = ij + 1
         DO ji = 1, nlci
            pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij)
         END DO
      END DO
      !
      DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e )
      !
   END SUBROUTINE mpp_lbc_north_icb

   SUBROUTINE mpp_lnk_2d_icb( pt2d, cd_type, psgn, jpri, jprj )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_icb  ***
      !!
      !! ** Purpose :   Message passing management for 2d array (with extra halo and icebergs)
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    jpri   : number of rows for extra outer halo
      !!                    jprj   : number of columns for extra outer halo
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      INTEGER                                             , INTENT(in   ) ::   jpri
      INTEGER                                             , INTENT(in   ) ::   jprj
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) ::   pt2d     ! 2D array with extra halo
      CHARACTER(len=1)                                    , INTENT(in   ) ::   cd_type  ! nature of ptab array grid-points
      !                                                                       ! = T , U , V , F , W and I points
      REAL(wp)                                            , INTENT(in   ) ::   psgn     ! =-1 the sign change across the
      !!                                                                      ! north boundary, = 1. otherwise
      INTEGER  ::   jl   ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ipreci, iprecj             ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dns
      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dsn
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dwe
      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dew
      !!----------------------------------------------------------------------

      ipreci = jpreci + jpri      ! take into account outer extra 2D overlap area
      iprecj = jprecj + jprj

      ! 1. standard boundary treatment
      ! ------------------------------
      ! Order matters Here !!!!
      !
      !                                      ! East-West boundaries
      !                                      !* Cyclic east-west
      IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
         pt2d(1-jpri:   1    ,:) = pt2d(jpim1-jpri: jpim1 ,:)    ! east
         pt2d(  jpi :jpi+jpri,:) = pt2d(    2     :2+jpri ,:)    ! west
         !
      ELSE                                   !* closed
         IF( .NOT. cd_type == 'F' )   pt2d(  1-jpri   :jpreci  ,:) = 0.e0   ! south except at F-point
                                      pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north
      ENDIF
      !

      ! north fold treatment
      ! --------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd          ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
         CASE DEFAULT   ;   CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
         END SELECT
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )           ! all except 2 (i.e. close case)
         iihom = nlci - nreci - jpri
         DO jl = 1, ipreci
            r2dew(:,jl,1) = pt2d(jpreci+jl,:)
            r2dwe(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = ipreci * ( jpj + 2*jprj )
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, ipreci
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, ipreci
            pt2d(jl-jpri ,:) = r2dwe(:,jl,2)
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, ipreci
            pt2d(jl-jpri,:) = r2dwe(:,jl,2)
         END DO
      END SELECT

      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj - nrecj - jprj
         DO jl = 1, iprecj
            r2dsn(:,jl,1) = pt2d(:,ijhom +jl)
            r2dns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = iprecj * ( jpi + 2*jpri )
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, iprecj
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj ) = r2dsn(:,jl,2)
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, iprecj
            pt2d(:,jl-jprj) = r2dsn(:,jl,2)
         END DO
      END SELECT

   END SUBROUTINE mpp_lnk_2d_icb
   #else
   !!----------------------------------------------------------------------
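The cyclic east-west branch at the top of mpp_lnk_2d_icb is the ordinary periodic halo fill, just widened by the extra iceberg halo. A minimal, self-contained sketch (not NEMO code; the domain sizes are hypothetical) that reproduces the same array-section copies on a toy grid:

   PROGRAM cyclic_halo_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpi = 8, jpj = 3, jpri = 2, jprj = 2
      INTEGER, PARAMETER :: jpim1 = jpi - 1
      REAL :: pt2d(1-jpri:jpi+jpri, 1-jprj:jpj+jprj)   ! field with extra halo
      INTEGER :: ji
      !
      pt2d(:,:) = 0.
      DO ji = 2, jpim1                       ! fill the "real" columns 2..jpi-1
         pt2d(ji,:) = REAL(ji)
      END DO
      ! cyclic east-west condition, extra-halo version (as in mpp_lnk_2d_icb):
      pt2d(1-jpri:   1    ,:) = pt2d(jpim1-jpri:jpim1 ,:)   ! west halo <- east interior
      pt2d(  jpi :jpi+jpri,:) = pt2d(    2     :2+jpri,:)   ! east halo <- west interior
      !
      PRINT '(12F5.1)', pt2d(:,1)            ! one row: halo columns wrap periodically
   END PROGRAM cyclic_halo_demo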
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/OBS/diaobs.F90 (r4932 → r4934)

   !-----------------------------------------------------------------------

 - !Initalise all values in namelist arrays
 - enactfiles(:)      = ''
 - coriofiles(:)      = ''
 - profbfiles(:)      = ''
 - slafilesact(:)     = ''
 - slafilespas(:)     = ''
 - slafbfiles(:)      = ''
 - sstfiles(:)        = ''
 - sstfbfiles(:)      = ''
 - seaicefiles(:)     = ''
   velcurfiles(:)     = ''
   veladcpfiles(:)    = ''
 - velavcurfiles(:)   = ''
 - velhrcurfiles(:)   = ''
 - velavadcpfiles(:)  = ''
 - velhradcpfiles(:)  = ''
 - velfbfiles(:)      = ''
 - velcurfiles(:)     = ''
 - veladcpfiles(:)    = ''
 - endailyavtypes(:)  = -1
 - endailyavtypes(1)  = 820
 - ln_profb_ena(:)    = .FALSE.
 - ln_profb_enatim(:) = .TRUE.
 - ln_velfb_av(:)     = .FALSE.
 - ln_ignmis          = .FALSE.
   CALL ini_date( dobsini )
   CALL fin_date( dobsend )
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/TRA/traadv.F90 (r4724 → r4934)

   LOGICAL ::   ln_traadv_cen2      ! 2nd order centered scheme flag
   LOGICAL ::   ln_traadv_tvd       ! TVD scheme flag
 + LOGICAL ::   ln_traadv_tvd_zts   ! TVD scheme flag with vertical sub time-stepping
   LOGICAL ::   ln_traadv_muscl     ! MUSCL scheme flag
   LOGICAL ::   ln_traadv_muscl2    ! MUSCL2 scheme flag
…
   CASE ( 5 )   ;   CALL tra_adv_ubs    ( kt, nit000, 'TRA', r2dtra, zun, zvn, zwn, tsb, tsn, tsa, jpts )   ! UBS
   CASE ( 6 )   ;   CALL tra_adv_qck    ( kt, nit000, 'TRA', r2dtra, zun, zvn, zwn, tsb, tsn, tsa, jpts )   ! QUICKEST
 + CASE ( 7 )   ;   CALL tra_adv_tvd_zts( kt, nit000, 'TRA', r2dtra, zun, zvn, zwn, tsb, tsn, tsa, jpts )   ! TVD ZTS
   !
   CASE (-1 )                       !==  esopa: test all possibility with control print  ==!
…
      &             ln_traadv_muscl, ln_traadv_muscl2,   &
      &             ln_traadv_ubs  , ln_traadv_qck,      &
 -    &             ln_traadv_msc_ups
 +    &             ln_traadv_msc_ups, ln_traadv_tvd_zts
   !!----------------------------------------------------------------------
…
      WRITE(numout,*) '      QUICKEST advection scheme          ln_traadv_qck     = ', ln_traadv_qck
      WRITE(numout,*) '      upstream scheme within muscl       ln_traadv_msc_ups = ', ln_traadv_msc_ups
 +    WRITE(numout,*) '      TVD advection scheme with zts      ln_traadv_tvd_zts = ', ln_traadv_tvd_zts
   ENDIF
…
   IF( ln_traadv_ubs     )   ioptio = ioptio + 1
   IF( ln_traadv_qck     )   ioptio = ioptio + 1
 + IF( ln_traadv_tvd_zts )   ioptio = ioptio + 1
   IF( lk_esopa          )   ioptio = 1
…
   IF( ln_traadv_ubs     )   nadv = 5
   IF( ln_traadv_qck     )   nadv = 6
 + IF( ln_traadv_tvd_zts )   nadv = 7
   IF( lk_esopa          )   nadv = -1
…
   IF( nadv ==  5 )   WRITE(numout,*) '      UBS scheme is used'
   IF( nadv ==  6 )   WRITE(numout,*) '      QUICKEST scheme is used'
 + IF( nadv ==  7 )   WRITE(numout,*) '      TVD ZTS scheme is used'
   IF( nadv == -1 )   WRITE(numout,*) '      esopa test: use all advection scheme'
   ENDIF
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_tvd.F90 (r4666 → r4934)

   PRIVATE

 - PUBLIC   tra_adv_tvd       ! routine called by step.F90
 + PUBLIC   tra_adv_tvd       ! routine called by traadv.F90
 + PUBLIC   tra_adv_tvd_zts   ! routine called by traadv.F90

   LOGICAL ::   l_trd   ! flag to compute trends
…
   END SUBROUTINE tra_adv_tvd

New routine, added in full:

   SUBROUTINE tra_adv_tvd_zts ( kt, kit000, cdtype, p2dt, pun, pvn, pwn,   &
      &                         ptb, ptn, pta, kjpt )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE tra_adv_tvd_zts  ***
      !!
      !! ** Purpose :   Compute the now trend due to total advection of
      !!      tracers and add it to the general trend of tracer equations
      !!
      !! ** Method  :   TVD ZTS scheme, i.e. 2nd order centered scheme with
      !!      corrected flux (monotonic correction). This version uses sub-
      !!      timestepping for the vertical advection which increases stability
      !!      when vertical metrics are small.
      !!      note: - this advection scheme needs a leap-frog time scheme
      !!
      !! ** Action : - update (pta) with the now advective tracer trends
      !!             - save the trends
      !!----------------------------------------------------------------------
      USE oce     , ONLY:   zwx => ua , zwy => va   ! (ua,va) used as workspace
      !
      INTEGER                              , INTENT(in   ) ::   kt              ! ocean time-step index
      INTEGER                              , INTENT(in   ) ::   kit000          ! first time step index
      CHARACTER(len=3)                     , INTENT(in   ) ::   cdtype          ! =TRA or TRC (tracer indicator)
      INTEGER                              , INTENT(in   ) ::   kjpt            ! number of tracers
      REAL(wp), DIMENSION(        jpk     ), INTENT(in   ) ::   p2dt            ! vertical profile of tracer time-step
      REAL(wp), DIMENSION(jpi,jpj,jpk     ), INTENT(in   ) ::   pun, pvn, pwn   ! 3 ocean velocity components
      REAL(wp), DIMENSION(jpi,jpj,jpk,kjpt), INTENT(in   ) ::   ptb, ptn        ! before and now tracer fields
      REAL(wp), DIMENSION(jpi,jpj,jpk,kjpt), INTENT(inout) ::   pta             ! tracer trend
      !
      REAL(wp), DIMENSION( jpk ) ::   zts       ! length of sub-timestep for vertical advection
      REAL(wp), DIMENSION( jpk ) ::   zr_p2dt   ! reciprocal of tracer timestep
      INTEGER  ::   ji, jj, jk, jl, jn          ! dummy loop indices
      INTEGER  ::   jnzts = 5                   ! number of sub-timesteps for vertical advection
      INTEGER  ::   jtb, jtn, jta               ! sub timestep pointers for leap-frog/euler forward steps
      INTEGER  ::   jtaken                      ! toggle for collecting appropriate fluxes from sub timesteps
      REAL(wp) ::   z_rzts                      ! Fractional length of Euler forward sub-timestep for vertical advection
      REAL(wp) ::   z2dtt, zbtr, ztra           ! local scalar
      REAL(wp) ::   zfp_ui, zfp_vj, zfp_wk      !   -      -
      REAL(wp) ::   zfm_ui, zfm_vj, zfm_wk      !   -      -
      REAL(wp), POINTER, DIMENSION(:,:  )   ::   zwx_sav , zwy_sav
      REAL(wp), POINTER, DIMENSION(:,:,:)   ::   zwi, zwz, zhdiv, zwz_sav, zwzts
      REAL(wp), POINTER, DIMENSION(:,:,:)   ::   ztrdx, ztrdy, ztrdz
      REAL(wp), POINTER, DIMENSION(:,:,:,:) ::   ztrs
      !!----------------------------------------------------------------------
      !
      IF( nn_timing == 1 )   CALL timing_start('tra_adv_tvd_zts')
      !
      CALL wrk_alloc( jpi, jpj, zwx_sav, zwy_sav )
      CALL wrk_alloc( jpi, jpj, jpk, zwi, zwz, zhdiv, zwz_sav, zwzts )
      CALL wrk_alloc( jpi, jpj, jpk, 3, ztrs )
      !
      IF( kt == kit000 )  THEN
         IF(lwp) WRITE(numout,*)
         IF(lwp) WRITE(numout,*) 'tra_adv_tvd_zts : TVD ZTS advection scheme on ', cdtype
         IF(lwp) WRITE(numout,*) '~~~~~~~~~~~'
      ENDIF
      !
      l_trd = .FALSE.
      IF( ( cdtype == 'TRA' .AND. l_trdtra ) .OR. ( cdtype == 'TRC' .AND. l_trdtrc ) )   l_trd = .TRUE.
      !
      IF( l_trd )  THEN
         CALL wrk_alloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz )
         ztrdx(:,:,:) = 0._wp   ;   ztrdy(:,:,:) = 0._wp   ;   ztrdz(:,:,:) = 0._wp
      ENDIF
      !
      zwi(:,:,:) = 0._wp
      z_rzts = 1._wp / REAL( jnzts, wp )
      zr_p2dt(:) = 1._wp / p2dt(:)
      !
      !                                                ! ===========
      DO jn = 1, kjpt                                  ! tracer loop
         !                                             ! ===========
         ! 1. Bottom value : flux set to zero
         ! ----------------------------------
         zwx(:,:,jpk) = 0._wp   ;   zwz(:,:,jpk) = 0._wp
         zwy(:,:,jpk) = 0._wp   ;   zwi(:,:,jpk) = 0._wp

         ! 2. upstream advection with initial mass fluxes & intermediate update
         ! --------------------------------------------------------------------
         ! upstream tracer flux in the i and j direction
         DO jk = 1, jpkm1
            DO jj = 1, jpjm1
               DO ji = 1, fs_jpim1   ! vector opt.
                  ! upstream scheme
                  zfp_ui = pun(ji,jj,jk) + ABS( pun(ji,jj,jk) )
                  zfm_ui = pun(ji,jj,jk) - ABS( pun(ji,jj,jk) )
                  zfp_vj = pvn(ji,jj,jk) + ABS( pvn(ji,jj,jk) )
                  zfm_vj = pvn(ji,jj,jk) - ABS( pvn(ji,jj,jk) )
                  zwx(ji,jj,jk) = 0.5_wp * ( zfp_ui * ptb(ji,jj,jk,jn) + zfm_ui * ptb(ji+1,jj  ,jk,jn) )
                  zwy(ji,jj,jk) = 0.5_wp * ( zfp_vj * ptb(ji,jj,jk,jn) + zfm_vj * ptb(ji  ,jj+1,jk,jn) )
               END DO
            END DO
         END DO

         ! upstream tracer flux in the k direction
         ! Surface value
         IF( lk_vvl ) THEN   ;   zwz(:,:, 1 ) = 0._wp                         ! volume variable
         ELSE                ;   zwz(:,:, 1 ) = pwn(:,:,1) * ptb(:,:,1,jn)    ! linear free surface
         ENDIF
         ! Interior value
         DO jk = 2, jpkm1
            DO jj = 1, jpj
               DO ji = 1, jpi
                  zfp_wk = pwn(ji,jj,jk) + ABS( pwn(ji,jj,jk) )
                  zfm_wk = pwn(ji,jj,jk) - ABS( pwn(ji,jj,jk) )
                  zwz(ji,jj,jk) = 0.5_wp * ( zfp_wk * ptb(ji,jj,jk,jn) + zfm_wk * ptb(ji,jj,jk-1,jn) )
               END DO
            END DO
         END DO

         ! total advective trend
         DO jk = 1, jpkm1
            z2dtt = p2dt(jk)
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1   ! vector opt.
                  zbtr = 1._wp / ( e1t(ji,jj) * e2t(ji,jj) * fse3t(ji,jj,jk) )
                  ! total intermediate advective trends
                  ztra = - zbtr * (  zwx(ji,jj,jk) - zwx(ji-1,jj  ,jk  )   &
                     &             + zwy(ji,jj,jk) - zwy(ji  ,jj-1,jk  )   &
                     &             + zwz(ji,jj,jk) - zwz(ji  ,jj  ,jk+1) )
                  ! update and guess with monotonic scheme
                  pta(ji,jj,jk,jn) = pta(ji,jj,jk,jn) + ztra
                  zwi(ji,jj,jk)    = ( ptb(ji,jj,jk,jn) + z2dtt * ztra ) * tmask(ji,jj,jk)
               END DO
            END DO
         END DO
         !                             ! Lateral boundary conditions on zwi  (unchanged sign)
         CALL lbc_lnk( zwi, 'T', 1. )

         !                             ! trend diagnostics (contribution of upstream fluxes)
         IF( l_trd )  THEN
            ! store intermediate advective trends
            ztrdx(:,:,:) = zwx(:,:,:)   ;   ztrdy(:,:,:) = zwy(:,:,:)   ;   ztrdz(:,:,:) = zwz(:,:,:)
         END IF
         !                             ! "Poleward" heat and salt transports (contribution of upstream fluxes)
         IF( cdtype == 'TRA' .AND. ln_diaptr .AND. ( MOD( kt, nn_fptr ) == 0 ) ) THEN
            IF( jn == jp_tem )   htr_adv(:) = ptr_vj( zwy(:,:,:) )
            IF( jn == jp_sal )   str_adv(:) = ptr_vj( zwy(:,:,:) )
         ENDIF

         ! 3. antidiffusive flux : high order minus low order
         ! --------------------------------------------------
         ! antidiffusive flux on i and j

         DO jk = 1, jpkm1

            DO jj = 1, jpjm1
               DO ji = 1, fs_jpim1   ! vector opt.
                  zwx_sav(ji,jj) = zwx(ji,jj,jk)
                  zwy_sav(ji,jj) = zwy(ji,jj,jk)

                  zwx(ji,jj,jk) = 0.5_wp * pun(ji,jj,jk) * ( ptn(ji,jj,jk,jn) + ptn(ji+1,jj,jk,jn) )
                  zwy(ji,jj,jk) = 0.5_wp * pvn(ji,jj,jk) * ( ptn(ji,jj,jk,jn) + ptn(ji,jj+1,jk,jn) )
               END DO
            END DO

            DO jj = 2, jpjm1         ! partial horizontal divergence
               DO ji = fs_2, fs_jpim1
                  zhdiv(ji,jj,jk) = (  zwx(ji,jj,jk) - zwx(ji-1,jj  ,jk)   &
                     &               + zwy(ji,jj,jk) - zwy(ji  ,jj-1,jk) )
               END DO
            END DO

            DO jj = 1, jpjm1
               DO ji = 1, fs_jpim1   ! vector opt.
                  zwx(ji,jj,jk) = zwx(ji,jj,jk) - zwx_sav(ji,jj)
                  zwy(ji,jj,jk) = zwy(ji,jj,jk) - zwy_sav(ji,jj)
               END DO
            END DO
         END DO

         ! antidiffusive flux on k
         zwz(:,:,1) = 0._wp           ! Surface value
         zwz_sav(:,:,:) = zwz(:,:,:)
         !
         ztrs(:,:,:,1) = ptb(:,:,:,jn)
         zwzts(:,:,:) = 0._wp

         DO jl = 1, jnzts             ! Start of sub timestepping loop

            IF( jl == 1 ) THEN        ! Euler forward to kick things off
               jtb = 1   ;   jtn = 1   ;   jta = 2
               zts(:) = p2dt(:) * z_rzts
               jtaken = MOD( jnzts + 1 , 2 )   ! Toggle to collect every second flux
               !                               ! starting at jl = 1 if jnzts is odd;
               !                               ! starting at jl = 2 otherwise
            ELSEIF( jl == 2 ) THEN    ! First leapfrog step
               jtb = 1   ;   jtn = 2   ;   jta = 3
               zts(:) = 2._wp * p2dt(:) * z_rzts
            ELSE                      ! Shuffle pointers for subsequent leapfrog steps
               jtb = MOD(jtb,3) + 1
               jtn = MOD(jtn,3) + 1
               jta = MOD(jta,3) + 1
            ENDIF
            DO jk = 2, jpkm1          ! Interior value
               DO jj = 2, jpjm1
                  DO ji = fs_2, fs_jpim1
                     zwz(ji,jj,jk) = 0.5_wp * pwn(ji,jj,jk) * ( ztrs(ji,jj,jk,jtn) + ztrs(ji,jj,jk-1,jtn) )
                     IF( jtaken == 0 )   zwzts(ji,jj,jk) = zwzts(ji,jj,jk) + zwz(ji,jj,jk)*zts(jk)   ! Accumulate time-weighted vertical flux
                  END DO
               END DO
            END DO

            jtaken = MOD( jtaken + 1 , 2 )

            DO jk = 2, jpkm1          ! Interior value
               DO jj = 2, jpjm1
                  DO ji = fs_2, fs_jpim1
                     zbtr = 1._wp / ( e1t(ji,jj) * e2t(ji,jj) * fse3t(ji,jj,jk) )
                     ! total advective trends
                     ztra = - zbtr * ( zhdiv(ji,jj,jk) + zwz(ji,jj,jk) - zwz(ji,jj,jk+1) )
                     ztrs(ji,jj,jk,jta) = ztrs(ji,jj,jk,jtb) + zts(jk) * ztra
                  END DO
               END DO
            END DO

         END DO

         DO jk = 2, jpkm1             ! Anti-diffusive vertical flux using average flux from the sub-timestepping
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1
                  zwz(ji,jj,jk) = zwzts(ji,jj,jk) * zr_p2dt(jk) - zwz_sav(ji,jj,jk)
               END DO
            END DO
         END DO
         CALL lbc_lnk( zwx, 'U', -1. )   ;   CALL lbc_lnk( zwy, 'V', -1. )   ! Lateral boundary conditions
         CALL lbc_lnk( zwz, 'W',  1. )

         ! 4. monotonicity algorithm
         ! -------------------------
         CALL nonosc( ptb(:,:,:,jn), zwx, zwy, zwz, zwi, p2dt )

         ! 5. final trend with corrected fluxes
         ! ------------------------------------
         DO jk = 1, jpkm1
            DO jj = 2, jpjm1
               DO ji = fs_2, fs_jpim1   ! vector opt.
                  zbtr = 1. / ( e1t(ji,jj) * e2t(ji,jj) * fse3t(ji,jj,jk) )
                  ! total advective trends
                  ztra = - zbtr * (  zwx(ji,jj,jk) - zwx(ji-1,jj  ,jk  )   &
                     &             + zwy(ji,jj,jk) - zwy(ji  ,jj-1,jk  )   &
                     &             + zwz(ji,jj,jk) - zwz(ji  ,jj  ,jk+1) )
                  ! add them to the general tracer trends
                  pta(ji,jj,jk,jn) = pta(ji,jj,jk,jn) + ztra
               END DO
            END DO
         END DO

         !                             ! trend diagnostics (contribution of the corrected fluxes)
         IF( l_trd )  THEN
            ztrdx(:,:,:) = ztrdx(:,:,:) + zwx(:,:,:)   ! <<< Add to previously computed
            ztrdy(:,:,:) = ztrdy(:,:,:) + zwy(:,:,:)   ! <<< Add to previously computed
            ztrdz(:,:,:) = ztrdz(:,:,:) + zwz(:,:,:)   ! <<< Add to previously computed

            CALL trd_tra( kt, cdtype, jn, jptra_trd_xad, ztrdx, pun, ptn(:,:,:,jn) )
            CALL trd_tra( kt, cdtype, jn, jptra_trd_yad, ztrdy, pvn, ptn(:,:,:,jn) )
            CALL trd_tra( kt, cdtype, jn, jptra_trd_zad, ztrdz, pwn, ptn(:,:,:,jn) )
         END IF
         !                             ! "Poleward" heat and salt transports (contribution of the corrected fluxes)
         IF( cdtype == 'TRA' .AND. ln_diaptr .AND. ( MOD( kt, nn_fptr ) == 0 ) ) THEN
            IF( jn == jp_tem )   htr_adv(:) = ptr_vj( zwy(:,:,:) ) + htr_adv(:)
            IF( jn == jp_sal )   str_adv(:) = ptr_vj( zwy(:,:,:) ) + str_adv(:)
         ENDIF
         !
      END DO
      !
      CALL wrk_dealloc( jpi, jpj, jpk, zwi, zwz, zhdiv, zwz_sav, zwzts )
      CALL wrk_dealloc( jpi, jpj, jpk, 3, ztrs )
      CALL wrk_dealloc( jpi, jpj, zwx_sav, zwy_sav )
      IF( l_trd )   CALL wrk_dealloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz )
      !
      IF( nn_timing == 1 )   CALL timing_stop('tra_adv_tvd_zts')
      !
   END SUBROUTINE tra_adv_tvd_zts

   SUBROUTINE nonosc( pbef, paa, pbb, pcc, paft, p2dt )
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fadd_keys.sh (r2603 → r4934)

 -# Script to add a set of fkey when compiling a configuration.
 -# The list of fkey to be added has to be enclosed with " ".
 +# Script to add a set of key when compiling a configuration.
 +# The list of key to be added has to be enclosed with " ".
  # A 'sed' is performed to modify the CONFIG_NAME/cpp.fcm file to
  # add the new key(s).
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fcheck_config.sh (r3294 → r4934)

  # - Nothing set, use the previous in use
  #
 -# We use TOOLS/CONFIG_DIR/cfg.txt to check if the onfiguration exists.
 +# We use TOOLS/CONFIG_DIR/cfg.txt to check if the configuration exists.
  #
  # EXAMPLES
…
      echo "Use makenemo -n MYCONFIG"
      echo "or  makenemo -h for help"
 -    echo "Using defaut configuration : ${NEW_CONF}"
 +    echo "Using default configuration : ${NEW_CONF}"
  fi
  if [ "$1" == cfg.txt ]; then
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fclean_var.sh (r3294 → r4934)

  # ----------------------------
 -# Clean environement variables
 +# Clean environment variables
  # ----------------------------
  #
…
  #
 -# Clean environement variables
 +# Clean environment variables
  #
  # EXAMPLES
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fcopy_dir.sh (r3294 → r4934)

  #
 -# When a refenrence configuration is set,
 +# When a reference configuration is set,
  # Copy NEMO sub-directories needed (OPA_SRC, TOP_SRC ...)
  #
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fdel_keys.sh (r2584 → r4934)

  # Add cpp keys when compiling a configuration, key list has to be enclosed with " ".
 -# We perform a 'sed' on the CONFIG_NAME/CPP.fcm file, contianing the list of keys.
 +# We perform a 'sed' on the CONFIG_NAME/CPP.fcm file, containing the list of keys.
  #
  # EXAMPLES
branches/2014/dev_r4879_UKMO_NOC_MERGE/NEMOGCM/TOOLS/COMPILE/Fmake_WORK.sh (r3680 → r4934)

  # Make the WORK directory:
  #
 -# - Create lin in NEW_CONF/WORK
 +# - Create line in NEW_CONF/WORK
  # - Use specified sub-directories previously
  # - OPA has to be done first !!!