- Timestamp: 2018-11-15T17:27:18+01:00
- Location: NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE
- Files: 2 added, 23 edited
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdydyn2d.F90
r10068 → r10314: in bdy_dyn2d_frs, bdy_dyn2d_fla, bdy_dyn2d_orlanski and the ssh update, every lbc_bdy_lnk call (on pua2d, pva2d, spgu(:,:) and zssh(:,:)) now passes the name of the calling routine as a new first argument, e.g.
    CALL lbc_bdy_lnk( pua2d, 'U', -1., ib_bdy )
becomes
    CALL lbc_bdy_lnk( 'bdydyn2d', pua2d, 'U', -1., ib_bdy )
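For callers of the boundary routines, the change is purely an extra tag. A minimal, hypothetical caller is sketched below for illustration only: the subroutine, module and variable names (my_bdy_update, zu2d, the USE lines) are not part of the changeset; only the lbc_bdy_lnk argument order comes from the diffs above.

    SUBROUTINE my_bdy_update( ib_bdy )
       USE dom_oce            ! domain sizes (jpi, jpj) and kind wp, as in the other BDY routines
       USE lbclnk             ! lbc_bdy_lnk
       INTEGER, INTENT(in)          :: ib_bdy   ! BDY boundary set index
       REAL(wp), DIMENSION(jpi,jpj) :: zu2d     ! some U-point work array
       !
       zu2d(:,:) = 0._wp
       ! the only change for callers: the routine name is passed as a new first argument,
       ! so the communication report can attribute this halo exchange to its caller
       CALL lbc_bdy_lnk( 'my_bdy_update', zu2d, 'U', -1., ib_bdy )
    END SUBROUTINE my_bdy_update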
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdydyn3d.F90
r10170 → r10314: throughout the module (including bdy_dyn3d_orlanski and bdy_dyn3d_nmn), the paired lbc_bdy_lnk calls on ua and va gain 'bdydyn3d' as first argument, e.g.
    CALL lbc_bdy_lnk( ua, 'U', -1., ib_bdy )   becomes   CALL lbc_bdy_lnk( 'bdydyn3d', ua, 'U', -1., ib_bdy )
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdyice.F90
r10069 → r10314: in bdy_ice_frs the calling-routine name 'bdyice' is added as first argument to every lbc_bdy_lnk call (a_i, h_i, h_s, oa_i, a_ip, v_ip, s_i, t_su, v_i, v_s, sv_i, t_s, e_s, t_i, e_i), and the same is done for the u_ice and v_ice exchanges in the ice-velocity boundary routine.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdytra.F90
r10068 → r10314: the lbc_bdy_lnk call on tsa(:,:,:,jn) gains 'bdytra' as first argument.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdyvol.F90
r10297 → r10314: the commented-out (!!gm) glob_sum line for z_cflxemp is updated to pass 'bdyvol' as first argument, consistent with the existing mpp_sum( 'bdyvol', z_cflxemp ) call above it.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diacfl.F90
r10068 → r10314: apart from a realignment of the local declarations, the three mpp_maxloc calls now pass the routine name 'diacfl' as first argument and return the location through a 3-element index array instead of three scalar arguments:
    CALL mpp_maxloc( zCu_cfl, umask, zCu_max, iloc_u(1), iloc_u(2), iloc_u(3) )
becomes
    CALL mpp_maxloc( 'diacfl', zCu_cfl, umask, zCu_max, iloc_u )
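The same array-valued location interface is used below in domain.F90, domngb.F90 and domvvl.F90. A short hedged sketch of the new calling sequence; only mpp_maxloc and its argument order come from this changeset, the surrounding variables are illustrative:

    REAL(wp)               :: zmax     ! global maximum of the Courant number
    INTEGER , DIMENSION(3) :: iloc     ! (i,j,k) location of that maximum
    !
    ! routine name first, then field, mask, returned maximum and returned location array
    CALL mpp_maxloc( 'diacfl', zCu_cfl, umask, zmax, iloc )
    IF(lwp) WRITE(numout,*) 'CFL max ', zmax, ' at i,j,k = ', iloc(1), iloc(2), iloc(3)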
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diahsb.F90
r10068 → r10314: every glob_sum and glob_sum_full call in dia_hsb and dia_hsb_init gains the tag 'diahsb' as first argument (forcing trends, runoff/ice-shelf/solar/geothermal terms, the wn and ssh contributions, the volume, heat and salt content variations, the total volume and the total surface area), e.g.
    z_frc_trd_t = glob_sum( sbc_tsc(:,:,jp_tem) * surf(:,:) )
becomes
    z_frc_trd_t = glob_sum( 'diahsb', sbc_tsc(:,:,jp_tem) * surf(:,:) )
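Every glob_sum / glob_sum_full change in this changeset follows the same pattern: the character tag names the caller for the communication report and leaves the numerical result unchanged. A one-line hedged sketch, using the masked-surface sum that appears in this file:

    ! total masked ocean surface, tagged with the name of the calling module
    surf_tot = glob_sum( 'diahsb', e1e2t(:,:) * tmask_i(:,:) )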
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/closea.F90
r10297 → r10314: the glob_sum calls used to compute the global ocean surface, the closed-sea surface areas (surf, surfr, surfe) and the closed-sea freshwater fluxes (zfwf, zfwfr, zfwfe) gain 'closea' as first argument.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/domain.F90
r10068 → r10314: in dom_ctl, the eight scalar indices (iimi1, ijmi1, iimi2, ijmi2, iima1, ijma1, iima2, ijma2) are replaced by four 2-element arrays (imi1, imi2, ima1, ima2); mpp_minloc / mpp_maxloc are called with the routine name 'domain' and the index array, e.g.
    CALL mpp_minloc( e1t(:,:), tmask_i(:,:), ze1min, iimi1, ijmi1 )
becomes
    CALL mpp_minloc( 'domain', e1t(:,:), tmask_i(:,:), ze1min, imi1 )
and the non-MPP branch and the WRITE statements of the extrema are adapted to the array form.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/domngb.F90
r10068 → r10314: the location of the nearest grid point is now obtained with CALL mpp_minloc( 'domngb', zdist(:,:), zmask, zmini, iloc ), with kii = iloc(1) and kjj = iloc(2) recovered afterwards.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/domvvl.F90
r10297 → r10314: the scale-factor check now calls CALL mpp_maxloc( 'domvvl', ze3t, tmask, z_tmax, ijk_max ) and CALL mpp_minloc( 'domvvl', ze3t, tmask, z_tmin, ijk_min ), passing the index arrays directly.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icbutl.F90
r10297 → r10314: all lbc_lnk_icb calls in icb_utl_copy (uo_e, vo_e, ff_e, ua_e, va_e, fr_e, tt_e, the SI3 fields hicth, ui_e, vi_e, and ssh_e) gain the tag 'icbutl' as first argument.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/lbc_lnk_generic.h90
r10068 → r10314: ROUTINE_LNK gains cdname (the name of the calling subroutine) as its first dummy argument, in both the MULTI and the single-array forms, declared as CHARACTER(len=*), INTENT(in) :: cdname.
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/lbclnk.F90
r10068 → r10314: the lbc_bdy_lnk interface is extended with a 4D variant, and all wrappers now take and forward the calling-routine name:
- new SUBROUTINE lbc_bdy_lnk_4d( cdname, pt4d, cd_type, psgn, ib_bdy ), which calls lbc_lnk_4d( cdname, pt4d, cd_type, psgn );
- lbc_bdy_lnk_3d and lbc_bdy_lnk_2d gain the cdname argument and pass it on to lbc_lnk_3d / lbc_lnk_2d;
- lbc_lnk_2d_icb (the variant with extra halo sizes ki, kj) likewise gains cdname and forwards it to lbc_lnk_2d.
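For reference, the new 4D wrapper is a thin pass-through to the full-domain exchange; the sketch below is reconstructed from the diff above, with the argument comments lightly tidied:

    SUBROUTINE lbc_bdy_lnk_4d( cdname, pt4d, cd_type, psgn, ib_bdy )
       CHARACTER(len=*)            , INTENT(in   ) :: cdname    ! name of the calling subroutine
       REAL(wp), DIMENSION(:,:,:,:), INTENT(inout) :: pt4d      ! 4D array on which the lbc is applied
       CHARACTER(len=1)            , INTENT(in   ) :: cd_type   ! nature of pt4d grid-points
       REAL(wp)                    , INTENT(in   ) :: psgn      ! sign used across the north fold
       INTEGER                     , INTENT(in   ) :: ib_bdy    ! BDY boundary set
       !
       CALL lbc_lnk_4d( cdname, pt4d, cd_type, psgn )           ! fall back on the full-domain exchange
    END SUBROUTINE lbc_bdy_lnk_4d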
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/lib_mpp.F90
r10300 → r10314: the largest change of the set.
- mpp_max_multiple is removed: its PUBLIC declaration, its interface and the mppmax_real_multiple implementation (and its non-MPP dummy) all disappear.
- Two new public module variables: ncom_freq (frequency of the communication diagnostic) and the parameter ncom_rec_max = 2000 (maximum number of recorded communications).
- The hand-written mpp_minloc2d/3d and mpp_maxloc2d/3d routines are replaced by preprocessor-generated versions included from mpp_loc_generic.h90 (OPERATION_MINLOC / OPERATION_MAXLOC with DIM_2d / DIM_3d), which provide the cdname argument and the index-array interface used by the callers above.
- The extended north-fold gather (MPI_ALLGATHER) and mpp_lnk_2d_icb have their MPI traffic bracketed by tic_tac(.TRUE.) / tic_tac(.FALSE.); mpp_lnk_2d_icb also gains the cdname argument and registers itself through mpp_report.
- New SUBROUTINE mpp_report( cdname, kpk, kpl, kpf, ld_lbc, ld_glb ) centralises the communication diagnostics: on time step nit000+ncom_freq it records each lbc or global communication (caller name, size of the 3rd/4th dimensions, number of arrays), and on nit000+2*ncom_freq it writes communication_report.txt with the number of exchanged halos, the 3D and multi-array counts, the maximum array size, and the list of callers of lbc_lnk and of the global communications.
- In tic_tac, the #if defined key_mpp_mpi guard around the MPI_Wtime calls is dropped.
- In the non-MPP section, the mpp_sum interface now lists mppsum_int, mppsum_a_int, mppsum_real, mppsum_a_real, mppsum_realdd and mppsum_a_realdd, and all the explicit dummy bodies for the sum, min, max and min/maxloc routines are replaced by the same generic includes (mpp_allreduce_generic.h90 and mpp_loc_generic.h90).
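Taken together, these changes mean each communication routine self-registers with a single call on entry instead of carrying its own bookkeeping. A hedged sketch of the pattern as it now appears at the top of the generic halo-exchange and global-reduction routines (both lines are taken from the diffs in this changeset; surrounding code omitted):

    ! halo exchange (lbc) routines: record the caller name and array shape once,
    ! on the master area only (narea == 1) and before the report file is opened (numcom == -1)
    IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. )
    !
    ! global reductions (glob_sum, mpp_min/max/sum): same idea, flagged as a global communication
    IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_glb = .TRUE. )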
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/mpp_allreduce_generic.h90
r10300 → r10314: the MPI body is now protected by #if defined key_mpp_mpi (with an error print in the #else branch); the inline code that recorded global communications is replaced by a single call to mpp_report( cdname, ld_glb = .TRUE. ) when narea == 1 and numcom == -1; and tic_tac now times only the mpi_allreduce itself (the work array is allocated and deallocated outside the timed region).
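Because the non-MPP library now includes the same generic files, the reduction is compiled only under key_mpp_mpi. A hedged sketch of the resulting structure of the include, with only the guard, the reduction call and the error print taken from this changeset and everything else omitted:

    #if defined key_mpp_mpi
       CALL mpi_allreduce( ARRAY_IN(:), work, ipi, MPI_TYPE, MPI_OPERATION, ilocalcomm, ierror )
    #else
       WRITE(*,*) 'ROUTINE_ALLREDUCE: You should not have seen this print! error?'
    #endif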
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/mpp_bdy_generic.h90
r10068 → r10314: ROUTINE_BDY gains the cdname dummy argument, registers itself through mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. ), and its east-west and north-south MPI exchanges are bracketed by tic_tac(.TRUE.) / tic_tac(.FALSE.).
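The timing instrumentation added here, in mpp_lnk_generic.h90 and in lib_mpp.F90 always uses the same bracket around the MPI traffic; a hedged sketch with the exchange itself elided:

    IF( ln_timing ) CALL tic_tac(.TRUE.)      ! start counting waiting + communication time
    ! ... mppsend / mpprecv of the halo buffers, or the mpi_allgather / mpi_allreduce ...
    IF( ln_timing ) CALL tic_tac(.FALSE.)     ! stop counting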
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/mpp_lnk_generic.h90
r10297 r10314 63 63 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 64 64 INTEGER :: ierr 65 INTEGER :: icom_freq66 65 REAL(wp) :: zland 67 66 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend … … 73 72 ipl = L_SIZE(ptab) ! 4th - 74 73 ipf = F_SIZE(ptab) ! 5th - use in "multi" case (array of pointers) 74 ! 75 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. ) 75 76 ! 76 77 ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,2), zt3sn(jpi,nn_hls,ipk,ipl,ipf,2), & … … 151 152 ! 152 153 ! ! Migrations 153 imigr = nn_hls * jpj * ipk * ipl * ipf 154 ! 155 IF( narea == 1 ) THEN 156 157 ! find the smallest common frequency: default = frequency product, if multiple, choose the larger of the 2 frequency 158 icom_freq = ncom_fsbc * ncom_dttrc 159 IF( MOD( MAX(ncom_fsbc,ncom_dttrc), MIN(ncom_fsbc,ncom_dttrc) ) == 0 ) icom_freq = MAX(ncom_fsbc,ncom_dttrc) 160 161 IF ( ncom_stp == nit000+icom_freq ) THEN ! avoid to count extra communications in potential initializations at nit000 162 IF( .NOT. ALLOCATED( ncomm_sequence) ) THEN 163 ALLOCATE( ncomm_sequence(2000,2), STAT=ierr ) 164 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'lnk_generic, cannot allocate ncomm_sequence' ) 165 ALLOCATE( crname_lbc(2000), STAT=ierr ) 166 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'lnk_generic, cannot allocate crname' ) 167 ENDIF 168 n_sequence_lbc = n_sequence_lbc + 1 169 IF( n_sequence_lbc > 2000 ) CALL ctl_stop( 'STOP', 'lnk_generic, increase ncomm_sequence first dimension' ) 170 ncomm_sequence(n_sequence_lbc,1) = ipk*ipl ! size of 3rd and 4th dimensions 171 ncomm_sequence(n_sequence_lbc,2) = ipf ! number of arrays to be treated (multi) 172 crname_lbc (n_sequence_lbc) = cdname ! keep the name of the calling routine 173 ELSE IF ( ncom_stp == (nit000+2*icom_freq) ) THEN 174 IF ( numcom == -1 ) THEN 175 CALL ctl_opn( numcom, 'communication_report.txt', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea ) 176 WRITE(numcom,*) ' ' 177 WRITE(numcom,*) ' ------------------------------------------------------------' 178 WRITE(numcom,*) ' Communication pattern report (second oce+sbc+top time step):' 179 WRITE(numcom,*) ' ------------------------------------------------------------' 180 WRITE(numcom,*) ' ' 181 WRITE(numcom,'(A,I4)') ' Exchanged halos : ', n_sequence_lbc 182 jj = 0; jk = 0; jf = 0; jh = 0 183 DO ji = 1, n_sequence_lbc 184 IF ( ncomm_sequence(ji,1) .GT. 1 ) jk = jk + 1 185 IF ( ncomm_sequence(ji,2) .GT. 1 ) jf = jf + 1 186 IF ( ncomm_sequence(ji,1) .GT. 1 .AND. ncomm_sequence(ji,2) .GT. 
1 ) jj = jj + 1 187 jh = MAX (jh, ncomm_sequence(ji,1)*ncomm_sequence(ji,2)) 188 END DO 189 WRITE(numcom,'(A,I3)') ' 3D Exchanged halos : ', jk 190 WRITE(numcom,'(A,I3)') ' Multi arrays exchanged halos : ', jf 191 WRITE(numcom,'(A,I3)') ' from which 3D : ', jj 192 WRITE(numcom,'(A,I10)') ' Array max size : ', jh*jpi*jpj 193 WRITE(numcom,*) ' ' 194 WRITE(numcom,*) ' lbc_lnk called' 195 jj = 1 196 DO ji = 2, n_sequence_lbc 197 IF( crname_lbc(ji-1) /= crname_lbc(ji) ) THEN 198 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(ji-1)) 199 jj = 0 200 END IF 201 jj = jj + 1 202 END DO 203 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc)) 204 WRITE(numcom,*) ' ' 205 IF ( n_sequence_glb > 0 ) THEN 206 WRITE(numcom,'(A,I4)') ' Global communications : ', n_sequence_glb 207 jj = 1 208 DO ji = 2, n_sequence_glb 209 IF( crname_glb(ji-1) /= crname_glb(ji) ) THEN 210 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(ji-1)) 211 jj = 0 212 END IF 213 jj = jj + 1 214 END DO 215 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb)) 216 DEALLOCATE(crname_glb) 217 ELSE 218 WRITE(numcom,*) ' No MPI global communication ' 219 ENDIF 220 WRITE(numcom,*) ' ' 221 WRITE(numcom,*) ' -----------------------------------------------' 222 WRITE(numcom,*) ' ' 223 DEALLOCATE(ncomm_sequence) 224 DEALLOCATE(crname_lbc) 225 ENDIF 226 ENDIF 227 ENDIF 154 imigr = nn_hls * jpj * ipk * ipl * ipf 228 155 ! 229 156 IF( ln_timing ) CALL tic_tac(.TRUE.) -
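The long inline block removed above (the ncomm_sequence/crname_lbc bookkeeping and the writing of communication_report.txt on the second sampled time step) is presumably what the new mpp_report call, added near the top of the template, now performs centrally; mpp_report itself is defined elsewhere in this changeset and is not shown in this diff. Under that assumption, the recording half of such a routine could look like the sketch below, reusing the module-level counters that appear in the deleted code (n_sequence_lbc, ncomm_sequence, crname_lbc); treat the routine name and its exact argument handling as hypothetical.

      SUBROUTINE mpp_report_sketch( cdname, kpk, kpl, kpf, ld_lbc )
         !! Hypothetical sketch only: records one halo exchange in the communication
         !! sequence, as the inline code removed above used to do.
         CHARACTER(len=*), INTENT(in) ::   cdname          ! name of the calling subroutine
         INTEGER         , INTENT(in) ::   kpk, kpl, kpf   ! 3rd/4th dimension sizes and multi-array count
         LOGICAL         , INTENT(in) ::   ld_lbc          ! .TRUE. when called from a lateral boundary exchange
         !
         IF( .NOT. ld_lbc )   RETURN                       ! this sketch only records lbc_lnk-type exchanges
         n_sequence_lbc = n_sequence_lbc + 1               ! one more exchange in the recorded sequence
         ncomm_sequence(n_sequence_lbc,1) = kpk * kpl      ! size of the 3rd x 4th dimensions
         ncomm_sequence(n_sequence_lbc,2) = kpf            ! number of arrays exchanged together
         crname_lbc    (n_sequence_lbc)   = cdname         ! remember which routine requested the exchange
      END SUBROUTINE mpp_report_sketch

The report itself (call counts per routine, largest exchanged array, global communications) would then be written once, as the deleted block did, when ncom_stp reaches the second sampled time step.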
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcapr.F90
r10068 r10314 94 94 ! 95 95 IF( ln_ref_apr ) THEN !* Compute whole inner domain mean masked ocean surface 96 tarea = glob_sum( e1e2t(:,:) )96 tarea = glob_sum( 'sbcapr', e1e2t(:,:) ) 97 97 IF(lwp) WRITE(numout,*) ' Variable ref. Patm computed over a ocean surface of ', tarea*1e-6, 'km2' 98 98 ELSE … … 141 141 ! 142 142 ! !* update the reference atmospheric pressure (if necessary) 143 IF( ln_ref_apr ) rn_pref = glob_sum( sf_apr(1)%fnow(:,:,1) * e1e2t(:,:) ) / tarea143 IF( ln_ref_apr ) rn_pref = glob_sum( 'sbcapr', sf_apr(1)%fnow(:,:,1) * e1e2t(:,:) ) / tarea 144 144 ! 145 145 ! !* Patm related forcing at kt -
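The sbcapr.F90 change shows the new user-facing signature: glob_sum now takes the name of the calling module as an extra first character argument, which ends up in the communication report, while the summed expression is unchanged. As a purely illustrative snippet (pfield, zarea, zmean and the 'my_module' tag are placeholders; everything else appears in the diffs on this page), an area-weighted mean under the new interface reads:

      ! placeholders: pfield is any 2D field, 'my_module' identifies the caller in the report
      zarea = glob_sum( 'my_module', e1e2t(:,:) * tmask(:,:,1) )                      ! masked ocean surface
      zmean = glob_sum( 'my_module', pfield(:,:) * e1e2t(:,:) * tmask(:,:,1) ) / zarea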
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcfwb.F90
r10170 r10314 86 86 IF( kn_fwb == 3 .AND. ln_isfcav ) CALL ctl_stop( 'sbc_fwb: nn_fwb = 3 with ln_isfcav = .TRUE. not working, we stop ' ) 87 87 ! 88 area = glob_sum( e1e2t(:,:) * tmask(:,:,1)) ! interior global domain surface88 area = glob_sum( 'sbcfwb', e1e2t(:,:) * tmask(:,:,1)) ! interior global domain surface 89 89 ! isf cavities are excluded because it can feedback to the melting with generation of inhibition of plumes 90 90 ! and in case of no melt, it can generate HSSW. … … 102 102 ! 103 103 IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN 104 z_fwf = glob_sum( e1e2t(:,:) * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) - snwice_fmass(:,:) ) ) / area ! sum over the global domain104 z_fwf = glob_sum( 'sbcfwb', e1e2t(:,:) * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) - snwice_fmass(:,:) ) ) / area ! sum over the global domain 105 105 zcoef = z_fwf * rcp 106 106 emp(:,:) = emp(:,:) - z_fwf * tmask(:,:,1) … … 127 127 a_fwb_b = a_fwb ! mean sea level taking into account the ice+snow 128 128 ! sum over the global domain 129 a_fwb = glob_sum( e1e2t(:,:) * ( sshn(:,:) + snwice_mass(:,:) * r1_rau0 ) )129 a_fwb = glob_sum( 'sbcfwb', e1e2t(:,:) * ( sshn(:,:) + snwice_mass(:,:) * r1_rau0 ) ) 130 130 a_fwb = a_fwb * 1.e+3 / ( area * rday * 365. ) ! convert in Kg/m3/s = mm/s 131 131 !!gm ! !!bug 365d year … … 155 155 ztmsk_neg(:,:) = tmask_i(:,:) - ztmsk_pos(:,:) 156 156 ! 157 zsurf_neg = glob_sum( e1e2t(:,:)*ztmsk_neg(:,:) ) ! Area filled by <0 and >0 erp158 zsurf_pos = glob_sum( e1e2t(:,:)*ztmsk_pos(:,:) )157 zsurf_neg = glob_sum( 'sbcfwb', e1e2t(:,:)*ztmsk_neg(:,:) ) ! Area filled by <0 and >0 erp 158 zsurf_pos = glob_sum( 'sbcfwb', e1e2t(:,:)*ztmsk_pos(:,:) ) 159 159 ! ! fwf global mean (excluding ocean to ice/snow exchanges) 160 z_fwf = glob_sum( e1e2t(:,:) * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) - snwice_fmass(:,:) ) ) / area160 z_fwf = glob_sum( 'sbcfwb', e1e2t(:,:) * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) - snwice_fmass(:,:) ) ) / area 161 161 ! 162 162 IF( z_fwf < 0._wp ) THEN ! spread out over >0 erp area to increase evaporation … … 168 168 ENDIF 169 169 ! 170 zsum_fwf = glob_sum( e1e2t(:,:) * z_fwf ) ! fwf global mean over <0 or >0 erp area170 zsum_fwf = glob_sum( 'sbcfwb', e1e2t(:,:) * z_fwf ) ! fwf global mean over <0 or >0 erp area 171 171 !!gm : zsum_fwf = z_fwf * area ??? it is right? I think so.... 172 172 z_fwf_nsrf = zsum_fwf / ( zsurf_tospread + rsmall ) 173 173 ! ! weight to respect erp field 2D structure 174 zsum_erp = glob_sum( ztmsk_tospread(:,:) * erp(:,:) * e1e2t(:,:) )174 zsum_erp = glob_sum( 'sbcfwb', ztmsk_tospread(:,:) * erp(:,:) * e1e2t(:,:) ) 175 175 z_wgt(:,:) = ztmsk_tospread(:,:) * erp(:,:) / ( zsum_erp + rsmall ) 176 176 ! ! final correction term to apply -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdfiwm.F90
r10297 r10314 463 463 ecri_iwm(:,:) = ecri_iwm(:,:) * ssmask(:,:) 464 464 465 zbot = glob_sum( e1e2t(:,:) * ebot_iwm(:,:) )466 zpyc = glob_sum( e1e2t(:,:) * epyc_iwm(:,:) )467 zcri = glob_sum( e1e2t(:,:) * ecri_iwm(:,:) )465 zbot = glob_sum( 'zdfiwm', e1e2t(:,:) * ebot_iwm(:,:) ) 466 zpyc = glob_sum( 'zdfiwm', e1e2t(:,:) * epyc_iwm(:,:) ) 467 zcri = glob_sum( 'zdfiwm', e1e2t(:,:) * ecri_iwm(:,:) ) 468 468 IF(lwp) THEN 469 469 WRITE(numout,*) ' High-mode wave-breaking energy: ', zbot * 1.e-12_wp, 'TW' -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/lib_fortran.F90
r10297 r10314 34 34 35 35 INTERFACE glob_sum 36 MODULE PROCEDURE glob_sum_1d, glob_sum_2d, glob_sum_3d, & 37 & glob_sum_2d_a, glob_sum_3d_a 36 MODULE PROCEDURE glob_sum_1d, glob_sum_2d, glob_sum_3d 38 37 END INTERFACE 39 38 INTERFACE glob_sum_full … … 41 40 END INTERFACE 42 41 INTERFACE glob_min 43 MODULE PROCEDURE glob_min_2d, glob_min_3d ,glob_min_2d_a, glob_min_3d_a42 MODULE PROCEDURE glob_min_2d, glob_min_3d 44 43 END INTERFACE 45 44 INTERFACE glob_max 46 MODULE PROCEDURE glob_max_2d, glob_max_3d ,glob_max_2d_a, glob_max_3d_a45 MODULE PROCEDURE glob_max_2d, glob_max_3d 47 46 END INTERFACE 48 47 … … 62 61 CONTAINS 63 62 64 ! --- SUM --- 65 FUNCTION glob_sum_1d( ptab, kdim ) 66 !!---------------------------------------------------------------------- 67 !! *** FUNCTION glob_sum_1d *** 68 !! 69 !! ** Purpose : perform a sum in calling DDPDD routine 70 !!---------------------------------------------------------------------- 71 INTEGER , INTENT(in) :: kdim 72 REAL(wp), INTENT(in), DIMENSION(kdim) :: ptab 73 REAL(wp) :: glob_sum_1d ! global sum 74 !! 75 COMPLEX(wp):: ctmp 76 REAL(wp) :: ztmp 77 INTEGER :: ji ! dummy loop indices 78 !!----------------------------------------------------------------------- 79 ! 80 ztmp = 0.e0 81 ctmp = CMPLX( 0.e0, 0.e0, wp ) 82 DO ji = 1, kdim 83 ztmp = ptab(ji) 84 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 85 END DO 86 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 87 glob_sum_1d = REAL(ctmp,wp) 88 ! 89 END FUNCTION glob_sum_1d 90 91 FUNCTION glob_sum_2d( ptab ) 92 !!---------------------------------------------------------------------- 93 !! *** FUNCTION glob_sum_2d *** 94 !! 95 !! ** Purpose : perform a sum in calling DDPDD routine 96 !!---------------------------------------------------------------------- 97 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab 98 REAL(wp) :: glob_sum_2d ! global masked sum 99 !! 100 COMPLEX(wp):: ctmp 101 REAL(wp) :: ztmp 102 INTEGER :: ji, jj ! dummy loop indices 103 !!----------------------------------------------------------------------- 104 ! 105 ztmp = 0.e0 106 ctmp = CMPLX( 0.e0, 0.e0, wp ) 107 DO jj = 1, jpj 108 DO ji =1, jpi 109 ztmp = ptab(ji,jj) * tmask_i(ji,jj) 110 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 111 END DO 112 END DO 113 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 114 glob_sum_2d = REAL(ctmp,wp) 115 ! 116 END FUNCTION glob_sum_2d 117 118 119 FUNCTION glob_sum_3d( ptab ) 120 !!---------------------------------------------------------------------- 121 !! *** FUNCTION glob_sum_3d *** 122 !! 123 !! ** Purpose : perform a sum on a 3D array in calling DDPDD routine 124 !!---------------------------------------------------------------------- 125 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab 126 REAL(wp) :: glob_sum_3d ! global masked sum 127 !! 128 COMPLEX(wp):: ctmp 129 REAL(wp) :: ztmp 130 INTEGER :: ji, jj, jk ! dummy loop indices 131 INTEGER :: ijpk ! local variables: size of ptab 132 !!----------------------------------------------------------------------- 133 ! 134 ijpk = SIZE(ptab,3) 135 ! 136 ztmp = 0.e0 137 ctmp = CMPLX( 0.e0, 0.e0, wp ) 138 DO jk = 1, ijpk 139 DO jj = 1, jpj 140 DO ji =1, jpi 141 ztmp = ptab(ji,jj,jk) * tmask_i(ji,jj) 142 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 143 END DO 144 END DO 145 END DO 146 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 147 glob_sum_3d = REAL(ctmp,wp) 148 ! 
149 END FUNCTION glob_sum_3d 150 151 152 FUNCTION glob_sum_2d_a( ptab1, ptab2 ) 153 !!---------------------------------------------------------------------- 154 !! *** FUNCTION glob_sum_2d_a *** 155 !! 156 !! ** Purpose : perform a sum on two 2D arrays in calling DDPDD routine 157 !!---------------------------------------------------------------------- 158 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 159 REAL(wp) :: glob_sum_2d_a ! global masked sum 160 !! 161 COMPLEX(wp):: ctmp 162 REAL(wp) :: ztmp 163 INTEGER :: ji, jj ! dummy loop indices 164 !!----------------------------------------------------------------------- 165 ! 166 ztmp = 0.e0 167 ctmp = CMPLX( 0.e0, 0.e0, wp ) 168 DO jj = 1, jpj 169 DO ji =1, jpi 170 ztmp = ptab1(ji,jj) * tmask_i(ji,jj) 171 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 172 ztmp = ptab2(ji,jj) * tmask_i(ji,jj) 173 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 174 END DO 175 END DO 176 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 177 glob_sum_2d_a = REAL(ctmp,wp) 178 ! 179 END FUNCTION glob_sum_2d_a 180 181 182 FUNCTION glob_sum_3d_a( ptab1, ptab2 ) 183 !!---------------------------------------------------------------------- 184 !! *** FUNCTION glob_sum_3d_a *** 185 !! 186 !! ** Purpose : perform a sum on two 3D array in calling DDPDD routine 187 !!---------------------------------------------------------------------- 188 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 189 REAL(wp) :: glob_sum_3d_a ! global masked sum 190 !! 191 COMPLEX(wp):: ctmp 192 REAL(wp) :: ztmp 193 INTEGER :: ji, jj, jk ! dummy loop indices 194 INTEGER :: ijpk ! local variables: size of ptab 195 !!----------------------------------------------------------------------- 196 ! 197 ijpk = SIZE(ptab1,3) 198 ! 199 ztmp = 0.e0 200 ctmp = CMPLX( 0.e0, 0.e0, wp ) 201 DO jk = 1, ijpk 202 DO jj = 1, jpj 203 DO ji = 1, jpi 204 ztmp = ptab1(ji,jj,jk) * tmask_i(ji,jj) 205 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 206 ztmp = ptab2(ji,jj,jk) * tmask_i(ji,jj) 207 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 208 END DO 209 END DO 210 END DO 211 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 212 glob_sum_3d_a = REAL(ctmp,wp) 213 ! 214 END FUNCTION glob_sum_3d_a 215 216 FUNCTION glob_sum_full_2d( ptab ) 217 !!---------------------------------------------------------------------- 218 !! *** FUNCTION glob_sum_full_2d *** 219 !! 220 !! ** Purpose : perform a sum in calling DDPDD routine 221 !!---------------------------------------------------------------------- 222 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab 223 REAL(wp) :: glob_sum_full_2d ! global sum (nomask) 224 !! 225 COMPLEX(wp):: ctmp 226 REAL(wp) :: ztmp 227 INTEGER :: ji, jj ! dummy loop indices 228 !!----------------------------------------------------------------------- 229 ! 230 ztmp = 0.e0 231 ctmp = CMPLX( 0.e0, 0.e0, wp ) 232 DO jj = 1, jpj 233 DO ji =1, jpi 234 ztmp = ptab(ji,jj) * tmask_h(ji,jj) 235 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 236 END DO 237 END DO 238 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 239 glob_sum_full_2d = REAL(ctmp,wp) 240 ! 241 END FUNCTION glob_sum_full_2d 242 243 FUNCTION glob_sum_full_3d( ptab ) 244 !!---------------------------------------------------------------------- 245 !! *** FUNCTION glob_sum_full_3d *** 246 !! 247 !! 
** Purpose : perform a sum on a 3D array in calling DDPDD routine 248 !!---------------------------------------------------------------------- 249 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab 250 REAL(wp) :: glob_sum_full_3d ! global sum (nomask) 251 !! 252 COMPLEX(wp):: ctmp 253 REAL(wp) :: ztmp 254 INTEGER :: ji, jj, jk ! dummy loop indices 255 INTEGER :: ijpk ! local variables: size of ptab 256 !!----------------------------------------------------------------------- 257 ! 258 ijpk = SIZE(ptab,3) 259 ! 260 ztmp = 0.e0 261 ctmp = CMPLX( 0.e0, 0.e0, wp ) 262 DO jk = 1, ijpk 263 DO jj = 1, jpj 264 DO ji =1, jpi 265 ztmp = ptab(ji,jj,jk) * tmask_h(ji,jj) 266 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 267 END DO 268 END DO 269 END DO 270 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 271 glob_sum_full_3d = REAL(ctmp,wp) 272 ! 273 END FUNCTION glob_sum_full_3d 274 275 ! --- MIN --- 276 FUNCTION glob_min_2d( ptab ) 277 !!----------------------------------------------------------------------- 278 !! *** FUNCTION glob_min_2D *** 279 !! 280 !! ** Purpose : perform a masked min on the inner global domain of a 2D array 281 !!----------------------------------------------------------------------- 282 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab ! input 2D array 283 REAL(wp) :: glob_min_2d ! global masked min 284 !!----------------------------------------------------------------------- 285 ! 286 glob_min_2d = MINVAL( ptab(:,:)*tmask_i(:,:) ) 287 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_2d ) 288 ! 289 END FUNCTION glob_min_2d 290 291 FUNCTION glob_min_3d( ptab ) 292 !!----------------------------------------------------------------------- 293 !! *** FUNCTION glob_min_3D *** 294 !! 295 !! ** Purpose : perform a masked min on the inner global domain of a 3D array 296 !!----------------------------------------------------------------------- 297 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 298 REAL(wp) :: glob_min_3d ! global masked min 299 !! 300 INTEGER :: jk 301 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 302 !!----------------------------------------------------------------------- 303 ! 304 ijpk = SIZE(ptab,3) 305 ! 306 glob_min_3d = MINVAL( ptab(:,:,1)*tmask_i(:,:) ) 307 DO jk = 2, ijpk 308 glob_min_3d = MIN( glob_min_3d, MINVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 309 END DO 310 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_3d ) 311 ! 312 END FUNCTION glob_min_3d 313 314 315 FUNCTION glob_min_2d_a( ptab1, ptab2 ) 316 !!----------------------------------------------------------------------- 317 !! *** FUNCTION glob_min_2D _a *** 318 !! 319 !! ** Purpose : perform a masked min on the inner global domain of two 2D array 320 !!----------------------------------------------------------------------- 321 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 ! input 2D array 322 REAL(wp) , DIMENSION(2) :: glob_min_2d_a ! global masked min 323 !!----------------------------------------------------------------------- 324 ! 325 glob_min_2d_a(1) = MINVAL( ptab1(:,:)*tmask_i(:,:) ) 326 glob_min_2d_a(2) = MINVAL( ptab2(:,:)*tmask_i(:,:) ) 327 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_2d_a, 2 ) 328 ! 329 END FUNCTION glob_min_2d_a 330 331 332 FUNCTION glob_min_3d_a( ptab1, ptab2 ) 333 !!----------------------------------------------------------------------- 334 !! *** FUNCTION glob_min_3D_a *** 335 !! 336 !! 
** Purpose : perform a masked min on the inner global domain of two 3D array 337 !!----------------------------------------------------------------------- 338 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 ! input 3D array 339 REAL(wp) , DIMENSION(2) :: glob_min_3d_a ! global masked min 340 !! 341 INTEGER :: jk 342 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 343 !!----------------------------------------------------------------------- 344 ! 345 ijpk = SIZE(ptab1,3) 346 ! 347 glob_min_3d_a(1) = MINVAL( ptab1(:,:,1)*tmask_i(:,:) ) 348 glob_min_3d_a(2) = MINVAL( ptab2(:,:,1)*tmask_i(:,:) ) 349 DO jk = 2, ijpk 350 glob_min_3d_a(1) = MIN( glob_min_3d_a(1), MINVAL( ptab1(:,:,jk)*tmask_i(:,:) ) ) 351 glob_min_3d_a(2) = MIN( glob_min_3d_a(2), MINVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 352 END DO 353 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_3d_a, 2 ) 354 ! 355 END FUNCTION glob_min_3d_a 356 357 ! --- MAX --- 358 FUNCTION glob_max_2d( ptab ) 359 !!----------------------------------------------------------------------- 360 !! *** FUNCTION glob_max_2D *** 361 !! 362 !! ** Purpose : perform a masked max on the inner global domain of a 2D array 363 !!----------------------------------------------------------------------- 364 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab ! input 2D array 365 REAL(wp) :: glob_max_2d ! global masked max 366 !!----------------------------------------------------------------------- 367 ! 368 glob_max_2d = MAXVAL( ptab(:,:)*tmask_i(:,:) ) 369 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_2d ) 370 ! 371 END FUNCTION glob_max_2d 372 373 FUNCTION glob_max_3d( ptab ) 374 !!----------------------------------------------------------------------- 375 !! *** FUNCTION glob_max_3D *** 376 !! 377 !! ** Purpose : perform a masked max on the inner global domain of a 3D array 378 !!----------------------------------------------------------------------- 379 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 380 REAL(wp) :: glob_max_3d ! global masked max 381 !! 382 INTEGER :: jk 383 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 384 !!----------------------------------------------------------------------- 385 ! 386 ijpk = SIZE(ptab,3) 387 ! 388 glob_max_3d = MAXVAL( ptab(:,:,1)*tmask_i(:,:) ) 389 DO jk = 2, ijpk 390 glob_max_3d = MAX( glob_max_3d, MAXVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 391 END DO 392 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_3d ) 393 ! 394 END FUNCTION glob_max_3d 395 396 397 FUNCTION glob_max_2d_a( ptab1, ptab2 ) 398 !!----------------------------------------------------------------------- 399 !! *** FUNCTION glob_max_2D _a *** 400 !! 401 !! ** Purpose : perform a masked max on the inner global domain of two 2D array 402 !!----------------------------------------------------------------------- 403 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 ! input 2D array 404 REAL(wp) , DIMENSION(2) :: glob_max_2d_a ! global masked max 405 !!----------------------------------------------------------------------- 406 ! 407 glob_max_2d_a(1) = MAXVAL( ptab1(:,:)*tmask_i(:,:) ) 408 glob_max_2d_a(2) = MAXVAL( ptab2(:,:)*tmask_i(:,:) ) 409 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_2d_a, 2 ) 410 ! 411 END FUNCTION glob_max_2d_a 412 413 414 FUNCTION glob_max_3d_a( ptab1, ptab2 ) 415 !!----------------------------------------------------------------------- 416 !! *** FUNCTION glob_max_3D_a *** 417 !! 418 !! 
** Purpose : perform a masked max on the inner global domain of two 3D array 419 !!----------------------------------------------------------------------- 420 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 ! input 3D array 421 REAL(wp) , DIMENSION(2) :: glob_max_3d_a ! global masked max 422 !! 423 INTEGER :: jk 424 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 425 !!----------------------------------------------------------------------- 426 ! 427 ijpk = SIZE(ptab1,3) 428 ! 429 glob_max_3d_a(1) = MAXVAL( ptab1(:,:,1)*tmask_i(:,:) ) 430 glob_max_3d_a(2) = MAXVAL( ptab2(:,:,1)*tmask_i(:,:) ) 431 DO jk = 2, ijpk 432 glob_max_3d_a(1) = MAX( glob_max_3d_a(1), MAXVAL( ptab1(:,:,jk)*tmask_i(:,:) ) ) 433 glob_max_3d_a(2) = MAX( glob_max_3d_a(2), MAXVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 434 END DO 435 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_3d_a, 2 ) 436 ! 437 END FUNCTION glob_max_3d_a 63 # define GLOBSUM_CODE 64 65 # define DIM_1d 66 # define FUNCTION_GLOBSUM glob_sum_1d 67 # include "lib_fortran_generic.h90" 68 # undef FUNCTION_GLOBSUM 69 # undef DIM_1d 70 71 # define DIM_2d 72 # define OPERATION_GLOBSUM 73 # define FUNCTION_GLOBSUM glob_sum_2d 74 # include "lib_fortran_generic.h90" 75 # undef FUNCTION_GLOBSUM 76 # undef OPERATION_GLOBSUM 77 # define OPERATION_FULL_GLOBSUM 78 # define FUNCTION_GLOBSUM glob_sum_full_2d 79 # include "lib_fortran_generic.h90" 80 # undef FUNCTION_GLOBSUM 81 # undef OPERATION_FULL_GLOBSUM 82 # undef DIM_2d 83 84 # define DIM_3d 85 # define OPERATION_GLOBSUM 86 # define FUNCTION_GLOBSUM glob_sum_3d 87 # include "lib_fortran_generic.h90" 88 # undef FUNCTION_GLOBSUM 89 # undef OPERATION_GLOBSUM 90 # define OPERATION_FULL_GLOBSUM 91 # define FUNCTION_GLOBSUM glob_sum_full_3d 92 # include "lib_fortran_generic.h90" 93 # undef FUNCTION_GLOBSUM 94 # undef OPERATION_FULL_GLOBSUM 95 # undef DIM_3d 96 97 # undef GLOBSUM_CODE 98 99 100 # define GLOBMINMAX_CODE 101 102 # define DIM_2d 103 # define OPERATION_GLOBMIN 104 # define FUNCTION_GLOBMINMAX glob_min_2d 105 # include "lib_fortran_generic.h90" 106 # undef FUNCTION_GLOBMINMAX 107 # undef OPERATION_GLOBMIN 108 # define OPERATION_GLOBMAX 109 # define FUNCTION_GLOBMINMAX glob_max_2d 110 # include "lib_fortran_generic.h90" 111 # undef FUNCTION_GLOBMINMAX 112 # undef OPERATION_GLOBMAX 113 # undef DIM_2d 114 115 # define DIM_3d 116 # define OPERATION_GLOBMIN 117 # define FUNCTION_GLOBMINMAX glob_min_3d 118 # include "lib_fortran_generic.h90" 119 # undef FUNCTION_GLOBMINMAX 120 # undef OPERATION_GLOBMIN 121 # define OPERATION_GLOBMAX 122 # define FUNCTION_GLOBMINMAX glob_max_3d 123 # include "lib_fortran_generic.h90" 124 # undef FUNCTION_GLOBMINMAX 125 # undef OPERATION_GLOBMAX 126 # undef DIM_3d 127 # undef GLOBMINMAX_CODE 438 128 439 129 -
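All of the hand-written glob_sum / glob_min / glob_max bodies above, including the two-array *_a variants, are replaced by preprocessor-generated instances of a single template, lib_fortran_generic.h90, driven by the DIM_*d / OPERATION_* / FUNCTION_* macros in the new code. The template itself is added elsewhere in this changeset and is not part of this diff, so the fragment below is only a guess at what its GLOBSUM branch may expand to in the DIM_2d case, reassembled from the 2D body deleted above (compensated DDPDD summation over the tmask_i-masked inner domain, followed by an mpp_sum over the global domain); treat every detail, in particular the forwarding of cdname to mpp_sum, as an assumption.

   #if defined GLOBSUM_CODE
      FUNCTION FUNCTION_GLOBSUM( cdname, ptab )
         !! hypothetical sketch of the DIM_2d / OPERATION_GLOBSUM expansion
         CHARACTER(len=*)        , INTENT(in) ::   cdname             ! name of the calling routine
         REAL(wp), DIMENSION(:,:), INTENT(in) ::   ptab               ! field to be summed
         REAL(wp)                             ::   FUNCTION_GLOBSUM   ! global masked sum
         COMPLEX(wp) ::   ctmp
         INTEGER     ::   ji, jj
         !
         ctmp = CMPLX( 0.e0, 0.e0, wp )
         DO jj = 1, jpj
            DO ji = 1, jpi
               CALL DDPDD( CMPLX( ptab(ji,jj) * tmask_i(ji,jj), 0.e0, wp ), ctmp )   ! compensated sum
            END DO
         END DO
         IF( lk_mpp )   CALL mpp_sum( cdname, ctmp )   ! sum over the global domain
         FUNCTION_GLOBSUM = REAL( ctmp, wp )
      END FUNCTION FUNCTION_GLOBSUM
   #endif

Note also that the two-array interfaces (glob_sum_2d_a, glob_min_3d_a, etc.) disappear from the generic interface blocks at the top of the module, so a caller that previously reduced two fields in one call now issues two separate calls.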
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/stpctl.F90
r10068 r10314 61 61 !! 62 62 INTEGER :: ji, jj, jk ! dummy loop indices 63 INTEGER :: i ih, ijh! local integers64 INTEGER :: i iu, iju, iku! - -65 INTEGER :: i is1, ijs1, iks1! - -66 INTEGER :: i is2, ijs2, iks2! - -63 INTEGER :: ih(2) ! local integers 64 INTEGER :: iu(3) ! - - 65 INTEGER :: is1(3) ! - - 66 INTEGER :: is2(3) ! - - 67 67 REAL(wp) :: zzz ! local real 68 68 INTEGER , DIMENSION(3) :: ilocu, ilocs1, ilocs2 … … 112 112 ! 113 113 IF( lk_mpp ) THEN 114 CALL mpp_max _multiple( zmax(:), 5) ! max over the global domain114 CALL mpp_max( "stpctl", zmax ) ! max over the global domain 115 115 ! 116 116 nstop = NINT( zmax(5) ) ! nstop indicator sheared among all local domains … … 129 129 & ISNAN( zmax(1) + zmax(2) + zmax(3) ) ) THEN ! NaN encounter in the tests 130 130 IF( lk_mpp ) THEN 131 CALL mpp_maxloc( ABS(sshn) , ssmask(:,:) , zzz, iih , ijh)132 CALL mpp_maxloc( ABS(un) , umask (:,:,:), zzz, iiu , iju , iku )133 CALL mpp_minloc( tsn(:,:,:,jp_sal), tmask (:,:,:), zzz, iis1, ijs1, iks1 )134 CALL mpp_maxloc( tsn(:,:,:,jp_sal), tmask (:,:,:), zzz, iis2, ijs2, iks2 )131 CALL mpp_maxloc( 'stpctl', ABS(sshn) , ssmask(:,:) , zzz, ih ) 132 CALL mpp_maxloc( 'stpctl', ABS(un) , umask (:,:,:), zzz, iu ) 133 CALL mpp_minloc( 'stpctl', tsn(:,:,:,jp_sal), tmask (:,:,:), zzz, is1 ) 134 CALL mpp_maxloc( 'stpctl', tsn(:,:,:,jp_sal), tmask (:,:,:), zzz, is2 ) 135 135 ELSE 136 136 iloch = MINLOC( ABS( sshn(:,:) ) ) … … 138 138 ilocs1 = MINLOC( tsn(:,:,:,jp_sal) , mask = tmask(:,:,:) == 1._wp ) 139 139 ilocs2 = MAXLOC( tsn(:,:,:,jp_sal) , mask = tmask(:,:,:) == 1._wp ) 140 i ih = iloch (1) + nimpp - 1 ; ijh= iloch (2) + njmpp - 1141 i iu = ilocu (1) + nimpp - 1 ; iju = ilocu (2) + njmpp - 1 ; iku= ilocu (3)142 i is1 = ilocs1(1) + nimpp - 1 ; ijs1 = ilocs1(2) + njmpp - 1 ; iks1= ilocs1(3)143 i is2 = ilocs2(1) + nimpp - 1 ; ijs2 = ilocs2(2) + njmpp - 1 ; iks2= ilocs2(3)140 ih(1) = iloch (1) + nimpp - 1 ; ih(2) = iloch (2) + njmpp - 1 141 iu(1) = ilocu (1) + nimpp - 1 ; iu(2) = ilocu (2) + njmpp - 1 ; iu(3) = ilocu (3) 142 is1(1) = ilocs1(1) + nimpp - 1 ; is1(2) = ilocs1(2) + njmpp - 1 ; is1(3) = ilocs1(3) 143 is2(1) = ilocs2(1) + nimpp - 1 ; is2(2) = ilocs2(2) + njmpp - 1 ; is2(3) = ilocs2(3) 144 144 ENDIF 145 145 IF(lwp) THEN … … 147 147 WRITE(numout,*) ' stp_ctl: |ssh| > 10 m or |U| > 10 m/s or S <= 0 or S >= 100 or NaN encounter in the tests' 148 148 WRITE(numout,*) ' ======= ' 149 WRITE(numout,9100) kt, zmax(1), i ih , ijh150 WRITE(numout,9200) kt, zmax(2), i iu , iju , iku151 WRITE(numout,9300) kt, - zmax(3), i is1, ijs1, iks1152 WRITE(numout,9400) kt, zmax(4), i is2, ijs2, iks2149 WRITE(numout,9100) kt, zmax(1), ih(1) , ih(2) 150 WRITE(numout,9200) kt, zmax(2), iu(1) , iu(2) , iu(3) 151 WRITE(numout,9300) kt, - zmax(3), is1(1), is1(2), is1(3) 152 WRITE(numout,9400) kt, zmax(4), is2(1), is2(2), is2(3) 153 153 WRITE(numout,*) 154 154 WRITE(numout,*) ' output of last computed fields in output.abort.nc file'
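stpctl.F90 shows the matching change on the reduction side: the separate scalar index variables become small integer arrays, mpp_max_multiple on several scalars becomes a plain mpp_max on the zmax(:) array, and mpp_minloc/mpp_maxloc now take the caller name and return the location in a single index array. The calls below are taken directly from the diff; zzz receives the extremum value and the returned indices are written out afterwards as global (i,j,k) positions.

      INTEGER  ::   ih(2), iu(3)   ! (i,j) / (i,j,k) location of the extremum
      REAL(wp) ::   zzz            ! value of the extremum
      !
      CALL mpp_max   ( 'stpctl', zmax )                                        ! max of zmax(:) over all subdomains
      CALL mpp_maxloc( 'stpctl', ABS(sshn), ssmask(:,:)  , zzz, ih )           ! largest |ssh| and its location
      CALL mpp_maxloc( 'stpctl', ABS(un)  , umask (:,:,:), zzz, iu )           ! largest |u| and its location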