Changeset 10727 for utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src
- Timestamp: 2019-02-27T17:02:02+01:00
- Location: utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src
- Files: 25 added, 18 deleted, 5 edited, 21 moved
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/daymod.f90
r9598 r10727 2 2 !!====================================================================== 3 3 !! *** MODULE daymod *** 4 !! Ocean :calendar4 !! Ocean : management of the model calendar 5 5 !!===================================================================== 6 6 !! History : OPA ! 1994-09 (M. Pontaud M. Imbard) Original code … … 16 16 !!---------------------------------------------------------------------- 17 17 !! day : calendar 18 !! 19 !! ------------------------------- 20 !! ----------- WARNING ----------- 21 !! 22 !! we suppose that the time step is deviding the number of second of in a day 23 !! ---> MOD( rday, rdt ) == 0 24 !! 25 !! ----------- WARNING ----------- 26 !! ------------------------------- 27 !! 18 !!---------------------------------------------------------------------- 19 !! ----------- WARNING ----------- 20 !! ------------------------------- 21 !! sbcmod assume that the time step is dividing the number of second of 22 !! in a day, i.e. ===> MOD( rday, rdt ) == 0 23 !! except when user defined forcing is used (see sbcmod.F90) 28 24 !!---------------------------------------------------------------------- 29 25 USE dom_oce ! ocean space and time domain 30 26 USE phycst ! physical constants 27 USE ioipsl , ONLY : ymds2ju ! for calendar 28 ! 31 29 USE in_out_manager ! I/O manager 30 USE prtctl ! Print control 32 31 USE iom ! 33 USE ioipsl , ONLY : ymds2ju ! for calendar34 USE prtctl ! Print control35 32 USE timing ! Timing 36 33 … … 46 43 !!---------------------------------------------------------------------- 47 44 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 48 !! $Id: daymod.F90 6140 2015-12-21 11:35:23Z timgraham$49 !! Software governed by the CeCILL licen ce (./LICENSE)45 !! $Id: daymod.F90 10068 2018-08-28 14:09:04Z nicolasmartin $ 46 !! Software governed by the CeCILL license (see ./LICENSE) 50 47 !!---------------------------------------------------------------------- 51 48 CONTAINS … … 68 65 !! - nmonth_len, nyear_len, nmonth_half, nmonth_end through day_mth 69 66 !!---------------------------------------------------------------------- 70 INTEGER :: inbday, idweek 71 REAL(wp) :: zjul 67 INTEGER :: inbday, idweek ! local integers 68 REAL(wp) :: zjul ! local scalar 72 69 !!---------------------------------------------------------------------- 73 70 ! … … 77 74 & 'You must do a restart at higher frequency (or remove this stop and recompile the code in I8)' ) 78 75 ENDIF 79 ! all calendar staff is based on the fact that MOD( rday, rdt ) == 0 80 IF( MOD( rday , rdt ) /= 0. ) CALL ctl_stop( 'the time step must devide the number of second of in a day' ) 81 IF( MOD( rday , 2. ) /= 0. ) CALL ctl_stop( 'the number of second of in a day must be an even number' ) 82 IF( MOD( rdt , 2. ) /= 0. ) CALL ctl_stop( 'the time step (in second) must be an even number' ) 83 nsecd = NINT(rday ) 84 nsecd05 = NINT(0.5 * rday ) 85 ndt = NINT( rdt ) 86 ndt05 = NINT(0.5 * rdt ) 76 nsecd = NINT( rday ) 77 nsecd05 = NINT( 0.5 * rday ) 78 ndt = NINT( rdt ) 79 ndt05 = NINT( 0.5 * rdt ) 87 80 88 81 89 82 ! set the calandar from ndastp (read in restart file and namelist) 90 91 83 nyear = ndastp / 10000 92 84 nmonth = ( ndastp - (nyear * 10000) ) / 100 … … 139 131 140 132 ! 
control print 141 IF(lwp) WRITE(numout,'(a,i6,a,i2,a,i2,a,i8,a,i8,a,i8,a,i8)')' =======>> 1/2 time step before the start of the run DATE Y/M/D = ', & 133 IF(lwp) WRITE(numout,'(a,i6,a,i2,a,i2,a,i8,a,i8,a,i8,a,i8)') & 134 & ' =======>> 1/2 time step before the start of the run DATE Y/M/D = ', & 142 135 & nyear, '/', nmonth, '/', nday, ' nsec_day:', nsec_day, ' nsec_week:', nsec_week, ' & 143 136 & nsec_month:', nsec_month , ' nsec_year:' , nsec_year … … 147 140 CALL day( nit000 ) 148 141 ! 142 IF( lwxios ) THEN 143 ! define variables in restart file when writing with XIOS 144 CALL iom_set_rstw_var_active('kt') 145 CALL iom_set_rstw_var_active('ndastp') 146 CALL iom_set_rstw_var_active('adatrj') 147 CALL iom_set_rstw_var_active('ntime') 148 ENDIF 149 149 150 END SUBROUTINE day_init 150 151 … … 227 228 !!---------------------------------------------------------------------- 228 229 ! 229 IF( nn_timing == 1 )CALL timing_start('day')230 IF( ln_timing ) CALL timing_start('day') 230 231 ! 231 232 zprec = 0.1 / rday … … 278 279 ENDIF 279 280 280 ! 281 IF( nn_timing == 1 ) CALL timing_stop('day') 281 IF( lrst_oce ) CALL day_rst( kt, 'WRITE' ) ! write day restart information 282 ! 283 IF( ln_timing ) CALL timing_stop('day') 282 284 ! 283 285 END SUBROUTINE day 284 286 287 288 SUBROUTINE day_rst( kt, cdrw ) 289 !!--------------------------------------------------------------------- 290 !! *** ROUTINE day_rst *** 291 !! 292 !! ** Purpose : Read or write calendar in restart file: 293 !! 294 !! WRITE(READ) mode: 295 !! kt : number of time step since the begining of the experiment at the 296 !! end of the current(previous) run 297 !! adatrj(0) : number of elapsed days since the begining of the experiment at the 298 !! end of the current(previous) run (REAL -> keep fractions of day) 299 !! ndastp : date at the end of the current(previous) run (coded as yyyymmdd integer) 300 !! 301 !! According to namelist parameter nrstdt, 302 !! nrstdt = 0 no control on the date (nit000 is arbitrary). 303 !! nrstdt = 1 we verify that nit000 is equal to the last 304 !! time step of previous run + 1. 305 !! In both those options, the exact duration of the experiment 306 !! since the beginning (cumulated duration of all previous restart runs) 307 !! is not stored in the restart and is assumed to be (nit000-1)*rdt. 308 !! This is valid is the time step has remained constant. 309 !! 310 !! nrstdt = 2 the duration of the experiment in days (adatrj) 311 !! has been stored in the restart file. 312 !!---------------------------------------------------------------------- 313 INTEGER , INTENT(in) :: kt ! ocean time-step 314 CHARACTER(len=*), INTENT(in) :: cdrw ! "READ"/"WRITE" flag 315 ! 316 REAL(wp) :: zkt, zndastp, zdayfrac, ksecs, ktime 317 INTEGER :: ihour, iminute 318 !!---------------------------------------------------------------------- 319 320 IF( TRIM(cdrw) == 'READ' ) THEN 321 322 IF( iom_varid( numror, 'kt', ldstop = .FALSE. ) > 0 ) THEN 323 ! Get Calendar informations 324 CALL iom_get( numror, 'kt', zkt, ldxios = lrxios ) ! 
last time-step of previous run 325 IF(lwp) THEN 326 WRITE(numout,*) ' *** Info read in restart : ' 327 WRITE(numout,*) ' previous time-step : ', NINT( zkt ) 328 WRITE(numout,*) ' *** restart option' 329 SELECT CASE ( nrstdt ) 330 CASE ( 0 ) ; WRITE(numout,*) ' nrstdt = 0 : no control of nit000' 331 CASE ( 1 ) ; WRITE(numout,*) ' nrstdt = 1 : no control the date at nit000 (use ndate0 read in the namelist)' 332 CASE ( 2 ) ; WRITE(numout,*) ' nrstdt = 2 : calendar parameters read in restart' 333 END SELECT 334 WRITE(numout,*) 335 ENDIF 336 ! Control of date 337 IF( nit000 - NINT( zkt ) /= 1 .AND. nrstdt /= 0 ) & 338 & CALL ctl_stop( ' ===>>>> : problem with nit000 for the restart', & 339 & ' verify the restart file or rerun with nrstdt = 0 (namelist)' ) 340 ! define ndastp and adatrj 341 IF ( nrstdt == 2 ) THEN 342 ! read the parameters corresponding to nit000 - 1 (last time step of previous run) 343 CALL iom_get( numror, 'ndastp', zndastp, ldxios = lrxios ) 344 ndastp = NINT( zndastp ) 345 CALL iom_get( numror, 'adatrj', adatrj , ldxios = lrxios ) 346 CALL iom_get( numror, 'ntime' , ktime , ldxios = lrxios ) 347 nn_time0=INT(ktime) 348 ! calculate start time in hours and minutes 349 zdayfrac=adatrj-INT(adatrj) 350 ksecs = NINT(zdayfrac*86400) ! Nearest second to catch rounding errors in adatrj 351 ihour = INT(ksecs/3600) 352 iminute = ksecs/60-ihour*60 353 354 ! Add to nn_time0 355 nhour = nn_time0 / 100 356 nminute = ( nn_time0 - nhour * 100 ) 357 nminute=nminute+iminute 358 359 IF( nminute >= 60 ) THEN 360 nminute=nminute-60 361 nhour=nhour+1 362 ENDIF 363 nhour=nhour+ihour 364 IF( nhour >= 24 ) THEN 365 nhour=nhour-24 366 adatrj=adatrj+1 367 ENDIF 368 nn_time0 = nhour * 100 + nminute 369 adatrj = INT(adatrj) ! adatrj set to integer as nn_time0 updated 370 ELSE 371 ! parameters corresponding to nit000 - 1 (as we start the step loop with a call to day) 372 ndastp = ndate0 ! ndate0 read in the namelist in dom_nam 373 nhour = nn_time0 / 100 374 nminute = ( nn_time0 - nhour * 100 ) 375 IF( nhour*3600+nminute*60-ndt05 .lt. 0 ) ndastp=ndastp-1 ! Start hour is specified in the namelist (default 0) 376 adatrj = ( REAL( nit000-1, wp ) * rdt ) / rday 377 ! note this is wrong if time step has changed during run 378 ENDIF 379 ELSE 380 ! parameters corresponding to nit000 - 1 (as we start the step loop with a call to day) 381 ndastp = ndate0 ! ndate0 read in the namelist in dom_nam 382 nhour = nn_time0 / 100 383 nminute = ( nn_time0 - nhour * 100 ) 384 IF( nhour*3600+nminute*60-ndt05 .lt. 0 ) ndastp=ndastp-1 ! Start hour is specified in the namelist (default 0) 385 adatrj = ( REAL( nit000-1, wp ) * rdt ) / rday 386 ENDIF 387 IF( ABS(adatrj - REAL(NINT(adatrj),wp)) < 0.1 / rday ) adatrj = REAL(NINT(adatrj),wp) ! avoid truncation error 388 ! 389 IF(lwp) THEN 390 WRITE(numout,*) ' *** Info used values : ' 391 WRITE(numout,*) ' date ndastp : ', ndastp 392 WRITE(numout,*) ' number of elapsed days since the begining of run : ', adatrj 393 WRITE(numout,*) ' nn_time0 : ',nn_time0 394 WRITE(numout,*) 395 ENDIF 396 ! 397 ELSEIF( TRIM(cdrw) == 'WRITE' ) THEN 398 ! 399 IF( kt == nitrst ) THEN 400 IF(lwp) WRITE(numout,*) 401 IF(lwp) WRITE(numout,*) 'rst_write : write oce restart file kt =', kt 402 IF(lwp) WRITE(numout,*) '~~~~~~~' 403 ENDIF 404 ! calendar control 405 IF( lwxios ) CALL iom_swap( cwxios_context ) 406 CALL iom_rstput( kt, nitrst, numrow, 'kt' , REAL( kt , wp) , ldxios = lwxios ) ! time-step 407 CALL iom_rstput( kt, nitrst, numrow, 'ndastp' , REAL( ndastp, wp) , ldxios = lwxios ) ! 
date 408 CALL iom_rstput( kt, nitrst, numrow, 'adatrj' , adatrj , ldxios = lwxios ) ! number of elapsed days since 409 ! ! the begining of the run [s] 410 CALL iom_rstput( kt, nitrst, numrow, 'ntime' , REAL( nn_time0, wp), ldxios = lwxios ) ! time 411 IF( lwxios ) CALL iom_swap( cxios_context ) 412 ENDIF 413 ! 414 END SUBROUTINE day_rst 285 415 286 416 !!====================================================================== -
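Note on the daymod.f90 hunks above: the module still requires the time step to divide the length of a day (MOD( rday, rdt ) == 0), and the new day_rst routine recombines the restart start time nn_time0 (coded as hhmm) with the fractional part of adatrj. The following standalone sketch is not part of the changeset; it only mirrors the variable names to illustrate that arithmetic:

```fortran
! Standalone sketch (not from the changeset) of the daymod.f90 calendar
! arithmetic: check MOD(rday, rdt) == 0, then combine nn_time0 (hhmm) with
! the fractional part of adatrj as day_rst does after a restart read.
PROGRAM day_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12, 307)
   REAL(wp) :: rday, rdt, adatrj, zdayfrac
   INTEGER  :: nn_time0, nhour, nminute, ihour, iminute, ksecs
   !
   rday     = 86400._wp        ! seconds in a day
   rdt      = 5400._wp         ! model time step [s]
   adatrj   = 12.75_wp         ! elapsed days (0.75 day = 18:00)
   nn_time0 = 0030             ! start time coded as hhmm (00:30)
   !
   IF( MOD( rday, rdt ) /= 0._wp ) STOP 'rdt must divide the length of a day'
   !
   zdayfrac = adatrj - INT( adatrj )          ! fraction of the current day
   ksecs    = NINT( zdayfrac * 86400._wp )    ! nearest second (catch rounding in adatrj)
   ihour    = ksecs / 3600
   iminute  = ksecs / 60 - ihour * 60
   !
   nhour   = nn_time0 / 100                   ! split hhmm into hours and minutes
   nminute = nn_time0 - nhour * 100 + iminute
   IF( nminute >= 60 ) THEN
      nminute = nminute - 60
      nhour   = nhour + 1
   ENDIF
   nhour = nhour + ihour
   IF( nhour >= 24 ) THEN                     ! roll over into the next day
      nhour  = nhour - 24
      adatrj = adatrj + 1._wp
   ENDIF
   nn_time0 = nhour * 100 + nminute
   !
   WRITE(*,'(a,i4.4,a,f7.2)') ' new nn_time0 = ', nn_time0, '  adatrj = ', adatrj
END PROGRAM day_sketch
```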
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/dom_oce.F90
r10725 r10727 26 26 PUBLIC dom_oce_alloc ! Called from nemogcm.F90 27 27 28 !!---------------------------------------------------------------------- 29 !! time & space domain namelist 28 !! time & space domain namelist 30 29 !! ---------------------------- 30 INTEGER , PUBLIC :: nmsh !: = 1 create a mesh-mask file 31 31 ! !!* Namelist namdom : time & space domain * 32 INTEGER , PUBLIC :: nn_bathy !: = 0/1 ,compute/read the bathymetry file32 INTEGER , PUBLIC :: nn_bathy !: = 0/1/2 ,compute/read the bathymetry file 33 33 REAL(wp), PUBLIC :: rn_bathy !: depth of flat bottom (active if nn_bathy=0; if =0 depth=jpkm1) 34 34 REAL(wp), PUBLIC :: rn_hmin !: minimum ocean depth (>0) or minimum number of ocean levels (<0) 35 REAL(wp), PUBLIC :: rn_isfhmin !: threshold to discriminate grounded ice to floating ice36 35 REAL(wp), PUBLIC :: rn_e3zps_min !: miminum thickness for partial steps (meters) 37 36 REAL(wp), PUBLIC :: rn_e3zps_rat !: minimum thickness ration for partial steps 38 37 INTEGER , PUBLIC :: nn_msh !: = 1 create a mesh-mask file 39 REAL(wp), PUBLIC :: rn_atfp !: asselin time filter parameter40 REAL(wp), PUBLIC :: rn_rdt !: time step for the dynamics and tracer41 38 INTEGER , PUBLIC :: nn_closea !: =0 suppress closed sea/lake from the ORCA domain or not (=1) 42 INTEGER , PUBLIC :: nn_euler !: =0 start with forward time step or not (=1) 43 LOGICAL , PUBLIC :: ln_iscpl !: coupling with ice sheet 44 LOGICAL , PUBLIC :: ln_crs !: Apply grid coarsening to dynamical model output or online passive tracers 45 46 !! Free surface parameters 47 !! ======================= 48 LOGICAL , PUBLIC :: ln_dynspg_exp !: Explicit free surface flag 49 LOGICAL , PUBLIC :: ln_dynspg_ts !: Split-Explicit free surface flag 50 51 !! Time splitting parameters 52 !! ========================= 53 LOGICAL, PUBLIC :: ln_bt_fw !: Forward integration of barotropic sub-stepping 54 LOGICAL, PUBLIC :: ln_bt_av !: Time averaging of barotropic variables 55 LOGICAL, PUBLIC :: ln_bt_auto !: Set number of barotropic iterations automatically 56 INTEGER, PUBLIC :: nn_bt_flt !: Filter choice 57 INTEGER, PUBLIC :: nn_baro !: Number of barotropic iterations during one baroclinic step (rdt) 58 REAL(wp), PUBLIC :: rn_bt_cmax !: Maximum allowed courant number (used if ln_bt_auto=T) 59 60 !! Horizontal grid parameters for domhgr 61 !! ===================================== 39 40 INTEGER, PUBLIC :: nn_interp 41 CHARACTER(LEN=132), PUBLIC :: cn_topo 42 CHARACTER(LEN=132), PUBLIC :: cn_bath 43 CHARACTER(LEN=132), PUBLIC :: cn_lon 44 CHARACTER(LEN=132), PUBLIC :: cn_lat 45 46 LOGICAL, PUBLIC :: lzoom = .FALSE. !: zoom flag 47 LOGICAL, PUBLIC :: lzoom_e = .FALSE. !: East zoom type flag 48 LOGICAL, PUBLIC :: lzoom_w = .FALSE. !: West zoom type flag 49 LOGICAL, PUBLIC :: lzoom_s = .FALSE. !: South zoom type flag 50 LOGICAL, PUBLIC :: lzoom_n = .FALSE. !: North zoom type flag 51 52 62 53 INTEGER :: jphgr_msh !: type of horizontal mesh 63 54 ! ! = 0 curvilinear coordinate on the sphere read in coordinate.nc … … 93 84 REAL(wp) :: ppacr2 !: 94 85 95 ! !! old non-DOCTOR names still used in the model 96 INTEGER , PUBLIC :: ntopo !: = 0/1 ,compute/read the bathymetry file 97 REAL(wp), PUBLIC :: e3zps_min !: miminum thickness for partial steps (meters) 98 REAL(wp), PUBLIC :: e3zps_rat !: minimum thickness ration for partial steps 99 INTEGER , PUBLIC :: nmsh !: = 1 create a mesh-mask file 100 REAL(wp), PUBLIC :: atfp !: asselin time filter parameter 101 REAL(wp), PUBLIC :: rdt !: time step for the dynamics and tracer 102 103 ! !!! 
associated variables 104 INTEGER , PUBLIC :: neuler !: restart euler forward option (0=Euler) 105 REAL(wp), PUBLIC :: atfp1 !: asselin time filter coeff. (atfp1= 1-2*atfp) 106 REAL(wp), PUBLIC :: r2dt !: = 2*rdt except at nit000 (=rdt) if neuler=0 86 !!---------------------------------------------------------------------- 87 !! time & space domain namelist 88 !! ---------------------------- 89 ! !!* Namelist namdom : time & space domain * 90 LOGICAL , PUBLIC :: ln_linssh !: =T linear free surface ==>> model level are fixed in time 91 LOGICAL , PUBLIC :: ln_meshmask !: =T create a mesh-mask file (mesh_mask.nc) 92 REAL(wp), PUBLIC :: rn_isfhmin !: threshold to discriminate grounded ice to floating ice 93 REAL(wp), PUBLIC :: rn_rdt !: time step for the dynamics and tracer 94 REAL(wp), PUBLIC :: rn_atfp !: asselin time filter parameter 95 INTEGER , PUBLIC :: nn_euler !: =0 start with forward time step or not (=1) 96 LOGICAL , PUBLIC :: ln_iscpl !: coupling with ice sheet 97 LOGICAL , PUBLIC :: ln_crs !: Apply grid coarsening to dynamical model output or online passive tracers 98 99 !! Free surface parameters 100 !! ======================= 101 LOGICAL , PUBLIC :: ln_dynspg_exp !: Explicit free surface flag 102 LOGICAL , PUBLIC :: ln_dynspg_ts !: Split-Explicit free surface flag 103 104 !! Time splitting parameters 105 !! ========================= 106 LOGICAL, PUBLIC :: ln_bt_fw !: Forward integration of barotropic sub-stepping 107 LOGICAL, PUBLIC :: ln_bt_av !: Time averaging of barotropic variables 108 LOGICAL, PUBLIC :: ln_bt_auto !: Set number of barotropic iterations automatically 109 INTEGER, PUBLIC :: nn_bt_flt !: Filter choice 110 INTEGER, PUBLIC :: nn_baro !: Number of barotropic iterations during one baroclinic step (rdt) 111 REAL(wp), PUBLIC :: rn_bt_cmax !: Maximum allowed courant number (used if ln_bt_auto=T) 112 REAL(wp), PUBLIC :: rn_bt_alpha !: Time stepping diffusion parameter 113 114 115 ! !! old non-DOCTOR names still used in the model 116 REAL(wp), PUBLIC :: atfp !: asselin time filter parameter 117 REAL(wp), PUBLIC :: rdt !: time step for the dynamics and tracer 118 119 ! !!! associated variables 120 INTEGER , PUBLIC :: neuler !: restart euler forward option (0=Euler) 121 REAL(wp), PUBLIC :: r2dt !: = 2*rdt except at nit000 (=rdt) if neuler=0 107 122 108 123 !!---------------------------------------------------------------------- 109 124 !! space domain parameters 110 125 !!---------------------------------------------------------------------- 111 LOGICAL, PUBLIC :: lzoom = .FALSE. !: zoom flag 112 LOGICAL, PUBLIC :: lzoom_e = .FALSE. !: East zoom type flag 113 LOGICAL, PUBLIC :: lzoom_w = .FALSE. !: West zoom type flag 114 LOGICAL, PUBLIC :: lzoom_s = .FALSE. !: South zoom type flag 115 LOGICAL, PUBLIC :: lzoom_n = .FALSE. !: North zoom type flag 116 117 ! !!! domain parameters linked to mpp 118 INTEGER, PUBLIC :: nperio !: type of lateral boundary condition 119 INTEGER, PUBLIC :: nimpp, njmpp !: i- & j-indexes for mpp-subdomain left bottom 120 INTEGER, PUBLIC :: nreci, nrecj !: overlap region in i and j 121 INTEGER, PUBLIC :: nproc !: number for local processor 122 INTEGER, PUBLIC :: narea !: number for local area 123 INTEGER, PUBLIC :: nbondi, nbondj !: mark of i- and j-direction local boundaries 126 INTEGER, PUBLIC :: jperio !: Global domain lateral boundary type (between 0 and 7) 127 ! ! = 0 closed ; = 1 cyclic East-West 128 ! ! = 2 cyclic North-South ; = 3 North fold T-point pivot 129 ! ! = 4 cyclic East-West AND North fold T-point pivot 130 ! ! 
= 5 North fold F-point pivot 131 ! ! = 6 cyclic East-West AND North fold F-point pivot 132 ! ! = 7 bi-cyclic East-West AND North-South 133 LOGICAL, PUBLIC :: l_Iperio, l_Jperio ! should we explicitely take care I/J periodicity 134 135 ! ! domain MPP decomposition parameters 136 INTEGER , PUBLIC :: nimpp, njmpp !: i- & j-indexes for mpp-subdomain left bottom 137 INTEGER , PUBLIC :: nreci, nrecj !: overlap region in i and j 138 INTEGER , PUBLIC :: nproc !: number for local processor 139 INTEGER , PUBLIC :: narea !: number for local area 140 INTEGER , PUBLIC :: nbondi, nbondj !: mark of i- and j-direction local boundaries 124 141 INTEGER, ALLOCATABLE, PUBLIC :: nbondi_bdy(:) !: mark i-direction local boundaries for BDY open boundaries 125 142 INTEGER, ALLOCATABLE, PUBLIC :: nbondj_bdy(:) !: mark j-direction local boundaries for BDY open boundaries … … 132 149 INTEGER, PUBLIC :: noea, nowe !: index of the local neighboring processors in 133 150 INTEGER, PUBLIC :: noso, nono !: east, west, south and north directions 134 INTEGER, PUBLIC :: npne, npnw !: index of north east and north west processor135 INTEGER, PUBLIC :: npse, npsw !: index of south east and south west processor136 INTEGER, PUBLIC :: nbne, nbnw !: logical of north east & north west processor137 INTEGER, PUBLIC :: nbse, nbsw !: logical of south east & south west processor138 151 INTEGER, PUBLIC :: nidom !: ??? 139 152 140 153 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mig !: local ==> global domain i-index 141 154 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mjg !: local ==> global domain j-index 142 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mi0, mi1 !: global ==> local domain i-index !!bug ==> other solution?143 ! ! (mi0=1 and mi1=0 if the global indexis not in the local domain)144 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mj0, mj1 !: global ==> local domain j-index !!bug ==> other solution?145 ! ! (mi0=1 and mi1=0 if the global indexis not in the local domain)155 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mi0, mi1 !: global ==> local domain i-index (mi0=1 and mi1=0 if the global index 156 ! ! is not in the local domain) 157 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: mj0, mj1 !: global ==> local domain j-index (mj0=1 and mj1=0 if the global index 158 ! ! is not in the local domain) 146 159 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: nimppt, njmppt !: i-, j-indexes for each processor 147 160 INTEGER, PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: ibonit, ibonjt !: i-, j- processor neighbour existence … … 154 167 !! horizontal curvilinear coordinate and scale factors 155 168 !! 
--------------------------------------------------------------------- 156 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: glamt , glamu, glamv , glamf !: longitude at t, u, v, f-points [degree]157 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: gphit , gphiu, gphiv , gphif !: latitude at t, u, v, f-points [degree]169 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: glamt , glamu, glamv , glamf !: longitude at t, u, v, f-points [degree] 170 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: gphit , gphiu, gphiv , gphif !: latitude at t, u, v, f-points [degree] 158 171 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, TARGET, DIMENSION(:,:) :: e1t , e2t , r1_e1t, r1_e2t !: t-point horizontal scale factors [m] 159 172 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, TARGET, DIMENSION(:,:) :: e1u , e2u , r1_e1u, r1_e2u !: horizontal scale factors at u-point [m] … … 161 174 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, TARGET, DIMENSION(:,:) :: e1f , e2f , r1_e1f, r1_e2f !: horizontal scale factors at f-point [m] 162 175 ! 163 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2t , r1_e1e2t !: associated metrics at t-point164 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2u , r1_e1e2u , e2_e1u !: associated metrics at u-point165 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2v , r1_e1e2v , e1_e2v !: associated metrics at v-point166 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2f , r1_e1e2f !: associated metrics at f-point176 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2t , r1_e1e2t !: associated metrics at t-point 177 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2u , r1_e1e2u , e2_e1u !: associated metrics at u-point 178 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2v , r1_e1e2v , e1_e2v !: associated metrics at v-point 179 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: e1e2f , r1_e1e2f !: associated metrics at f-point 167 180 ! 168 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: ff_f, ff_t !: coriolis factor [1/s] 181 REAL(wp), PUBLIC, ALLOCATABLE, SAVE , DIMENSION(:,:) :: ff_f , ff_t !: Coriolis factor at f- & t-points [1/s] 182 183 !! s-coordinate and hybrid z-s-coordinate 184 !! =----------------======--------------- 185 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: gsigt, gsigw !: model level depth coefficient at t-, w-levels (analytic) 186 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: gsi3w !: model level depth coefficient at w-level (sum of gsigw) 187 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: esigt, esigw !: vertical scale factor coef. at t-, w-levels 188 189 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hbatv , hbatf !: ocean depth at the vertical of v--f 190 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hbatt , hbatu !: t--u points (m) 191 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: scosrf, scobot !: ocean surface and bottom topographies 192 ! ! (if deviating from coordinate surfaces in HYBRID) 193 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hifv , hiff !: interface depth between stretching at v--f 194 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hift , hifu !: and quasi-uniform spacing t--u points (m) 195 !!gm end 169 196 170 197 !!---------------------------------------------------------------------- 171 198 !! vertical coordinate and scale factors 172 199 !! --------------------------------------------------------------------- 173 ! 
!!* Namelist namzgr : vertical coordinate *174 200 LOGICAL, PUBLIC :: ln_zco !: z-coordinate - full step 175 201 LOGICAL, PUBLIC :: ln_zps !: z-coordinate - partial step 176 202 LOGICAL, PUBLIC :: ln_sco !: s-coordinate or hybrid z-s coordinate 177 203 LOGICAL, PUBLIC :: ln_isfcav !: presence of ISF 178 LOGICAL, PUBLIC :: ln_linssh !: variable grid flag179 180 204 ! ! ref. ! before ! now ! after ! 181 205 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:) :: e3t_0 , e3t_b , e3t_n , e3t_a !: t- vert. scale factor [m] … … 195 219 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: ht_0 , ht_n !: t-depth [m] 196 220 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hu_0 , hu_b , hu_n , hu_a !: u-depth [m] 197 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hv_0 , hv_b , hv_n , hv_a !: u-depth [m]221 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hv_0 , hv_b , hv_n , hv_a !: v-depth [m] 198 222 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: r1_hu_b , r1_hu_n , r1_hu_a !: inverse of u-depth [1/m] 199 223 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: r1_hv_b , r1_hv_n , r1_hv_a !: inverse of v-depth [1/m] 200 201 224 202 225 INTEGER, PUBLIC :: nla10 !: deepest W level Above ~10m (nlb10 - 1) … … 209 232 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: e3tp , e3wp !: ocean bottom level thickness at T and W points 210 233 211 !!gm This should be removed from here.... ==>>> only used in domzgr at initialization phase 212 !! s-coordinate and hybrid z-s-coordinate 213 !! =----------------======--------------- 214 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: gsigt, gsigw !: model level depth coefficient at t-, w-levels (analytic) 215 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: gsi3w !: model level depth coefficient at w-level (sum of gsigw) 216 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:) :: esigt, esigw !: vertical scale factor coef. at t-, w-levels 217 218 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hbatv , hbatf !: ocean depth at the vertical of v--f 219 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hbatt , hbatu !: t--u points (m) 220 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: scosrf, scobot !: ocean surface and bottom topographies 221 ! ! (if deviating from coordinate surfaces in HYBRID) 222 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hifv , hiff !: interface depth between stretching at v--f 223 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: hift , hifu !: and quasi-uniform spacing t--u points (m) 224 !!gm end 225 226 !!---------------------------------------------------------------------- 227 !! masks, bathymetry 234 !!---------------------------------------------------------------------- 235 !! masks, top and bottom ocean point position 228 236 !! --------------------------------------------------------------------- 237 !!gm Proposition of new name for top/bottom vertical indices 238 ! INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mtk_t, mtk_u, mtk_v !: top first wet T-, U-, V-, F-level (ISF) 239 ! INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mbk_t, mbk_u, mbk_v !: bottom last wet T-, U- and V-level 240 !!gm 229 241 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mbathy !: number of ocean level (=0, 1, ... 
, jpk-1) 230 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mbkt !: vertical index of the bottom last T- ocean level 231 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mbku, mbkv !: vertical index of the bottom last U- and W- ocean level 232 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: bathy !: ocean depth (meters) 242 REAL(wp) , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: bathy !: number of ocean level (=0, 1, ... , jpk-1) 243 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mbkt, mbku, mbkv !: bottom last wet T-, U- and V-level 233 244 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: tmask_i !: interior domain T-point mask 234 245 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: tmask_h !: internal domain T-point mask (Figure 8.5 NEMO book) 235 246 236 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: misfdep !: top first ocean level 237 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mikt, miku, mikv, mikf !: first wet T-, U-, V-, F- oceanlevel (ISF)238 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: risfdep !: Iceshelf draft 239 240 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: ssmask, ssumask, ssvmask , ssfmask!: surface mask at T-,U-, V- and F-pts247 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: misfdep !: top first ocean level (ISF) 248 INTEGER , PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: mikt, miku, mikv, mikf !: top first wet T-, U-, V-, F-level (ISF) 249 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: risfdep !: Iceshelf draft (ISF) 250 251 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: ssmask, ssumask, ssvmask !: surface mask at T-,U-, V- and F-pts 241 252 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:), TARGET :: tmask, umask, vmask, fmask !: land/ocean mask at T-, U-, V- and F-pts 242 253 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:,:), TARGET :: wmask, wumask, wvmask !: land/ocean mask at WT-, WU- and WV-pts … … 269 280 270 281 !!---------------------------------------------------------------------- 271 !! mpp reproducibility272 !!----------------------------------------------------------------------273 274 275 276 LOGICAL, PUBLIC, PARAMETER :: lk_mpp_rep = .FALSE. !: agrif flag277 278 279 !!----------------------------------------------------------------------280 282 !! agrif domain 281 283 !!---------------------------------------------------------------------- 282 283 284 284 #if defined key_agrif 285 LOGICAL, PUBLIC, PARAMETER :: lk_agrif = .TRUE. !: agrif flag 286 #else 285 287 LOGICAL, PUBLIC, PARAMETER :: lk_agrif = .FALSE. !: agrif flag 286 287 288 !!---------------------------------------------------------------------- 289 !! NEMO/O PA 4.0 , NEMO Consortium (2011)290 !! $Id: dom_oce.F90 6140 2015-12-21 11:35:23Z timgraham $291 !! Software governed by the CeCILL licen ce (./LICENSE)288 #endif 289 290 !!---------------------------------------------------------------------- 291 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 292 !! $Id: dom_oce.F90 10068 2018-08-28 14:09:04Z nicolasmartin $ 293 !! Software governed by the CeCILL license (see ./LICENSE) 292 294 !!---------------------------------------------------------------------- 293 295 CONTAINS 294 296 295 297 #if ! defined key_agrif 296 298 !!---------------------------------------------------------------------- 297 299 !! 
NOT 'key_agrif' dummy function No AGRIF zoom … … 304 306 Agrif_CFixed = '0' 305 307 END FUNCTION Agrif_CFixed 306 308 #endif 307 309 308 310 INTEGER FUNCTION dom_oce_alloc() 309 311 !!---------------------------------------------------------------------- 310 INTEGER, DIMENSION(1 3) :: ierr312 INTEGER, DIMENSION(12) :: ierr 311 313 !!---------------------------------------------------------------------- 312 314 ierr(:) = 0 313 315 ! 314 ALLOCATE( mig(jpi), mjg(jpj), nfiimpp(jpni,jpnj), & 315 & nfipproc(jpni,jpnj), nfilcit(jpni,jpnj), STAT=ierr(1) ) 316 ! 317 ALLOCATE( nimppt(jpnij) , ibonit(jpnij) , nlcit(jpnij) , nlcjt(jpnij) , & 318 & njmppt(jpnij) , ibonjt(jpnij) , nldit(jpnij) , nldjt(jpnij) , & 319 & nleit(jpnij) , nlejt(jpnij) , & 320 & mi0(jpidta) , mi1 (jpidta), mj0(jpjdta) , mj1 (jpjdta), & 321 & tpol(jpiglo) , fpol(jpiglo) , STAT=ierr(2) ) 316 ALLOCATE( mig(jpi), mjg(jpj), STAT=ierr(1) ) 317 ! 318 ALLOCATE( mi0(jpiglo) , mi1 (jpiglo), mj0(jpjglo) , mj1 (jpjglo) , & 319 & tpol(jpiglo) , fpol(jpiglo) , STAT=ierr(2) ) 322 320 ! 323 321 ALLOCATE( glamt(jpi,jpj) , glamu(jpi,jpj) , glamv(jpi,jpj) , glamf(jpi,jpj) , & … … 331 329 & e1e2v(jpi,jpj) , r1_e1e2v(jpi,jpj) , e1_e2v(jpi,jpj) , & 332 330 & e1e2f(jpi,jpj) , r1_e1e2f(jpi,jpj) , & 333 & ff_f(jpi,jpj) , ff_t(jpi,jpj) , STAT=ierr(3) )334 ! 335 ALLOCATE( gdept_0(jpi,jpj,jpk) , gdepw_0(jpi,jpj,jpk) , gde3w_0(jpi,jpj,jpk) , &331 & ff_f (jpi,jpj) , ff_t (jpi,jpj) , STAT=ierr(3) ) 332 ! 333 ALLOCATE( gdept_0(jpi,jpj,jpk) , gdepw_0(jpi,jpj,jpk) , gde3w_0(jpi,jpj,jpk) , & 336 334 & gdept_b(jpi,jpj,jpk) , gdepw_b(jpi,jpj,jpk) , & 337 335 & gdept_n(jpi,jpj,jpk) , gdepw_n(jpi,jpj,jpk) , gde3w_n(jpi,jpj,jpk) , STAT=ierr(4) ) … … 352 350 ! 353 351 ! 354 ALLOCATE( gdept_1d(jpk) , gdepw_1d(jpk) , & 355 & e3t_1d (jpk) , e3w_1d (jpk) , e3tp (jpi,jpj), e3wp(jpi,jpj) , & 356 & gsigt (jpk) , gsigw (jpk) , gsi3w(jpk) , & 357 & esigt (jpk) , esigw (jpk) , STAT=ierr(7) ) 358 ! 352 ALLOCATE( gdept_1d(jpk) , e3tp (jpi,jpj), e3wp(jpi,jpj) ,gdepw_1d(jpk) , e3t_1d(jpk) , e3w_1d(jpk) , STAT=ierr(7) ) 353 ! 354 ALLOCATE( bathy(jpi,jpj),mbathy(jpi,jpj), tmask_i(jpi,jpj) , tmask_h(jpi,jpj) , & 355 & ssmask (jpi,jpj) , ssumask(jpi,jpj) , ssvmask(jpi,jpj) , & 356 & mbkt (jpi,jpj) , mbku (jpi,jpj) , mbkv (jpi,jpj) , STAT=ierr(9) ) 357 ! 358 ALLOCATE( misfdep(jpi,jpj) , mikt(jpi,jpj) , miku(jpi,jpj) , & 359 & risfdep(jpi,jpj) , mikv(jpi,jpj) , mikf(jpi,jpj) , STAT=ierr(10) ) 360 ! 361 ALLOCATE( tmask(jpi,jpj,jpk) , umask(jpi,jpj,jpk) , & 362 & vmask(jpi,jpj,jpk) , fmask(jpi,jpj,jpk) , STAT=ierr(11) ) 363 ! 364 ALLOCATE( wmask(jpi,jpj,jpk) , wumask(jpi,jpj,jpk), wvmask(jpi,jpj,jpk) , STAT=ierr(12) ) 365 359 366 ALLOCATE( hbatv (jpi,jpj) , hbatf (jpi,jpj) , & 360 367 & hbatt (jpi,jpj) , hbatu (jpi,jpj) , & … … 362 369 & hifv (jpi,jpj) , hiff (jpi,jpj) , & 363 370 & hift (jpi,jpj) , hifu (jpi,jpj) , STAT=ierr(8) ) 364 365 ALLOCATE( mbathy(jpi,jpj) , bathy (jpi,jpj) , &366 & tmask_i(jpi,jpj) , tmask_h(jpi,jpj) , &367 & ssmask (jpi,jpj) , ssumask(jpi,jpj) , ssvmask(jpi,jpj) , ssfmask(jpi,jpj) , &368 & mbkt (jpi,jpj) , mbku (jpi,jpj) , mbkv (jpi,jpj) , STAT=ierr(9) )369 370 ! (ISF) Allocation of basic array371 ALLOCATE( misfdep(jpi,jpj) , risfdep(jpi,jpj), &372 & mikt(jpi,jpj), miku(jpi,jpj), mikv(jpi,jpj) , &373 & mikf(jpi,jpj), STAT=ierr(10) )374 375 ALLOCATE( tmask(jpi,jpj,jpk) , umask(jpi,jpj,jpk), &376 & vmask(jpi,jpj,jpk) , fmask(jpi,jpj,jpk), STAT=ierr(11) )377 378 ALLOCATE( wmask(jpi,jpj,jpk) , wumask(jpi,jpj,jpk), wvmask(jpi,jpj,jpk) , STAT=ierr(12) )379 371 ! 
380 372 dom_oce_alloc = MAXVAL(ierr) -
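The reworked dom_oce_alloc above keeps the usual NEMO pattern of collecting ALLOCATE status codes in an ierr(:) array, one entry per group of arrays, and returning MAXVAL(ierr). A minimal, hypothetical illustration of that pattern (toy array names and sizes, not the real module):

```fortran
! Hypothetical illustration of the ierr(:) / MAXVAL(ierr) allocation pattern
! used by dom_oce_alloc; array names and sizes are toy values.
PROGRAM alloc_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: jpi = 30, jpj = 20, jpk = 31
   REAL, ALLOCATABLE :: tmask(:,:,:), umask(:,:,:), bathy(:,:)
   INTEGER :: ierr(3), istat
   !
   ierr(:) = 0
   ALLOCATE( tmask(jpi,jpj,jpk), umask(jpi,jpj,jpk), STAT=ierr(1) )
   ALLOCATE( bathy(jpi,jpj)                        , STAT=ierr(2) )
   ierr(3) = 0                      ! further groups would fill ierr(3), ierr(4), ...
   !
   istat = MAXVAL( ierr )           ! non-zero as soon as any group failed
   IF( istat /= 0 ) THEN
      WRITE(*,*) 'allocation failure, code ', istat
   ELSE
      WRITE(*,*) 'all groups allocated'
   ENDIF
END PROGRAM alloc_sketch
```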
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domain.F90
r10725 r10727 24 24 USE dom_oce ! domain: ocean 25 25 USE phycst ! physical constants 26 USE closea ! closed seas26 ! USE closea ! closed seas 27 27 USE domhgr ! domain: set the horizontal mesh 28 28 USE domzgr ! domain: set the vertical mesh 29 USE domstp ! domain: set the time-step29 ! USE domstp ! domain: set the time-step 30 30 USE dommsk ! domain: set the mask system 31 31 USE domwri ! domain: write the meshmask file … … 43 43 44 44 PUBLIC dom_init ! called by opa.F90 45 PUBLIC dom_nam ! called by opa.F90 46 PUBLIC cfg_write ! called by opa.F90 45 47 46 48 !!------------------------------------------------------------------------- … … 72 74 !!---------------------------------------------------------------------- 73 75 ! 74 IF( nn_timing == 1 ) CALL timing_start('dom_init')76 ! IF( nn_timing == 1 ) CALL timing_start('dom_init') 75 77 ! 76 78 IF(lwp) THEN … … 83 85 ! 84 86 CALL dom_nam ! read namelist ( namrun, namdom ) 85 CALL dom_clo ! Closed seas and lake 87 ! CALL dom_clo ! Closed seas and lake 88 86 89 CALL dom_hgr ! Horizontal mesh 87 90 CALL dom_zgr ! Vertical mesh and bathymetry … … 135 138 CALL cfg_write ! create the configuration file 136 139 ! 137 IF( nn_timing == 1 ) CALL timing_stop('dom_init')140 ! IF( nn_timing == 1 ) CALL timing_stop('dom_init') 138 141 ! 139 142 END SUBROUTINE dom_init … … 156 159 & nn_stock, nn_write , ln_mskland , ln_clobber , nn_chunksz, nn_euler , & 157 160 & ln_cfmeta, ln_iscpl 158 NAMELIST/namdom/ nn_bathy, rn_bathy , rn_e3zps_min, rn_e3zps_rat, nn_msh, rn_hmin, rn_isfhmin, & 161 NAMELIST/namdom/ nn_bathy, cn_topo, cn_bath, cn_lon, cn_lat, nn_interp, & 162 & rn_bathy , rn_e3zps_min, rn_e3zps_rat, nn_msh, rn_hmin, rn_isfhmin, & 159 163 & rn_atfp , rn_rdt , nn_closea , ln_crs , jphgr_msh , & 160 164 & ppglam0, ppgphi0, ppe1_deg, ppe2_deg, ppe1_m, ppe2_m, & … … 209 213 ENDIF 210 214 211 no = nn_no ! conversion DOCTOR names into model names (this should disappear soon)212 215 cexper = cn_exp 213 216 nrstdt = nn_rstctl … … 271 274 WRITE(numout,*) ' Namelist namdom : space & time domain' 272 275 WRITE(numout,*) ' flag read/compute bathymetry nn_bathy = ', nn_bathy 276 IF( nn_bathy == 2 ) THEN 277 WRITE(numout,*) ' compute bathymetry from file cn_topo = ', cn_topo 278 ENDIF 273 279 WRITE(numout,*) ' Depth (if =0 bathy=jpkm1) rn_bathy = ', rn_bathy 274 280 WRITE(numout,*) ' min depth of the ocean (>0) or rn_hmin = ', rn_hmin … … 331 337 !!---------------------------------------------------------------------- 332 338 ! 339 #undef CHECK_DOM 340 #ifdef CHECK_DOM 333 341 IF(lk_mpp) THEN 334 342 CALL mpp_minloc( e1t(:,:), tmask_i(:,:), ze1min, iimi1,ijmi1 ) … … 364 372 WRITE(numout,"(14x,'e2t mini: ',1f10.2,' at i = ',i5,' j= ',i5)") ze2min, iimi2, ijmi2 365 373 ENDIF 374 #endif 366 375 ! 367 376 END SUBROUTINE dom_ctl … … 400 409 ! 401 410 clnam = 'domain_cfg' ! filename (configuration information) 402 CALL iom_open( TRIM(clnam), inum, ldwrt = .TRUE. , kiolib = jprstlib )411 CALL iom_open( TRIM(clnam), inum, ldwrt = .TRUE.)!, kiolib = jprstlib ) 403 412 404 413 ! -
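dom_nam in the hunk above extends the &namdom namelist with the new bathymetry-interpolation entries (cn_topo, cn_bath, cn_lon, cn_lat, nn_interp). As a rough, hypothetical sketch of reading such a block; the file name and the reduced entry list are illustrative only, not the DOMAINcfg reference namelist:

```fortran
! Hypothetical sketch of reading a reduced &namdom block; the file name
! 'namelist_cfg_demo' and the defaults below are illustrative assumptions.
PROGRAM namdom_sketch
   IMPLICIT NONE
   INTEGER            :: nn_bathy = 1, nn_interp = 1, ios, inum
   CHARACTER(LEN=132) :: cn_topo = 'topo.nc', cn_bath = 'Bathymetry',  &
      &                  cn_lon  = 'nav_lon', cn_lat  = 'nav_lat'
   NAMELIST/namdom/ nn_bathy, cn_topo, cn_bath, cn_lon, cn_lat, nn_interp
   !
   OPEN ( NEWUNIT=inum, FILE='namelist_cfg_demo', STATUS='OLD', &
      &   FORM='FORMATTED', IOSTAT=ios )
   IF( ios == 0 ) THEN
      READ ( inum, NML=namdom, IOSTAT=ios )   ! keep the defaults if the block is absent
      CLOSE( inum )
   ENDIF
   WRITE(*,*) 'nn_bathy  = ', nn_bathy
   WRITE(*,*) 'cn_topo   = ', TRIM(cn_topo)
   WRITE(*,*) 'nn_interp = ', nn_interp
END PROGRAM namdom_sketch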
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domcfg.f90
r9598 r10727 37 37 !!---------------------------------------------------------------------- 38 38 ! 39 IF( nn_timing == 1 ) CALL timing_start('dom_cfg')39 ! IF( nn_timing == 1 ) CALL timing_start('dom_cfg') 40 40 ! 41 41 IF(lwp) THEN ! Control print … … 60 60 CALL dom_glo ! global domain versus zoom and/or local domain 61 61 ! 62 IF( nn_timing == 1 ) CALL timing_stop('dom_cfg')62 ! IF( nn_timing == 1 ) CALL timing_stop('dom_cfg') 63 63 ! 64 64 END SUBROUTINE dom_cfg -
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domhgr.F90
r10725 r10727 112 112 !!---------------------------------------------------------------------- 113 113 ! 114 IF( nn_timing == 1 ) CALL timing_start('dom_hgr')114 ! IF( nn_timing == 1 ) CALL timing_start('dom_hgr') 115 115 ! 116 116 IF(lwp) THEN … … 131 131 CASE ( 0 ) !== read in coordinate.nc file ==! 132 132 ! 133 #if defined key_agrif 134 IF (agrif_root()) THEN 135 #endif 133 136 IF(lwp) WRITE(numout,*) 134 137 IF(lwp) WRITE(numout,*) ' curvilinear coordinate on the sphere read in "coordinate" file' … … 143 146 e1e2v (:,:) = e1v(:,:) * e2v(:,:) 144 147 ENDIF 148 #if defined key_agrif 149 ELSE 150 CALL Agrif_InitValues_cont() 151 ENDIF 152 #endif 145 153 ! 146 154 CASE ( 1 ) !== geographical mesh on the sphere with regular (in degree) grid-spacing ==! … … 272 280 ze1 = 106000. / REAL( jp_cfg , wp ) 273 281 ! benchmark: forced the resolution to be about 100 km 274 IF( nbench /= 0 ) ze1 = 106000._wp282 ! IF( nbench /= 0 ) ze1 = 106000._wp 275 283 zsin_alpha = - SQRT( 2._wp ) * 0.5_wp 276 284 zcos_alpha = SQRT( 2._wp ) * 0.5_wp 277 285 ze1deg = ze1 / (ra * rad) 278 IF( nbench /= 0 ) ze1deg = ze1deg / REAL( jp_cfg , wp ) ! benchmark: keep the lat/+lon286 ! IF( nbench /= 0 ) ze1deg = ze1deg / REAL( jp_cfg , wp ) ! benchmark: keep the lat/+lon 279 287 ! ! at the right jp_cfg resolution 280 288 glam0 = zlam1 + zcos_alpha * ze1deg * REAL( jpjglo-2 , wp ) … … 395 403 zminff=ff_f(nldi,nldj) 396 404 zmaxff=ff_f(nldi,nlej) 397 CALL mpp_min( zminff ) ! min over the global domain398 CALL mpp_max( zmaxff ) ! max over the global domain405 CALL mpp_min( 'toto',zminff ) ! min over the global domain 406 CALL mpp_max( 'toto',zmaxff ) ! max over the global domain 399 407 IF(lwp) WRITE(numout,*) ' Coriolis parameter varies globally from ', zminff,' to ', zmaxff 400 408 END IF … … 418 426 zminff=ff_f(nldi,nldj) 419 427 zmaxff=ff_f(nldi,nlej) 420 CALL mpp_min( zminff ) ! min over the global domain421 CALL mpp_max( zmaxff ) ! max over the global domain428 CALL mpp_min('toto', zminff ) ! min over the global domain 429 CALL mpp_max( 'toto',zmaxff ) ! max over the global domain 422 430 IF(lwp) WRITE(numout,*) ' Coriolis parameter varies globally from ', zminff,' to ', zmaxff 423 431 END IF … … 430 438 ! The equator line must be the latitude coordinate axe 431 439 432 IF( nperio == 2 ) THEN433 znorme = SQRT( SUM( gphiu(:,2) * gphiu(:,2) ) ) / REAL( jpi )434 IF( znorme > 1.e-13 ) CALL ctl_stop( ' ===>>>> : symmetrical condition: rerun with good equator line' )435 ENDIF436 ! 437 IF( nn_timing == 1 ) CALL timing_stop('dom_hgr')440 ! IF( nperio == 2 ) THEN 441 ! znorme = SQRT( SUM( gphiu(:,2) * gphiu(:,2) ) ) / REAL( jpi ) 442 ! IF( znorme > 1.e-13 ) CALL ctl_stop( ' ===>>>> : symmetrical condition: rerun with good equator line' ) 443 ! ENDIF 444 ! 445 ! IF( nn_timing == 1 ) CALL timing_stop('dom_hgr') 438 446 ! 439 447 END SUBROUTINE dom_hgr -
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/dommsk.F90
r10725 r10727 120 120 !!--------------------------------------------------------------------- 121 121 ! 122 IF( nn_timing == 1 ) CALL timing_start('dom_msk')122 ! IF( nn_timing == 1 ) CALL timing_start('dom_msk') 123 123 ! 124 124 CALL wrk_alloc( jpi, jpj, imsk ) … … 180 180 ! Interior domain mask (used for global sum) 181 181 ! -------------------- 182 tmask_i(:,:) = ssmask(:,:) ! (ISH) tmask_i = 1 even on the ice shelf183 184 tmask_h(:,:) = 1._wp ! 0 on the halo and 1 elsewhere185 iif = jpreci ! ???186 iil = nlci - jpreci + 1187 ijf = jprecj ! ???188 ijl = nlcj - jprecj + 1189 190 tmask_h( 1 :iif, : ) = 0._wp ! first columns191 tmask_h(iil:jpi, : ) = 0._wp ! last columns (including mpp extra columns)192 tmask_h( : , 1 :ijf) = 0._wp ! first rows193 tmask_h( : ,ijl:jpj) = 0._wp ! last rows (including mpp extra rows)182 ! tmask_i(:,:) = ssmask(:,:) ! (ISH) tmask_i = 1 even on the ice shelf 183 184 ! tmask_h(:,:) = 1._wp ! 0 on the halo and 1 elsewhere 185 ! iif = jpreci ! ??? 186 ! iil = nlci - jpreci + 1 187 ! ijf = jprecj ! ??? 188 ! ijl = nlcj - jprecj + 1 189 190 ! tmask_h( 1 :iif, : ) = 0._wp ! first columns 191 ! tmask_h(iil:jpi, : ) = 0._wp ! last columns (including mpp extra columns) 192 ! tmask_h( : , 1 :ijf) = 0._wp ! first rows 193 ! tmask_h( : ,ijl:jpj) = 0._wp ! last rows (including mpp extra rows) 194 194 195 195 ! north fold mask 196 196 ! --------------- 197 tpol(1:jpiglo) = 1._wp198 fpol(1:jpiglo) = 1._wp199 IF( jperio == 3 .OR. jperio == 4 ) THEN ! T-point pivot200 tpol(jpiglo/2+1:jpiglo) = 0._wp201 fpol( 1 :jpiglo) = 0._wp202 IF( mjg(nlej) == jpjglo ) THEN ! only half of the nlcj-1 row203 DO ji = iif+1, iil-1204 tmask_h(ji,nlej-1) = tmask_h(ji,nlej-1) * tpol(mig(ji))205 END DO206 ENDIF207 ENDIF197 ! tpol(1:jpiglo) = 1._wp 198 ! fpol(1:jpiglo) = 1._wp 199 ! IF( jperio == 3 .OR. jperio == 4 ) THEN ! T-point pivot 200 ! tpol(jpiglo/2+1:jpiglo) = 0._wp 201 ! fpol( 1 :jpiglo) = 0._wp 202 ! IF( mjg(nlej) == jpjglo ) THEN ! only half of the nlcj-1 row 203 ! DO ji = iif+1, iil-1 204 ! tmask_h(ji,nlej-1) = tmask_h(ji,nlej-1) * tpol(mig(ji)) 205 ! END DO 206 ! ENDIF 207 ! ENDIF 208 208 209 tmask_i(:,:) = tmask_i(:,:) * tmask_h(:,:)210 211 IF( jperio == 5 .OR. jperio == 6 ) THEN ! F-point pivot212 tpol( 1 :jpiglo) = 0._wp213 fpol(jpiglo/2+1:jpiglo) = 0._wp214 ENDIF209 ! tmask_i(:,:) = tmask_i(:,:) * tmask_h(:,:) 210 211 ! IF( jperio == 5 .OR. jperio == 6 ) THEN ! F-point pivot 212 ! tpol( 1 :jpiglo) = 0._wp 213 ! fpol(jpiglo/2+1:jpiglo) = 0._wp 214 ! ENDIF 215 215 216 216 ! 2. Ocean/land mask at u-, v-, and z-points (computed from tmask) … … 229 229 END DO 230 230 ! (ISF) MIN(1,SUM(umask)) is here to check if you have effectively at least 1 wet cell at u point 231 232 DO ji = 1, jpim1 ! vector loop233 ssumask(ji,jj) = ssmask(ji,jj) * ssmask(ji+1,jj ) * MIN(1._wp,SUM(umask(ji,jj,:)))234 ssvmask(ji,jj) = ssmask(ji,jj) * ssmask(ji ,jj+1) * MIN(1._wp,SUM(vmask(ji,jj,:)))235 END DO236 DO ji = 1, jpim1 ! NO vector opt.237 ssfmask(ji,jj) = ssmask(ji,jj ) * ssmask(ji+1,jj ) &238 & * ssmask(ji,jj+1) * ssmask(ji+1,jj+1) * MIN(1._wp,SUM(fmask(ji,jj,:)))239 END DO240 END DO241 CALL lbc_lnk( umask , 'U', 1._wp ) ! Lateral boundary conditions242 CALL lbc_lnk( vmask , 'V', 1._wp )243 CALL lbc_lnk( fmask , 'F', 1._wp )244 CALL lbc_lnk(ssumask, 'U', 1._wp ) ! Lateral boundary conditions245 CALL lbc_lnk(ssvmask, 'V', 1._wp )246 CALL lbc_lnk(ssfmask, 'F', 1._wp )231 ! DO jj = 1, jpjm1 232 ! DO ji = 1, jpim1 ! vector loop 233 ! 
ssumask(ji,jj) = ssmask(ji,jj) * ssmask(ji+1,jj ) * MIN(1._wp,SUM(umask(ji,jj,:))) 234 ! ssvmask(ji,jj) = ssmask(ji,jj) * ssmask(ji ,jj+1) * MIN(1._wp,SUM(vmask(ji,jj,:))) 235 !! END DO 236 ! DO ji = 1, jpim1 ! NO vector opt. 237 ! ssfmask(ji,jj) = ssmask(ji,jj ) * ssmask(ji+1,jj ) & 238 ! & * ssmask(ji,jj+1) * ssmask(ji+1,jj+1) * MIN(1._wp,SUM(fmask(ji,jj,:))) 239 ! END DO 240 ! END DO 241 CALL lbc_lnk( 'toto',umask , 'U', 1._wp ) ! Lateral boundary conditions 242 CALL lbc_lnk( 'toto',vmask , 'V', 1._wp ) 243 CALL lbc_lnk( 'toto',fmask , 'F', 1._wp ) 244 ! CALL lbc_lnk( 'toto',ssumask, 'U', 1._wp ) ! Lateral boundary conditions 245 ! CALL lbc_lnk( 'toto',ssvmask, 'V', 1._wp ) 246 ! CALL lbc_lnk( 'toto',ssfmask, 'F', 1._wp ) 247 247 248 248 ! 3. Ocean/land mask at wu-, wv- and w points … … 355 355 ENDIF 356 356 ! 357 CALL lbc_lnk( fmask, 'F', 1._wp ) ! Lateral boundary conditions on fmask357 CALL lbc_lnk( 'toto',fmask, 'F', 1._wp ) ! Lateral boundary conditions on fmask 358 358 ! 359 359 ! CAUTION : The fmask may be further modified in dyn_vor_init ( dynvor.F90 ) … … 362 362 CALL wrk_dealloc( jpi, jpj, zwf ) 363 363 ! 364 IF( nn_timing == 1 ) CALL timing_stop('dom_msk')364 ! IF( nn_timing == 1 ) CALL timing_stop('dom_msk') 365 365 ! 366 366 END SUBROUTINE dom_msk -
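The surface-mask lines commented out in dom_msk above follow the usual product rule: a U- (or V-) point is wet only if the two neighbouring T-points are wet. A toy sketch of that rule, leaving out the ISF refinement (MIN(1,SUM(umask))) and the halo exchange (lbc_lnk) of the real routine:

```fortran
! Toy sketch of the T-point product rule behind ssumask/ssvmask in dom_msk;
! the 6x5 land/sea mask below is illustrative only.
PROGRAM mask_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: jpi = 6, jpj = 5
   REAL :: ssmask(jpi,jpj), ssumask(jpi,jpj), ssvmask(jpi,jpj)
   INTEGER :: ji, jj
   !
   ssmask(:,:) = 1. ;  ssmask(1,:) = 0. ;  ssmask(:,jpj) = 0.   ! toy land/sea mask
   ssumask(:,:) = 0. ;  ssvmask(:,:) = 0.
   DO jj = 1, jpj - 1
      DO ji = 1, jpi - 1
         ssumask(ji,jj) = ssmask(ji,jj) * ssmask(ji+1,jj  )   ! wet only if both T-cells are wet
         ssvmask(ji,jj) = ssmask(ji,jj) * ssmask(ji  ,jj+1)
      END DO
   END DO
   WRITE(*,*) 'wet U points: ', NINT( SUM(ssumask) ), '   wet V points: ', NINT( SUM(ssvmask) )
END PROGRAM mask_sketch
```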
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domngb.F90
r10725 r10727 11 11 !!---------------------------------------------------------------------- 12 12 USE dom_oce ! ocean space and time domain 13 ! 13 14 USE in_out_manager ! I/O manager 14 15 USE lib_mpp ! for mppsum 15 USE wrk_nemo ! Memory allocation16 USE timing ! Timing17 16 18 17 IMPLICIT NONE … … 23 22 !!---------------------------------------------------------------------- 24 23 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 25 !! $Id: domngb.F90 6140 2015-12-21 11:35:23Z timgraham$26 !! Software governed by the CeCILL licen ce (./LICENSE)24 !! $Id: domngb.F90 10425 2018-12-19 21:54:16Z smasson $ 25 !! Software governed by the CeCILL license (see ./LICENSE) 27 26 !!---------------------------------------------------------------------- 28 27 CONTAINS … … 45 44 INTEGER , DIMENSION(2) :: iloc 46 45 REAL(wp) :: zlon, zmini 47 REAL(wp), POINTER, DIMENSION(:,:) ::zglam, zgphi, zmask, zdist46 REAL(wp), DIMENSION(jpi,jpj) :: zglam, zgphi, zmask, zdist 48 47 !!-------------------------------------------------------------------- 49 !50 IF( nn_timing == 1 ) CALL timing_start('dom_ngb')51 !52 CALL wrk_alloc( jpi,jpj, zglam, zgphi, zmask, zdist )53 48 ! 54 49 zmask(:,:) = 0._wp … … 62 57 END SELECT 63 58 64 IF (jphgr_msh /= 2 .AND. jphgr_msh /= 3) THEN 65 zlon = MOD( plon + 720., 360. ) ! plon between 0 and 360 66 zglam(:,:) = MOD( zglam(:,:) + 720., 360. ) ! glam between 0 and 360 67 IF( zlon > 270. ) zlon = zlon - 360. ! zlon between -90 and 270 68 IF( zlon < 90. ) WHERE( zglam(:,:) > 180. ) zglam(:,:) = zglam(:,:) - 360. ! glam between -180 and 180 69 zglam(:,:) = zglam(:,:) - zlon 70 ELSE 71 zglam(:,:) = zglam(:,:) - plon 72 END IF 59 zlon = MOD( plon + 720., 360. ) ! plon between 0 and 360 60 zglam(:,:) = MOD( zglam(:,:) + 720., 360. ) ! glam between 0 and 360 61 IF( zlon > 270. ) zlon = zlon - 360. ! zlon between -90 and 270 62 IF( zlon < 90. ) WHERE( zglam(:,:) > 180. ) zglam(:,:) = zglam(:,:) - 360. ! glam between -180 and 180 63 zglam(:,:) = zglam(:,:) - zlon 73 64 74 65 zgphi(:,:) = zgphi(:,:) - plat … … 76 67 77 68 IF( lk_mpp ) THEN 78 CALL mpp_minloc( zdist(:,:), zmask, zmini, kii, kjj) 69 CALL mpp_minloc( 'domngb', zdist(:,:), zmask, zmini, iloc) 70 kii = iloc(1) ; kjj = iloc(2) 79 71 ELSE 80 72 iloc(:) = MINLOC( zdist(:,:), mask = zmask(:,:) == 1.e0 ) … … 83 75 ENDIF 84 76 ! 85 CALL wrk_dealloc( jpi,jpj, zglam, zgphi, zmask, zdist )86 !87 IF( nn_timing == 1 ) CALL timing_stop('dom_ngb')88 !89 77 END SUBROUTINE dom_ngb 90 78 -
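dom_ngb above normalises the target and grid longitudes into a common range before locating the nearest wet point with MINLOC (or mpp_minloc in MPP runs). A standalone sketch of that normalisation on a toy grid; the squared-degree distance used here is a simplification for illustration, not the metric of the real routine:

```fortran
! Sketch of the longitude normalisation and masked MINLOC search of dom_ngb;
! the toy grid and the planar squared-degree distance are assumptions.
PROGRAM ngb_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: jpi = 8, jpj = 6
   REAL :: zglam(jpi,jpj), zgphi(jpi,jpj), zmask(jpi,jpj), zdist(jpi,jpj)
   REAL :: plon, plat, zlon
   INTEGER :: ji, jj, iloc(2)
   !
   plon = -150. ;  plat = 10.                      ! target position
   DO jj = 1, jpj                                  ! toy regular grid
      DO ji = 1, jpi
         zglam(ji,jj) = -180. + 45. * REAL(ji-1)
         zgphi(ji,jj) = -30.  + 12. * REAL(jj-1)
      END DO
   END DO
   zmask(:,:) = 1.                                 ! every point is a candidate here
   !
   zlon       = MOD( plon + 720., 360. )           ! plon between 0 and 360
   zglam(:,:) = MOD( zglam(:,:) + 720., 360. )     ! grid longitudes between 0 and 360
   IF( zlon > 270. ) zlon = zlon - 360.            ! zlon between -90 and 270
   IF( zlon <  90. ) WHERE( zglam(:,:) > 180. ) zglam(:,:) = zglam(:,:) - 360.
   zglam(:,:) = zglam(:,:) - zlon
   zgphi(:,:) = zgphi(:,:) - plat
   zdist(:,:) = zglam(:,:)*zglam(:,:) + zgphi(:,:)*zgphi(:,:)   ! crude planar distance
   !
   iloc = MINLOC( zdist(:,:), mask = zmask(:,:) == 1. )
   WRITE(*,*) 'nearest grid point: i =', iloc(1), '  j =', iloc(2)
END PROGRAM ngb_sketch
```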
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domvvl.F90
r10725 r10727 6 6 !! History : 2.0 ! 2006-06 (B. Levier, L. Marie) original code 7 7 !! 3.1 ! 2009-02 (G. Madec, M. Leclair, R. Benshila) pure z* coordinate 8 !! 3.3 ! 2011-10 (M. Leclair) totally rewrote domvvl: 9 !! vvl option includes z_star and z_tilde coordinates 8 !! 3.3 ! 2011-10 (M. Leclair) totally rewrote domvvl: vvl option includes z_star and z_tilde coordinates 10 9 !! 3.6 ! 2014-11 (P. Mathiot) add ice shelf capability 11 10 !!---------------------------------------------------------------------- … … 22 21 USE phycst ! physical constant 23 22 USE dom_oce ! ocean space and time domain 23 ! USE wet_dry ! wetting and drying 24 ! USE usrdef_istate ! user defined initial state (wad only) 25 ! USE restart ! ocean restart 24 26 ! 25 27 USE in_out_manager ! I/O manager … … 27 29 USE lib_mpp ! distributed memory computing library 28 30 USE lbclnk ! ocean lateral boundary conditions (or mpp link) 29 USE wrk_nemo ! Memory allocation30 31 USE timing ! Timing 31 32 … … 34 35 35 36 PUBLIC dom_vvl_init ! called by domain.F90 37 PUBLIC dom_vvl_sf_nxt ! called by step.F90 38 PUBLIC dom_vvl_sf_swp ! called by step.F90 39 PUBLIC dom_vvl_interpol ! called by dynnxt.F90 36 40 37 41 ! !!* Namelist nam_vvl … … 57 61 58 62 !! * Substitutions 63 # include "vectopt_loop_substitute.h90" 59 64 !!---------------------------------------------------------------------- 60 !! *** vectopt_loop_substitute *** 61 !!---------------------------------------------------------------------- 62 !! ** purpose : substitute the inner loop start/end indices with CPP macro 63 !! allow unrolling of do-loop (useful with vector processors) 64 !!---------------------------------------------------------------------- 65 !!---------------------------------------------------------------------- 66 !! NEMO/OPA 3.7 , NEMO Consortium (2014) 67 !! $Id: vectopt_loop_substitute.h90 4990 2014-12-15 16:42:49Z timgraham $ 68 !! Software governed by the CeCILL licence (./LICENSE) 69 !!---------------------------------------------------------------------- 70 !!---------------------------------------------------------------------- 71 !! NEMO/OPA 3.7 , NEMO-Consortium (2015) 72 !! $Id: domvvl.F90 6351 2016-02-24 18:50:11Z cetlod $ 73 !! Software governed by the CeCILL licence (./LICENSE) 65 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 66 !! $Id: domvvl.F90 10425 2018-12-19 21:54:16Z smasson $ 67 !! Software governed by the CeCILL license (see ./LICENSE) 74 68 !!---------------------------------------------------------------------- 75 69 CONTAINS … … 84 78 & dtilde_e3t_a(jpi,jpj,jpk) , un_td (jpi,jpj,jpk) , vn_td (jpi,jpj,jpk) , & 85 79 & STAT = dom_vvl_alloc ) 86 IF( lk_mpp ) CALL mpp_sum (dom_vvl_alloc )87 IF( dom_vvl_alloc /= 0 ) CALL ctl_ warn('dom_vvl_alloc: failed to allocate arrays')80 CALL mpp_sum ( 'domvvl', dom_vvl_alloc ) 81 IF( dom_vvl_alloc /= 0 ) CALL ctl_stop( 'STOP', 'dom_vvl_alloc: failed to allocate arrays' ) 88 82 un_td = 0._wp 89 83 vn_td = 0._wp … … 91 85 IF( ln_vvl_ztilde ) THEN 92 86 ALLOCATE( frq_rst_e3t(jpi,jpj) , frq_rst_hdv(jpi,jpj) , hdiv_lf(jpi,jpj,jpk) , STAT= dom_vvl_alloc ) 93 IF( lk_mpp ) CALL mpp_sum (dom_vvl_alloc )94 IF( dom_vvl_alloc /= 0 ) CALL ctl_ warn('dom_vvl_alloc: failed to allocate arrays')87 CALL mpp_sum ( 'domvvl', dom_vvl_alloc ) 88 IF( dom_vvl_alloc /= 0 ) CALL ctl_stop( 'STOP', 'dom_vvl_alloc: failed to allocate arrays' ) 95 89 ENDIF 96 90 ! … … 125 119 !!---------------------------------------------------------------------- 126 120 ! 
127 IF( nn_timing == 1 ) CALL timing_start('dom_vvl_init')128 !129 121 IF(lwp) WRITE(numout,*) 130 122 IF(lwp) WRITE(numout,*) 'dom_vvl_init : Variable volume activated' … … 137 129 ! 138 130 ! ! Read or initialize e3t_(b/n), tilde_e3t_(b/n) and hdiv_lf 131 CALL dom_vvl_rst( nit000, 'READ' ) 139 132 e3t_a(:,:,jpk) = e3t_0(:,:,jpk) ! last level always inside the sea floor set one for all 140 133 ! … … 153 146 CALL dom_vvl_interpol( e3v_n(:,:,:), e3vw_n(:,:,:), 'VW' ) ! from V to UW 154 147 CALL dom_vvl_interpol( e3v_b(:,:,:), e3vw_b(:,:,:), 'VW' ) 148 149 ! We need to define e3[tuv]_a for AGRIF initialisation (should not be a problem for the restartability...) 150 e3t_a(:,:,:) = e3t_n(:,:,:) 151 e3u_a(:,:,:) = e3u_n(:,:,:) 152 e3v_a(:,:,:) = e3v_n(:,:,:) 155 153 ! 156 154 ! !== depth of t and w-point ==! (set the isf depth as it is in the initial timestep) … … 235 233 END DO 236 234 END DO 237 IF( cp_cfg == "orca" .AND. jp_cfg == 3 ) THEN ! ORCA2: Suppress ztilde in the Foxe Basin for ORCA2 238 ii0 = 103 ; ii1 = 111 239 ij0 = 128 ; ij1 = 135 ; 240 frq_rst_e3t( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) ) = 0.0_wp 241 frq_rst_hdv( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) ) = 1.e0_wp / rdt 235 IF( cn_cfg == "orca" .OR. cn_cfg == "ORCA" ) THEN 236 IF( nn_cfg == 3 ) THEN ! ORCA2: Suppress ztilde in the Foxe Basin for ORCA2 237 ii0 = 103 ; ii1 = 111 238 ij0 = 128 ; ij1 = 135 ; 239 frq_rst_e3t( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) ) = 0.0_wp 240 frq_rst_hdv( mi0(ii0):mi1(ii1) , mj0(ij0):mj1(ij1) ) = 1.e0_wp / rdt 241 ENDIF 242 242 ENDIF 243 243 ENDIF 244 244 ENDIF 245 245 ! 246 IF( nn_timing == 1 ) CALL timing_stop('dom_vvl_init') 246 IF(lwxios) THEN 247 ! define variables in restart file when writing with XIOS 248 CALL iom_set_rstw_var_active('e3t_b') 249 CALL iom_set_rstw_var_active('e3t_n') 250 ! ! ----------------------- ! 251 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! z_tilde and layer cases ! 252 ! ! ----------------------- ! 253 CALL iom_set_rstw_var_active('tilde_e3t_b') 254 CALL iom_set_rstw_var_active('tilde_e3t_n') 255 END IF 256 ! ! -------------! 257 IF( ln_vvl_ztilde ) THEN ! z_tilde case ! 258 ! ! ------------ ! 259 CALL iom_set_rstw_var_active('hdiv_lf') 260 ENDIF 261 ! 262 ENDIF 247 263 ! 248 264 END SUBROUTINE dom_vvl_init 265 266 267 SUBROUTINE dom_vvl_sf_nxt( kt, kcall ) 268 !!---------------------------------------------------------------------- 269 !! *** ROUTINE dom_vvl_sf_nxt *** 270 !! 271 !! ** Purpose : - compute the after scale factors used in tra_zdf, dynnxt, 272 !! tranxt and dynspg routines 273 !! 274 !! ** Method : - z_star case: Repartition of ssh INCREMENT proportionnaly to the level thickness. 275 !! - z_tilde_case: after scale factor increment = 276 !! high frequency part of horizontal divergence 277 !! + retsoring towards the background grid 278 !! + thickness difusion 279 !! Then repartition of ssh INCREMENT proportionnaly 280 !! to the "baroclinic" level thickness. 281 !! 282 !! ** Action : - hdiv_lf : restoring towards full baroclinic divergence in z_tilde case 283 !! - tilde_e3t_a: after increment of vertical scale factor 284 !! in z_tilde case 285 !! - e3(t/u/v)_a 286 !! 287 !! Reference : Leclair, M., and Madec, G. 2011, Ocean Modelling. 288 !!---------------------------------------------------------------------- 289 INTEGER, INTENT( in ) :: kt ! time step 290 INTEGER, INTENT( in ), OPTIONAL :: kcall ! optional argument indicating call sequence 291 ! 292 INTEGER :: ji, jj, jk ! dummy loop indices 293 INTEGER , DIMENSION(3) :: ijk_max, ijk_min ! 
temporary integers 294 REAL(wp) :: z2dt, z_tmin, z_tmax ! local scalars 295 LOGICAL :: ll_do_bclinic ! local logical 296 REAL(wp), DIMENSION(jpi,jpj) :: zht, z_scale, zwu, zwv, zhdiv 297 REAL(wp), DIMENSION(jpi,jpj,jpk) :: ze3t 298 !!---------------------------------------------------------------------- 299 ! 300 IF( ln_linssh ) RETURN ! No calculation in linear free surface 301 ! 302 IF( ln_timing ) CALL timing_start('dom_vvl_sf_nxt') 303 ! 304 IF( kt == nit000 ) THEN 305 IF(lwp) WRITE(numout,*) 306 IF(lwp) WRITE(numout,*) 'dom_vvl_sf_nxt : compute after scale factors' 307 IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~~~' 308 ENDIF 309 310 ll_do_bclinic = .TRUE. 311 IF( PRESENT(kcall) ) THEN 312 IF( kcall == 2 .AND. ln_vvl_ztilde ) ll_do_bclinic = .FALSE. 313 ENDIF 314 315 ! ******************************* ! 316 ! After acale factors at t-points ! 317 ! ******************************* ! 318 ! ! --------------------------------------------- ! 319 ! ! z_star coordinate and barotropic z-tilde part ! 320 ! ! --------------------------------------------- ! 321 ! 322 z_scale(:,:) = ( ssha(:,:) - sshb(:,:) ) * ssmask(:,:) / ( ht_0(:,:) + sshn(:,:) + 1. - ssmask(:,:) ) 323 DO jk = 1, jpkm1 324 ! formally this is the same as e3t_a = e3t_0*(1+ssha/ht_0) 325 e3t_a(:,:,jk) = e3t_b(:,:,jk) + e3t_n(:,:,jk) * z_scale(:,:) * tmask(:,:,jk) 326 END DO 327 ! 328 IF( ln_vvl_ztilde .OR. ln_vvl_layer .AND. ll_do_bclinic ) THEN ! z_tilde or layer coordinate ! 329 ! ! ------baroclinic part------ ! 330 ! I - initialization 331 ! ================== 332 333 ! 1 - barotropic divergence 334 ! ------------------------- 335 zhdiv(:,:) = 0._wp 336 zht(:,:) = 0._wp 337 DO jk = 1, jpkm1 338 zhdiv(:,:) = zhdiv(:,:) + e3t_n(:,:,jk) * hdivn(:,:,jk) 339 zht (:,:) = zht (:,:) + e3t_n(:,:,jk) * tmask(:,:,jk) 340 END DO 341 zhdiv(:,:) = zhdiv(:,:) / ( zht(:,:) + 1. - tmask_i(:,:) ) 342 343 ! 2 - Low frequency baroclinic horizontal divergence (z-tilde case only) 344 ! -------------------------------------------------- 345 IF( ln_vvl_ztilde ) THEN 346 IF( kt > nit000 ) THEN 347 DO jk = 1, jpkm1 348 hdiv_lf(:,:,jk) = hdiv_lf(:,:,jk) - rdt * frq_rst_hdv(:,:) & 349 & * ( hdiv_lf(:,:,jk) - e3t_n(:,:,jk) * ( hdivn(:,:,jk) - zhdiv(:,:) ) ) 350 END DO 351 ENDIF 352 ENDIF 353 354 ! II - after z_tilde increments of vertical scale factors 355 ! ======================================================= 356 tilde_e3t_a(:,:,:) = 0._wp ! tilde_e3t_a used to store tendency terms 357 358 ! 1 - High frequency divergence term 359 ! ---------------------------------- 360 IF( ln_vvl_ztilde ) THEN ! z_tilde case 361 DO jk = 1, jpkm1 362 tilde_e3t_a(:,:,jk) = tilde_e3t_a(:,:,jk) - ( e3t_n(:,:,jk) * ( hdivn(:,:,jk) - zhdiv(:,:) ) - hdiv_lf(:,:,jk) ) 363 END DO 364 ELSE ! layer case 365 DO jk = 1, jpkm1 366 tilde_e3t_a(:,:,jk) = tilde_e3t_a(:,:,jk) - e3t_n(:,:,jk) * ( hdivn(:,:,jk) - zhdiv(:,:) ) * tmask(:,:,jk) 367 END DO 368 ENDIF 369 370 ! 2 - Restoring term (z-tilde case only) 371 ! ------------------ 372 IF( ln_vvl_ztilde ) THEN 373 DO jk = 1, jpk 374 tilde_e3t_a(:,:,jk) = tilde_e3t_a(:,:,jk) - frq_rst_e3t(:,:) * tilde_e3t_b(:,:,jk) 375 END DO 376 ENDIF 377 378 ! 3 - Thickness diffusion term 379 ! ---------------------------- 380 zwu(:,:) = 0._wp 381 zwv(:,:) = 0._wp 382 DO jk = 1, jpkm1 ! a - first derivative: diffusive fluxes 383 DO jj = 1, jpjm1 384 DO ji = 1, fs_jpim1 ! vector opt. 
385 un_td(ji,jj,jk) = rn_ahe3 * umask(ji,jj,jk) * e2_e1u(ji,jj) & 386 & * ( tilde_e3t_b(ji,jj,jk) - tilde_e3t_b(ji+1,jj ,jk) ) 387 vn_td(ji,jj,jk) = rn_ahe3 * vmask(ji,jj,jk) * e1_e2v(ji,jj) & 388 & * ( tilde_e3t_b(ji,jj,jk) - tilde_e3t_b(ji ,jj+1,jk) ) 389 zwu(ji,jj) = zwu(ji,jj) + un_td(ji,jj,jk) 390 zwv(ji,jj) = zwv(ji,jj) + vn_td(ji,jj,jk) 391 END DO 392 END DO 393 END DO 394 DO jj = 1, jpj ! b - correction for last oceanic u-v points 395 DO ji = 1, jpi 396 un_td(ji,jj,mbku(ji,jj)) = un_td(ji,jj,mbku(ji,jj)) - zwu(ji,jj) 397 vn_td(ji,jj,mbkv(ji,jj)) = vn_td(ji,jj,mbkv(ji,jj)) - zwv(ji,jj) 398 END DO 399 END DO 400 DO jk = 1, jpkm1 ! c - second derivative: divergence of diffusive fluxes 401 DO jj = 2, jpjm1 402 DO ji = fs_2, fs_jpim1 ! vector opt. 403 tilde_e3t_a(ji,jj,jk) = tilde_e3t_a(ji,jj,jk) + ( un_td(ji-1,jj ,jk) - un_td(ji,jj,jk) & 404 & + vn_td(ji ,jj-1,jk) - vn_td(ji,jj,jk) & 405 & ) * r1_e1e2t(ji,jj) 406 END DO 407 END DO 408 END DO 409 ! ! d - thickness diffusion transport: boundary conditions 410 ! (stored for tracer advction and continuity equation) 411 CALL lbc_lnk_multi( 'domvvl', un_td , 'U' , -1._wp, vn_td , 'V' , -1._wp) 412 413 ! 4 - Time stepping of baroclinic scale factors 414 ! --------------------------------------------- 415 ! Leapfrog time stepping 416 ! ~~~~~~~~~~~~~~~~~~~~~~ 417 IF( neuler == 0 .AND. kt == nit000 ) THEN 418 z2dt = rdt 419 ELSE 420 z2dt = 2.0_wp * rdt 421 ENDIF 422 CALL lbc_lnk( 'domvvl', tilde_e3t_a(:,:,:), 'T', 1._wp ) 423 tilde_e3t_a(:,:,:) = tilde_e3t_b(:,:,:) + z2dt * tmask(:,:,:) * tilde_e3t_a(:,:,:) 424 425 ! Maximum deformation control 426 ! ~~~~~~~~~~~~~~~~~~~~~~~~~~~ 427 ze3t(:,:,jpk) = 0._wp 428 DO jk = 1, jpkm1 429 ze3t(:,:,jk) = tilde_e3t_a(:,:,jk) / e3t_0(:,:,jk) * tmask(:,:,jk) * tmask_i(:,:) 430 END DO 431 z_tmax = MAXVAL( ze3t(:,:,:) ) 432 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 433 z_tmin = MINVAL( ze3t(:,:,:) ) 434 CALL mpp_min( 'domvvl', z_tmin ) ! min over the global domain 435 ! - ML - test: for the moment, stop simulation for too large e3_t variations 436 IF( ( z_tmax > rn_zdef_max ) .OR. ( z_tmin < - rn_zdef_max ) ) THEN 437 IF( lk_mpp ) THEN 438 CALL mpp_maxloc( 'domvvl', ze3t, tmask, z_tmax, ijk_max ) 439 CALL mpp_minloc( 'domvvl', ze3t, tmask, z_tmin, ijk_min ) 440 ELSE 441 ijk_max = MAXLOC( ze3t(:,:,:) ) 442 ijk_max(1) = ijk_max(1) + nimpp - 1 443 ijk_max(2) = ijk_max(2) + njmpp - 1 444 ijk_min = MINLOC( ze3t(:,:,:) ) 445 ijk_min(1) = ijk_min(1) + nimpp - 1 446 ijk_min(2) = ijk_min(2) + njmpp - 1 447 ENDIF 448 IF (lwp) THEN 449 WRITE(numout, *) 'MAX( tilde_e3t_a(:,:,:) / e3t_0(:,:,:) ) =', z_tmax 450 WRITE(numout, *) 'at i, j, k=', ijk_max 451 WRITE(numout, *) 'MIN( tilde_e3t_a(:,:,:) / e3t_0(:,:,:) ) =', z_tmin 452 WRITE(numout, *) 'at i, j, k=', ijk_min 453 CALL ctl_stop( 'STOP', 'MAX( ABS( tilde_e3t_a(:,:,: ) ) / e3t_0(:,:,:) ) too high') 454 ENDIF 455 ENDIF 456 ! - ML - end test 457 ! - ML - Imposing these limits will cause a baroclinicity error which is corrected for below 458 tilde_e3t_a(:,:,:) = MIN( tilde_e3t_a(:,:,:), rn_zdef_max * e3t_0(:,:,:) ) 459 tilde_e3t_a(:,:,:) = MAX( tilde_e3t_a(:,:,:), - rn_zdef_max * e3t_0(:,:,:) ) 460 461 ! 462 ! "tilda" change in the after scale factor 463 ! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 464 DO jk = 1, jpkm1 465 dtilde_e3t_a(:,:,jk) = tilde_e3t_a(:,:,jk) - tilde_e3t_b(:,:,jk) 466 END DO 467 ! III - Barotropic repartition of the sea surface height over the baroclinic profile 468 ! 
================================================================================== 469 ! add ( ssh increment + "baroclinicity error" ) proportionly to e3t(n) 470 ! - ML - baroclinicity error should be better treated in the future 471 ! i.e. locally and not spread over the water column. 472 ! (keep in mind that the idea is to reduce Eulerian velocity as much as possible) 473 zht(:,:) = 0. 474 DO jk = 1, jpkm1 475 zht(:,:) = zht(:,:) + tilde_e3t_a(:,:,jk) * tmask(:,:,jk) 476 END DO 477 z_scale(:,:) = - zht(:,:) / ( ht_0(:,:) + sshn(:,:) + 1. - ssmask(:,:) ) 478 DO jk = 1, jpkm1 479 dtilde_e3t_a(:,:,jk) = dtilde_e3t_a(:,:,jk) + e3t_n(:,:,jk) * z_scale(:,:) * tmask(:,:,jk) 480 END DO 481 482 ENDIF 483 484 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! z_tilde or layer coordinate ! 485 ! ! ---baroclinic part--------- ! 486 DO jk = 1, jpkm1 487 e3t_a(:,:,jk) = e3t_a(:,:,jk) + dtilde_e3t_a(:,:,jk) * tmask(:,:,jk) 488 END DO 489 ENDIF 490 491 IF( ln_vvl_dbg .AND. .NOT. ll_do_bclinic ) THEN ! - ML - test: control prints for debuging 492 ! 493 IF( lwp ) WRITE(numout, *) 'kt =', kt 494 IF ( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN 495 z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( zht(:,:) ) ) 496 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 497 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(SUM(tilde_e3t_a))) =', z_tmax 498 END IF 499 ! 500 zht(:,:) = 0.0_wp 501 DO jk = 1, jpkm1 502 zht(:,:) = zht(:,:) + e3t_n(:,:,jk) * tmask(:,:,jk) 503 END DO 504 z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + sshn(:,:) - zht(:,:) ) ) 505 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 506 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+sshn-SUM(e3t_n))) =', z_tmax 507 ! 508 zht(:,:) = 0.0_wp 509 DO jk = 1, jpkm1 510 zht(:,:) = zht(:,:) + e3t_a(:,:,jk) * tmask(:,:,jk) 511 END DO 512 z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + ssha(:,:) - zht(:,:) ) ) 513 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 514 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+ssha-SUM(e3t_a))) =', z_tmax 515 ! 516 zht(:,:) = 0.0_wp 517 DO jk = 1, jpkm1 518 zht(:,:) = zht(:,:) + e3t_b(:,:,jk) * tmask(:,:,jk) 519 END DO 520 z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + sshb(:,:) - zht(:,:) ) ) 521 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 522 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+sshb-SUM(e3t_b))) =', z_tmax 523 ! 524 z_tmax = MAXVAL( tmask(:,:,1) * ABS( sshb(:,:) ) ) 525 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 526 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(sshb))) =', z_tmax 527 ! 528 z_tmax = MAXVAL( tmask(:,:,1) * ABS( sshn(:,:) ) ) 529 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 530 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(sshn))) =', z_tmax 531 ! 532 z_tmax = MAXVAL( tmask(:,:,1) * ABS( ssha(:,:) ) ) 533 CALL mpp_max( 'domvvl', z_tmax ) ! max over the global domain 534 IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ssha))) =', z_tmax 535 END IF 536 537 ! *********************************** ! 538 ! After scale factors at u- v- points ! 539 ! *********************************** ! 540 541 CALL dom_vvl_interpol( e3t_a(:,:,:), e3u_a(:,:,:), 'U' ) 542 CALL dom_vvl_interpol( e3t_a(:,:,:), e3v_a(:,:,:), 'V' ) 543 544 ! *********************************** ! 545 ! After depths at u- v points ! 546 ! *********************************** ! 
547 548 hu_a(:,:) = e3u_a(:,:,1) * umask(:,:,1) 549 hv_a(:,:) = e3v_a(:,:,1) * vmask(:,:,1) 550 DO jk = 2, jpkm1 551 hu_a(:,:) = hu_a(:,:) + e3u_a(:,:,jk) * umask(:,:,jk) 552 hv_a(:,:) = hv_a(:,:) + e3v_a(:,:,jk) * vmask(:,:,jk) 553 END DO 554 ! ! Inverse of the local depth 555 !!gm BUG ? don't understand the use of umask_i here ..... 556 r1_hu_a(:,:) = ssumask(:,:) / ( hu_a(:,:) + 1._wp - ssumask(:,:) ) 557 r1_hv_a(:,:) = ssvmask(:,:) / ( hv_a(:,:) + 1._wp - ssvmask(:,:) ) 558 ! 559 IF( ln_timing ) CALL timing_stop('dom_vvl_sf_nxt') 560 ! 561 END SUBROUTINE dom_vvl_sf_nxt 562 563 564 SUBROUTINE dom_vvl_sf_swp( kt ) 565 !!---------------------------------------------------------------------- 566 !! *** ROUTINE dom_vvl_sf_swp *** 567 !! 568 !! ** Purpose : compute time filter and swap of scale factors 569 !! compute all depths and related variables for next time step 570 !! write outputs and restart file 571 !! 572 !! ** Method : - swap of e3t with trick for volume/tracer conservation 573 !! - reconstruct scale factor at other grid points (interpolate) 574 !! - recompute depths and water height fields 575 !! 576 !! ** Action : - e3t_(b/n), tilde_e3t_(b/n) and e3(u/v)_n ready for next time step 577 !! - Recompute: 578 !! e3(u/v)_b 579 !! e3w_n 580 !! e3(u/v)w_b 581 !! e3(u/v)w_n 582 !! gdept_n, gdepw_n and gde3w_n 583 !! h(u/v) and h(u/v)r 584 !! 585 !! Reference : Leclair, M., and G. Madec, 2009, Ocean Modelling. 586 !! Leclair, M., and G. Madec, 2011, Ocean Modelling. 587 !!---------------------------------------------------------------------- 588 INTEGER, INTENT( in ) :: kt ! time step 589 ! 590 INTEGER :: ji, jj, jk ! dummy loop indices 591 REAL(wp) :: zcoef ! local scalar 592 !!---------------------------------------------------------------------- 593 ! 594 IF( ln_linssh ) RETURN ! No calculation in linear free surface 595 ! 596 IF( ln_timing ) CALL timing_start('dom_vvl_sf_swp') 597 ! 598 IF( kt == nit000 ) THEN 599 IF(lwp) WRITE(numout,*) 600 IF(lwp) WRITE(numout,*) 'dom_vvl_sf_swp : - time filter and swap of scale factors' 601 IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~~~ - interpolate scale factors and compute depths for next time step' 602 ENDIF 603 ! 604 ! Time filter and swap of scale factors 605 ! ===================================== 606 ! - ML - e3(t/u/v)_b are allready computed in dynnxt. 607 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN 608 IF( neuler == 0 .AND. kt == nit000 ) THEN 609 tilde_e3t_b(:,:,:) = tilde_e3t_n(:,:,:) 610 ELSE 611 tilde_e3t_b(:,:,:) = tilde_e3t_n(:,:,:) & 612 & + atfp * ( tilde_e3t_b(:,:,:) - 2.0_wp * tilde_e3t_n(:,:,:) + tilde_e3t_a(:,:,:) ) 613 ENDIF 614 tilde_e3t_n(:,:,:) = tilde_e3t_a(:,:,:) 615 ENDIF 616 gdept_b(:,:,:) = gdept_n(:,:,:) 617 gdepw_b(:,:,:) = gdepw_n(:,:,:) 618 619 e3t_n(:,:,:) = e3t_a(:,:,:) 620 e3u_n(:,:,:) = e3u_a(:,:,:) 621 e3v_n(:,:,:) = e3v_a(:,:,:) 622 623 ! Compute all missing vertical scale factor and depths 624 ! ==================================================== 625 ! Horizontal scale factor interpolations 626 ! -------------------------------------- 627 ! - ML - e3u_b and e3v_b are allready computed in dynnxt 628 ! - JC - hu_b, hv_b, hur_b, hvr_b also 629 630 CALL dom_vvl_interpol( e3u_n(:,:,:), e3f_n(:,:,:), 'F' ) 631 632 ! 
Vertical scale factor interpolations 633 CALL dom_vvl_interpol( e3t_n(:,:,:), e3w_n(:,:,:), 'W' ) 634 CALL dom_vvl_interpol( e3u_n(:,:,:), e3uw_n(:,:,:), 'UW' ) 635 CALL dom_vvl_interpol( e3v_n(:,:,:), e3vw_n(:,:,:), 'VW' ) 636 CALL dom_vvl_interpol( e3t_b(:,:,:), e3w_b(:,:,:), 'W' ) 637 CALL dom_vvl_interpol( e3u_b(:,:,:), e3uw_b(:,:,:), 'UW' ) 638 CALL dom_vvl_interpol( e3v_b(:,:,:), e3vw_b(:,:,:), 'VW' ) 639 640 ! t- and w- points depth (set the isf depth as it is in the initial step) 641 gdept_n(:,:,1) = 0.5_wp * e3w_n(:,:,1) 642 gdepw_n(:,:,1) = 0.0_wp 643 gde3w_n(:,:,1) = gdept_n(:,:,1) - sshn(:,:) 644 DO jk = 2, jpk 645 DO jj = 1,jpj 646 DO ji = 1,jpi 647 ! zcoef = (tmask(ji,jj,jk) - wmask(ji,jj,jk)) ! 0 everywhere tmask = wmask, ie everywhere expect at jk = mikt 648 ! 1 for jk = mikt 649 zcoef = (tmask(ji,jj,jk) - wmask(ji,jj,jk)) 650 gdepw_n(ji,jj,jk) = gdepw_n(ji,jj,jk-1) + e3t_n(ji,jj,jk-1) 651 gdept_n(ji,jj,jk) = zcoef * ( gdepw_n(ji,jj,jk ) + 0.5 * e3w_n(ji,jj,jk) ) & 652 & + (1-zcoef) * ( gdept_n(ji,jj,jk-1) + e3w_n(ji,jj,jk) ) 653 gde3w_n(ji,jj,jk) = gdept_n(ji,jj,jk) - sshn(ji,jj) 654 END DO 655 END DO 656 END DO 657 658 ! Local depth and Inverse of the local depth of the water 659 ! ------------------------------------------------------- 660 hu_n(:,:) = hu_a(:,:) ; r1_hu_n(:,:) = r1_hu_a(:,:) 661 hv_n(:,:) = hv_a(:,:) ; r1_hv_n(:,:) = r1_hv_a(:,:) 662 ! 663 ht_n(:,:) = e3t_n(:,:,1) * tmask(:,:,1) 664 DO jk = 2, jpkm1 665 ht_n(:,:) = ht_n(:,:) + e3t_n(:,:,jk) * tmask(:,:,jk) 666 END DO 667 668 ! write restart file 669 ! ================== 670 IF( lrst_oce ) CALL dom_vvl_rst( kt, 'WRITE' ) 671 ! 672 IF( ln_timing ) CALL timing_stop('dom_vvl_sf_swp') 673 ! 674 END SUBROUTINE dom_vvl_sf_swp 249 675 250 676 … … 265 691 ! 266 692 INTEGER :: ji, jj, jk ! dummy loop indices 267 REAL(wp) :: zlnwd ! =1./0. when ln_wd = T/F 268 !!---------------------------------------------------------------------- 269 ! 270 IF( nn_timing == 1 ) CALL timing_start('dom_vvl_interpol') 271 ! 272 zlnwd = 0.0_wp 693 REAL(wp) :: zlnwd ! =1./0. when ln_wd_il = T/F 694 !!---------------------------------------------------------------------- 695 ! 696 ! IF(ln_wd_il) THEN 697 ! zlnwd = 1.0_wp 698 ! ELSE 699 zlnwd = 0.0_wp 700 ! END IF 273 701 ! 274 702 SELECT CASE ( pout ) !== type of interpolation ==! … … 277 705 DO jk = 1, jpk 278 706 DO jj = 1, jpjm1 279 DO ji = 1, jpim1 ! vector opt.707 DO ji = 1, fs_jpim1 ! vector opt. 280 708 pe3_out(ji,jj,jk) = 0.5_wp * ( umask(ji,jj,jk) * (1.0_wp - zlnwd) + zlnwd ) * r1_e1e2u(ji,jj) & 281 709 & * ( e1e2t(ji ,jj) * ( pe3_in(ji ,jj,jk) - e3t_0(ji ,jj,jk) ) & … … 284 712 END DO 285 713 END DO 286 CALL lbc_lnk( pe3_out(:,:,:), 'U', 1._wp )714 CALL lbc_lnk( 'domvvl', pe3_out(:,:,:), 'U', 1._wp ) 287 715 pe3_out(:,:,:) = pe3_out(:,:,:) + e3u_0(:,:,:) 288 716 ! … … 290 718 DO jk = 1, jpk 291 719 DO jj = 1, jpjm1 292 DO ji = 1, jpim1 ! vector opt.720 DO ji = 1, fs_jpim1 ! vector opt. 293 721 pe3_out(ji,jj,jk) = 0.5_wp * ( vmask(ji,jj,jk) * (1.0_wp - zlnwd) + zlnwd ) * r1_e1e2v(ji,jj) & 294 722 & * ( e1e2t(ji,jj ) * ( pe3_in(ji,jj ,jk) - e3t_0(ji,jj ,jk) ) & … … 297 725 END DO 298 726 END DO 299 CALL lbc_lnk( pe3_out(:,:,:), 'V', 1._wp )727 CALL lbc_lnk( 'domvvl', pe3_out(:,:,:), 'V', 1._wp ) 300 728 pe3_out(:,:,:) = pe3_out(:,:,:) + e3v_0(:,:,:) 301 729 ! … … 303 731 DO jk = 1, jpk 304 732 DO jj = 1, jpjm1 305 DO ji = 1, jpim1 ! vector opt.733 DO ji = 1, fs_jpim1 ! vector opt. 
306 734 pe3_out(ji,jj,jk) = 0.5_wp * ( umask(ji,jj,jk) * umask(ji,jj+1,jk) * (1.0_wp - zlnwd) + zlnwd ) & 307 735 & * r1_e1e2f(ji,jj) & … … 311 739 END DO 312 740 END DO 313 CALL lbc_lnk( pe3_out(:,:,:), 'F', 1._wp )741 CALL lbc_lnk( 'domvvl', pe3_out(:,:,:), 'F', 1._wp ) 314 742 pe3_out(:,:,:) = pe3_out(:,:,:) + e3f_0(:,:,:) 315 743 ! … … 351 779 END SELECT 352 780 ! 353 IF( nn_timing == 1 ) CALL timing_stop('dom_vvl_interpol')354 !355 781 END SUBROUTINE dom_vvl_interpol 782 783 784 SUBROUTINE dom_vvl_rst( kt, cdrw ) 785 !!--------------------------------------------------------------------- 786 !! *** ROUTINE dom_vvl_rst *** 787 !! 788 !! ** Purpose : Read or write VVL file in restart file 789 !! 790 !! ** Method : use of IOM library 791 !! if the restart does not contain vertical scale factors, 792 !! they are set to the _0 values 793 !! if the restart does not contain vertical scale factors increments (z_tilde), 794 !! they are set to 0. 795 !!---------------------------------------------------------------------- 796 INTEGER , INTENT(in) :: kt ! ocean time-step 797 CHARACTER(len=*), INTENT(in) :: cdrw ! "READ"/"WRITE" flag 798 ! 799 INTEGER :: ji, jj, jk 800 INTEGER :: id1, id2, id3, id4, id5 ! local integers 801 !!---------------------------------------------------------------------- 802 ! 803 IF( TRIM(cdrw) == 'READ' ) THEN ! Read/initialise 804 ! ! =============== 805 IF( .false. ) THEN !* Read the restart file 806 CALL rst_read_open ! open the restart file if necessary 807 CALL iom_get( numror, jpdom_autoglo, 'sshn' , sshn, ldxios = lrxios ) 808 ! 809 id1 = iom_varid( numror, 'e3t_b', ldstop = .FALSE. ) 810 id2 = iom_varid( numror, 'e3t_n', ldstop = .FALSE. ) 811 id3 = iom_varid( numror, 'tilde_e3t_b', ldstop = .FALSE. ) 812 id4 = iom_varid( numror, 'tilde_e3t_n', ldstop = .FALSE. ) 813 id5 = iom_varid( numror, 'hdiv_lf', ldstop = .FALSE. ) 814 ! ! --------- ! 815 ! ! all cases ! 816 ! ! --------- ! 817 IF( MIN( id1, id2 ) > 0 ) THEN ! all required arrays exist 818 CALL iom_get( numror, jpdom_autoglo, 'e3t_b', e3t_b(:,:,:), ldxios = lrxios ) 819 CALL iom_get( numror, jpdom_autoglo, 'e3t_n', e3t_n(:,:,:), ldxios = lrxios ) 820 ! needed to restart if land processor not computed 821 IF(lwp) write(numout,*) 'dom_vvl_rst : e3t_b and e3t_n found in restart files' 822 WHERE ( tmask(:,:,:) == 0.0_wp ) 823 e3t_n(:,:,:) = e3t_0(:,:,:) 824 e3t_b(:,:,:) = e3t_0(:,:,:) 825 END WHERE 826 IF( neuler == 0 ) THEN 827 e3t_b(:,:,:) = e3t_n(:,:,:) 828 ENDIF 829 ELSE IF( id1 > 0 ) THEN 830 IF(lwp) write(numout,*) 'dom_vvl_rst WARNING : e3t_n not found in restart files' 831 IF(lwp) write(numout,*) 'e3t_n set equal to e3t_b.' 832 IF(lwp) write(numout,*) 'neuler is forced to 0' 833 CALL iom_get( numror, jpdom_autoglo, 'e3t_b', e3t_b(:,:,:), ldxios = lrxios ) 834 e3t_n(:,:,:) = e3t_b(:,:,:) 835 neuler = 0 836 ELSE IF( id2 > 0 ) THEN 837 IF(lwp) write(numout,*) 'dom_vvl_rst WARNING : e3t_b not found in restart files' 838 IF(lwp) write(numout,*) 'e3t_b set equal to e3t_n.' 
839 IF(lwp) write(numout,*) 'neuler is forced to 0' 840 CALL iom_get( numror, jpdom_autoglo, 'e3t_n', e3t_n(:,:,:), ldxios = lrxios ) 841 e3t_b(:,:,:) = e3t_n(:,:,:) 842 neuler = 0 843 ELSE 844 IF(lwp) write(numout,*) 'dom_vvl_rst WARNING : e3t_n not found in restart file' 845 IF(lwp) write(numout,*) 'Compute scale factor from sshn' 846 IF(lwp) write(numout,*) 'neuler is forced to 0' 847 DO jk = 1, jpk 848 e3t_n(:,:,jk) = e3t_0(:,:,jk) * ( ht_0(:,:) + sshn(:,:) ) & 849 & / ( ht_0(:,:) + 1._wp - ssmask(:,:) ) * tmask(:,:,jk) & 850 & + e3t_0(:,:,jk) * (1._wp -tmask(:,:,jk)) 851 END DO 852 e3t_b(:,:,:) = e3t_n(:,:,:) 853 neuler = 0 854 ENDIF 855 ! ! ----------- ! 856 IF( ln_vvl_zstar ) THEN ! z_star case ! 857 ! ! ----------- ! 858 IF( MIN( id3, id4 ) > 0 ) THEN 859 CALL ctl_stop( 'dom_vvl_rst: z_star cannot restart from a z_tilde or layer run' ) 860 ENDIF 861 ! ! ----------------------- ! 862 ELSE ! z_tilde and layer cases ! 863 ! ! ----------------------- ! 864 IF( MIN( id3, id4 ) > 0 ) THEN ! all required arrays exist 865 CALL iom_get( numror, jpdom_autoglo, 'tilde_e3t_b', tilde_e3t_b(:,:,:), ldxios = lrxios ) 866 CALL iom_get( numror, jpdom_autoglo, 'tilde_e3t_n', tilde_e3t_n(:,:,:), ldxios = lrxios ) 867 ELSE ! one at least array is missing 868 tilde_e3t_b(:,:,:) = 0.0_wp 869 tilde_e3t_n(:,:,:) = 0.0_wp 870 ENDIF 871 ! ! ------------ ! 872 IF( ln_vvl_ztilde ) THEN ! z_tilde case ! 873 ! ! ------------ ! 874 IF( id5 > 0 ) THEN ! required array exists 875 CALL iom_get( numror, jpdom_autoglo, 'hdiv_lf', hdiv_lf(:,:,:), ldxios = lrxios ) 876 ELSE ! array is missing 877 hdiv_lf(:,:,:) = 0.0_wp 878 ENDIF 879 ENDIF 880 ENDIF 881 ! 882 ELSE !* Initialize at "rest" 883 ! 884 885 IF( .false. ) THEN ! MJB ll_wd edits start here - these are essential 886 ! 887 !wet dry here 888 ! 889 ELSE 890 ! 891 ! Just to read set ssh in fact, called latter once vertical grid 892 ! is set up: 893 ! CALL usr_def_istate( gdept_0, tmask, tsb, ub, vb, sshb ) 894 ! ! 895 ! DO jk=1,jpk 896 ! e3t_b(:,:,jk) = e3t_0(:,:,jk) * ( ht_0(:,:) + sshb(:,:) ) & 897 ! & / ( ht_0(:,:) + 1._wp -ssmask(:,:) ) * tmask(:,:,jk) 898 ! END DO 899 ! e3t_n(:,:,:) = e3t_b(:,:,:) 900 sshn(:,:)=0._wp 901 e3t_n(:,:,:)=e3t_0(:,:,:) 902 e3t_b(:,:,:)=e3t_0(:,:,:) 903 ! 904 END IF ! end of ll_wd edits 905 906 IF( ln_vvl_ztilde .OR. ln_vvl_layer) THEN 907 tilde_e3t_b(:,:,:) = 0._wp 908 tilde_e3t_n(:,:,:) = 0._wp 909 IF( ln_vvl_ztilde ) hdiv_lf(:,:,:) = 0._wp 910 END IF 911 ENDIF 912 ! 913 ELSEIF( TRIM(cdrw) == 'WRITE' ) THEN ! Create restart file 914 ! ! =================== 915 IF(lwp) WRITE(numout,*) '---- dom_vvl_rst ----' 916 IF( lwxios ) CALL iom_swap( cwxios_context ) 917 ! ! --------- ! 918 ! ! all cases ! 919 ! ! --------- ! 920 CALL iom_rstput( kt, nitrst, numrow, 'e3t_b', e3t_b(:,:,:), ldxios = lwxios ) 921 CALL iom_rstput( kt, nitrst, numrow, 'e3t_n', e3t_n(:,:,:), ldxios = lwxios ) 922 ! ! ----------------------- ! 923 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! z_tilde and layer cases ! 924 ! ! ----------------------- ! 925 CALL iom_rstput( kt, nitrst, numrow, 'tilde_e3t_b', tilde_e3t_b(:,:,:), ldxios = lwxios) 926 CALL iom_rstput( kt, nitrst, numrow, 'tilde_e3t_n', tilde_e3t_n(:,:,:), ldxios = lwxios) 927 END IF 928 ! ! -------------! 929 IF( ln_vvl_ztilde ) THEN ! z_tilde case ! 930 ! ! ------------ ! 931 CALL iom_rstput( kt, nitrst, numrow, 'hdiv_lf', hdiv_lf(:,:,:), ldxios = lwxios) 932 ENDIF 933 ! 934 IF( lwxios ) CALL iom_swap( cxios_context ) 935 ENDIF 936 ! 
937 END SUBROUTINE dom_vvl_rst 356 938 357 939 … … 372 954 REWIND( numnam_ref ) ! Namelist nam_vvl in reference namelist : 373 955 READ ( numnam_ref, nam_vvl, IOSTAT = ios, ERR = 901) 374 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nam_vvl in reference namelist', lwp ) 375 ! 956 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nam_vvl in reference namelist', lwp ) 376 957 REWIND( numnam_cfg ) ! Namelist nam_vvl in configuration namelist : Parameters of the run 377 958 READ ( numnam_cfg, nam_vvl, IOSTAT = ios, ERR = 902 ) 378 902 IF( ios /=0 ) CALL ctl_nam ( ios , 'nam_vvl in configuration namelist', lwp )959 902 IF( ios > 0 ) CALL ctl_nam ( ios , 'nam_vvl in configuration namelist', lwp ) 379 960 IF(lwm) WRITE ( numond, nam_vvl ) 380 961 ! … … 383 964 WRITE(numout,*) 'dom_vvl_ctl : choice/control of the variable vertical coordinate' 384 965 WRITE(numout,*) '~~~~~~~~~~~' 385 WRITE(numout,*) ' 386 WRITE(numout,*) ' 387 WRITE(numout,*) ' 388 WRITE(numout,*) ' 389 WRITE(numout,*) ' 966 WRITE(numout,*) ' Namelist nam_vvl : chose a vertical coordinate' 967 WRITE(numout,*) ' zstar ln_vvl_zstar = ', ln_vvl_zstar 968 WRITE(numout,*) ' ztilde ln_vvl_ztilde = ', ln_vvl_ztilde 969 WRITE(numout,*) ' layer ln_vvl_layer = ', ln_vvl_layer 970 WRITE(numout,*) ' ztilde as zstar ln_vvl_ztilde_as_zstar = ', ln_vvl_ztilde_as_zstar 390 971 WRITE(numout,*) ' ztilde near the equator ln_vvl_zstar_at_eqtor = ', ln_vvl_zstar_at_eqtor 391 ! WRITE(numout,*) ' Namelist nam_vvl : chose kinetic-to-potential energy conservation' 392 ! WRITE(numout,*) ' ln_vvl_kepe = ', ln_vvl_kepe 393 WRITE(numout,*) ' Namelist nam_vvl : thickness diffusion coefficient' 394 WRITE(numout,*) ' rn_ahe3 = ', rn_ahe3 395 WRITE(numout,*) ' Namelist nam_vvl : maximum e3t deformation fractional change' 396 WRITE(numout,*) ' rn_zdef_max = ', rn_zdef_max 972 WRITE(numout,*) ' !' 973 WRITE(numout,*) ' thickness diffusion coefficient rn_ahe3 = ', rn_ahe3 974 WRITE(numout,*) ' maximum e3t deformation fractional change rn_zdef_max = ', rn_zdef_max 397 975 IF( ln_vvl_ztilde_as_zstar ) THEN 398 WRITE(numout,*) ' ztilde running in zstar emulation mode;'399 WRITE(numout,*) ' 400 WRITE(numout,*) ' 401 WRITE(numout,*) ' rn_rst_e3t = 0.0'402 WRITE(numout,*) ' 403 WRITE(numout,*) ' rn_lf_cutoff =1.0/rdt'976 WRITE(numout,*) ' ztilde running in zstar emulation mode (ln_vvl_ztilde_as_zstar=T) ' 977 WRITE(numout,*) ' ignoring namelist timescale parameters and using:' 978 WRITE(numout,*) ' hard-wired : z-tilde to zstar restoration timescale (days)' 979 WRITE(numout,*) ' rn_rst_e3t = 0.e0' 980 WRITE(numout,*) ' hard-wired : z-tilde cutoff frequency of low-pass filter (days)' 981 WRITE(numout,*) ' rn_lf_cutoff = 1.0/rdt' 404 982 ELSE 405 WRITE(numout,*) ' Namelist nam_vvl : z-tilde to zstar restoration timescale (days)' 406 WRITE(numout,*) ' rn_rst_e3t = ', rn_rst_e3t 407 WRITE(numout,*) ' Namelist nam_vvl : z-tilde cutoff frequency of low-pass filter (days)' 408 WRITE(numout,*) ' rn_lf_cutoff = ', rn_lf_cutoff 409 ENDIF 410 WRITE(numout,*) ' Namelist nam_vvl : debug prints' 411 WRITE(numout,*) ' ln_vvl_dbg = ', ln_vvl_dbg 983 WRITE(numout,*) ' z-tilde to zstar restoration timescale (days) rn_rst_e3t = ', rn_rst_e3t 984 WRITE(numout,*) ' z-tilde cutoff frequency of low-pass filter (days) rn_lf_cutoff = ', rn_lf_cutoff 985 ENDIF 986 WRITE(numout,*) ' debug prints flag ln_vvl_dbg = ', ln_vvl_dbg 412 987 ENDIF 413 988 ! … … 422 997 IF(lwp) THEN ! 
Print the choice 423 998 WRITE(numout,*) 424 IF( ln_vvl_zstar ) WRITE(numout,*) ' 425 IF( ln_vvl_ztilde ) WRITE(numout,*) ' 426 IF( ln_vvl_layer ) WRITE(numout,*) ' 427 IF( ln_vvl_ztilde_as_zstar ) WRITE(numout,*) ' 428 ! - ML - Option not developed yet429 ! IF( ln_vvl_kepe ) WRITE(numout,*) ' kinetic to potential energy transfer : option used'430 ! IF( .NOT. ln_vvl_kepe ) WRITE(numout,*) ' kinetic to potential energy transfer : option not used' 431 ENDIF432 ! 999 IF( ln_vvl_zstar ) WRITE(numout,*) ' ==>>> zstar vertical coordinate is used' 1000 IF( ln_vvl_ztilde ) WRITE(numout,*) ' ==>>> ztilde vertical coordinate is used' 1001 IF( ln_vvl_layer ) WRITE(numout,*) ' ==>>> layer vertical coordinate is used' 1002 IF( ln_vvl_ztilde_as_zstar ) WRITE(numout,*) ' ==>>> to emulate a zstar coordinate' 1003 ENDIF 1004 ! 1005 #if defined key_agrif 1006 IF( (.NOT.Agrif_Root()).AND.(.NOT.ln_vvl_zstar) ) CALL ctl_stop( 'AGRIF is implemented with zstar coordinate only' ) 1007 #endif 433 1008 ! 434 1009 END SUBROUTINE dom_vvl_ctl -
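A note on the z-star step touched above: dom_vvl_sf_nxt spreads the ssh increment ( ssha - sshb ) over the levels in proportion to the "now" thicknesses, which for a purely barotropic column is the same as e3t = e3t_0 * ( 1 + ssh / ht_0 ), as the in-code comment says. The single-column program below is a minimal standalone sketch of that step, not part of the changeset: the kind parameter, the five-level column and the ssh values are invented for illustration, and the land-mask term ( + 1. - ssmask ) that protects the model's denominator is left out.

   PROGRAM zstar_sketch
      ! Minimal single-column illustration of the z-star update of dom_vvl_sf_nxt:
      ! the ssh increment is spread over the levels in proportion to the "now"
      ! thicknesses, which is equivalent to e3t = e3t_0 * ( 1 + ssh / ht_0 ).
      IMPLICIT NONE
      INTEGER, PARAMETER :: wp  = SELECTED_REAL_KIND(12,307)      ! working precision (assumed)
      INTEGER, PARAMETER :: jpk = 5                               ! number of levels (illustrative)
      REAL(wp) :: e3t_0(jpk), e3t_b(jpk), e3t_n(jpk), e3t_a(jpk)
      REAL(wp) :: ht_0, sshb, sshn, ssha, z_scale
      INTEGER  :: jk
      !
      e3t_0(:) = (/ 10._wp, 20._wp, 50._wp, 100._wp, 200._wp /)   ! reference thicknesses (made up)
      ht_0     = SUM( e3t_0 )                                     ! reference depth of the column
      sshb = 0.10_wp   ;   sshn = 0.15_wp   ;   ssha = 0.20_wp    ! before / now / after ssh (made up)
      e3t_b(:) = e3t_0(:) * ( 1._wp + sshb / ht_0 )               ! before thicknesses
      e3t_n(:) = e3t_0(:) * ( 1._wp + sshn / ht_0 )               ! now    thicknesses
      !
      z_scale = ( ssha - sshb ) / ( ht_0 + sshn )                 ! ssh increment / now column depth
      DO jk = 1, jpk
         e3t_a(jk) = e3t_b(jk) + e3t_n(jk) * z_scale              ! repartition of the increment
      END DO
      !
      WRITE(*,*) 'SUM(e3t_a) - (ht_0+ssha)            = ', SUM( e3t_a ) - ( ht_0 + ssha )
      WRITE(*,*) 'MAX | e3t_a - e3t_0*(1+ssha/ht_0) | = ', &
         &       MAXVAL( ABS( e3t_a(:) - e3t_0(:) * ( 1._wp + ssha / ht_0 ) ) )
   END PROGRAM zstar_sketch

Both printed residuals are at machine precision: the incremental update conserves the column volume and matches the direct formula quoted in the dom_vvl_sf_nxt comment.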
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domwri.F90
r10725 r10727 8 8 !! NEMO 1.0 ! 2002-08 (G. Madec) F90 and several file 9 9 !! 3.0 ! 2008-01 (S. Masson) add dom_uniq 10 !! 4.0 ! 2016-01 (G. Madec) simplified mesh_mask.nc file 10 11 !!---------------------------------------------------------------------- 11 12 … … 16 17 !!---------------------------------------------------------------------- 17 18 USE dom_oce ! ocean space and time domain 19 USE phycst , ONLY : rsmall 20 ! USE wet_dry, ONLY : ll_wd ! Wetting and drying 21 ! 18 22 USE in_out_manager ! I/O manager 19 23 USE iom ! I/O library 20 24 USE lbclnk ! lateral boundary conditions - mpp exchanges 21 25 USE lib_mpp ! MPP library 22 USE wrk_nemo ! Memory allocation23 USE timing ! Timing24 USE phycst25 26 26 27 IMPLICIT NONE … … 28 29 29 30 PUBLIC dom_wri ! routine called by inidom.F90 30 PUBLIC dom_wri_coordinate ! routine called by domhgr.F9031 31 PUBLIC dom_stiff ! routine called by inidom.F90 32 32 33 !!---------------------------------------------------------------------- 34 !! NEMO/OPA 3.7 , NEMO Consortium (2014) 35 !! $Id: vectopt_loop_substitute.h90 4990 2014-12-15 16:42:49Z timgraham $ 36 !! Software governed by the CeCILL licence (./LICENSE) 33 !! * Substitutions 34 # include "vectopt_loop_substitute.h90" 35 !!---------------------------------------------------------------------- 36 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 37 !! $Id: domwri.F90 10425 2018-12-19 21:54:16Z smasson $ 38 !! Software governed by the CeCILL license (see ./LICENSE) 37 39 !!---------------------------------------------------------------------- 38 40 CONTAINS 39 40 SUBROUTINE dom_wri_coordinate41 !!----------------------------------------------------------------------42 !! *** ROUTINE dom_wri_coordinate ***43 !!44 !! ** Purpose : Create the NetCDF file which contains all the45 !! standard coordinate information plus the surface,46 !! e1e2u and e1e2v. By doing so, those surface will47 !! not be changed by the reduction of e1u or e2v scale48 !! factors in some straits.49 !! NB: call just after the read of standard coordinate50 !! and the reduction of scale factors in some straits51 !!52 !! ** output file : coordinate_e1e2u_v.nc53 !!----------------------------------------------------------------------54 INTEGER :: inum0 ! temprary units for 'coordinate_e1e2u_v.nc' file55 CHARACTER(len=21) :: clnam0 ! filename (mesh and mask informations)56 ! ! workspaces57 REAL(wp), POINTER, DIMENSION(:,: ) :: zprt, zprw58 REAL(wp), POINTER, DIMENSION(:,:,:) :: zdepu, zdepv59 !!----------------------------------------------------------------------60 !61 IF( nn_timing == 1 ) CALL timing_start('dom_wri_coordinate')62 !63 IF(lwp) WRITE(numout,*)64 IF(lwp) WRITE(numout,*) 'dom_wri_coordinate : create NetCDF coordinate file'65 IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~~~~~~~'66 67 clnam0 = 'coordinate_e1e2u_v' ! filename (mesh and mask informations)68 69 ! create 'coordinate_e1e2u_v.nc' file70 ! ============================71 !72 CALL iom_open( TRIM(clnam0), inum0, ldwrt = .TRUE., kiolib = jprstlib )73 !74 ! ! horizontal mesh (inum3)75 CALL iom_rstput( 0, 0, inum0, 'glamt', glamt, ktype = jp_r8 ) ! ! latitude76 CALL iom_rstput( 0, 0, inum0, 'glamu', glamu, ktype = jp_r8 )77 CALL iom_rstput( 0, 0, inum0, 'glamv', glamv, ktype = jp_r8 )78 CALL iom_rstput( 0, 0, inum0, 'glamf', glamf, ktype = jp_r8 )79 80 CALL iom_rstput( 0, 0, inum0, 'gphit', gphit, ktype = jp_r8 ) ! ! 
longitude81 CALL iom_rstput( 0, 0, inum0, 'gphiu', gphiu, ktype = jp_r8 )82 CALL iom_rstput( 0, 0, inum0, 'gphiv', gphiv, ktype = jp_r8 )83 CALL iom_rstput( 0, 0, inum0, 'gphif', gphif, ktype = jp_r8 )84 85 CALL iom_rstput( 0, 0, inum0, 'e1t', e1t, ktype = jp_r8 ) ! ! e1 scale factors86 CALL iom_rstput( 0, 0, inum0, 'e1u', e1u, ktype = jp_r8 )87 CALL iom_rstput( 0, 0, inum0, 'e1v', e1v, ktype = jp_r8 )88 CALL iom_rstput( 0, 0, inum0, 'e1f', e1f, ktype = jp_r8 )89 90 CALL iom_rstput( 0, 0, inum0, 'e2t', e2t, ktype = jp_r8 ) ! ! e2 scale factors91 CALL iom_rstput( 0, 0, inum0, 'e2u', e2u, ktype = jp_r8 )92 CALL iom_rstput( 0, 0, inum0, 'e2v', e2v, ktype = jp_r8 )93 CALL iom_rstput( 0, 0, inum0, 'e2f', e2f, ktype = jp_r8 )94 95 CALL iom_rstput( 0, 0, inum0, 'e1e2u', e1e2u, ktype = jp_r8 )96 CALL iom_rstput( 0, 0, inum0, 'e1e2v', e1e2v, ktype = jp_r8 )97 98 CALL iom_close( inum0 )99 !100 IF( nn_timing == 1 ) CALL timing_stop('dom_wri_coordinate')101 !102 END SUBROUTINE dom_wri_coordinate103 104 41 105 42 SUBROUTINE dom_wri … … 112 49 !! diagnostic computation. 113 50 !! 114 !! ** Method : Write in a file all the arrays generated in routines 115 !! domhgr, domzgr, and dommsk. Note: the file contain depends on 116 !! the vertical coord. used (z-coord, partial steps, s-coord) 117 !! MOD(nmsh, 3) = 1 : 'mesh_mask.nc' file 118 !! = 2 : 'mesh.nc' and mask.nc' files 119 !! = 0 : 'mesh_hgr.nc', 'mesh_zgr.nc' and 120 !! 'mask.nc' files 121 !! For huge size domain, use option 2 or 3 depending on your 122 !! vertical coordinate. 123 !! 124 !! if nmsh <= 3: write full 3D arrays for e3[tuvw] and gdep[tuvw] 125 !! if 3 < nmsh <= 6: write full 3D arrays for e3[tuvw] and 2D arrays 126 !! corresponding to the depth of the bottom t- and w-points 127 !! if 6 < nmsh <= 9: write 2D arrays corresponding to the depth and the 128 !! thickness (e3[tw]_ps) of the bottom points 51 !! ** Method : create a file with all domain related arrays 129 52 !! 130 53 !! ** output file : meshmask.nc : domain size, horizontal grid-point position, 131 54 !! masks, depth and vertical scale factors 132 55 !!---------------------------------------------------------------------- 133 !! 134 INTEGER :: inum0 ! temprary units for 'mesh_mask.nc' file 135 INTEGER :: inum1 ! temprary units for 'mesh.nc' file 136 INTEGER :: inum2 ! temprary units for 'mask.nc' file 137 INTEGER :: inum3 ! temprary units for 'mesh_hgr.nc' file 138 INTEGER :: inum4 ! temprary units for 'mesh_zgr.nc' file 139 CHARACTER(len=21) :: clnam0 ! filename (mesh and mask informations) 140 CHARACTER(len=21) :: clnam1 ! filename (mesh informations) 141 CHARACTER(len=21) :: clnam2 ! filename (mask informations) 142 CHARACTER(len=21) :: clnam3 ! filename (horizontal mesh informations) 143 CHARACTER(len=21) :: clnam4 ! filename (vertical mesh informations) 56 INTEGER :: inum ! temprary units for 'mesh_mask.nc' file 57 CHARACTER(len=21) :: clnam ! filename (mesh and mask informations) 144 58 INTEGER :: ji, jj, jk ! dummy loop indices 145 ! ! workspaces 146 REAL(wp), POINTER, DIMENSION(:,: ) :: zprt, zprw 147 REAL(wp), POINTER, DIMENSION(:,:,:) :: zdepu, zdepv 148 !!---------------------------------------------------------------------- 149 ! 150 IF( nn_timing == 1 ) CALL timing_start('dom_wri') 151 ! 152 CALL wrk_alloc( jpi, jpj, zprt, zprw ) 153 CALL wrk_alloc( jpi, jpj, jpk, zdepu, zdepv ) 59 INTEGER :: izco, izps, isco, icav 60 ! 61 REAL(wp), DIMENSION(jpi,jpj) :: zprt, zprw ! 2D workspace 62 REAL(wp), DIMENSION(jpi,jpj,jpk) :: zdepu, zdepv ! 
3D workspace 63 !!---------------------------------------------------------------------- 154 64 ! 155 65 IF(lwp) WRITE(numout,*) … … 157 67 IF(lwp) WRITE(numout,*) '~~~~~~~' 158 68 159 clnam0 = 'mesh_mask' ! filename (mesh and mask informations) 160 clnam1 = 'mesh' ! filename (mesh informations) 161 clnam2 = 'mask' ! filename (mask informations) 162 clnam3 = 'mesh_hgr' ! filename (horizontal mesh informations) 163 clnam4 = 'mesh_zgr' ! filename (vertical mesh informations) 164 165 SELECT CASE ( MOD(nmsh, 3) ) 166 ! ! ============================ 167 CASE ( 1 ) ! create 'mesh_mask.nc' file 168 ! ! ============================ 169 CALL iom_open( TRIM(clnam0), inum0, ldwrt = .TRUE., kiolib = jprstlib ) 170 inum2 = inum0 ! put all the informations 171 inum3 = inum0 ! in unit inum0 172 inum4 = inum0 173 174 ! ! ============================ 175 CASE ( 2 ) ! create 'mesh.nc' and 176 ! ! 'mask.nc' files 177 ! ! ============================ 178 CALL iom_open( TRIM(clnam1), inum1, ldwrt = .TRUE., kiolib = jprstlib ) 179 CALL iom_open( TRIM(clnam2), inum2, ldwrt = .TRUE., kiolib = jprstlib ) 180 inum3 = inum1 ! put mesh informations 181 inum4 = inum1 ! in unit inum1 182 ! ! ============================ 183 CASE ( 0 ) ! create 'mesh_hgr.nc' 184 ! ! 'mesh_zgr.nc' and 185 ! ! 'mask.nc' files 186 ! ! ============================ 187 CALL iom_open( TRIM(clnam2), inum2, ldwrt = .TRUE., kiolib = jprstlib ) 188 CALL iom_open( TRIM(clnam3), inum3, ldwrt = .TRUE., kiolib = jprstlib ) 189 CALL iom_open( TRIM(clnam4), inum4, ldwrt = .TRUE., kiolib = jprstlib ) 190 ! 191 END SELECT 192 193 ! ! masks (inum2) 194 CALL iom_rstput( 0, 0, inum2, 'tmask', tmask, ktype = jp_i1 ) ! ! land-sea mask 195 CALL iom_rstput( 0, 0, inum2, 'umask', umask, ktype = jp_i1 ) 196 CALL iom_rstput( 0, 0, inum2, 'vmask', vmask, ktype = jp_i1 ) 197 CALL iom_rstput( 0, 0, inum2, 'fmask', fmask, ktype = jp_i1 ) 69 clnam = 'mesh_mask' ! filename (mesh and mask informations) 70 71 ! ! ============================ 72 ! ! create 'mesh_mask.nc' file 73 ! ! ============================ 74 CALL iom_open( TRIM(clnam), inum, ldwrt = .TRUE. ) 75 ! 76 ! ! global domain size 77 CALL iom_rstput( 0, 0, inum, 'jpiglo', REAL( jpiglo, wp), ktype = jp_i4 ) 78 CALL iom_rstput( 0, 0, inum, 'jpjglo', REAL( jpjglo, wp), ktype = jp_i4 ) 79 CALL iom_rstput( 0, 0, inum, 'jpkglo', REAL( jpkglo, wp), ktype = jp_i4 ) 80 81 ! ! domain characteristics 82 CALL iom_rstput( 0, 0, inum, 'jperio', REAL( jperio, wp), ktype = jp_i4 ) 83 ! ! type of vertical coordinate 84 IF( ln_zco ) THEN ; izco = 1 ; ELSE ; izco = 0 ; ENDIF 85 IF( ln_zps ) THEN ; izps = 1 ; ELSE ; izps = 0 ; ENDIF 86 IF( ln_sco ) THEN ; isco = 1 ; ELSE ; isco = 0 ; ENDIF 87 CALL iom_rstput( 0, 0, inum, 'ln_zco' , REAL( izco, wp), ktype = jp_i4 ) 88 CALL iom_rstput( 0, 0, inum, 'ln_zps' , REAL( izps, wp), ktype = jp_i4 ) 89 CALL iom_rstput( 0, 0, inum, 'ln_sco' , REAL( isco, wp), ktype = jp_i4 ) 90 ! ! ocean cavities under iceshelves 91 IF( ln_isfcav ) THEN ; icav = 1 ; ELSE ; icav = 0 ; ENDIF 92 CALL iom_rstput( 0, 0, inum, 'ln_isfcav', REAL( icav, wp), ktype = jp_i4 ) 93 94 ! ! masks 95 CALL iom_rstput( 0, 0, inum, 'tmask', tmask, ktype = jp_i1 ) ! ! land-sea mask 96 CALL iom_rstput( 0, 0, inum, 'umask', umask, ktype = jp_i1 ) 97 CALL iom_rstput( 0, 0, inum, 'vmask', vmask, ktype = jp_i1 ) 98 CALL iom_rstput( 0, 0, inum, 'fmask', fmask, ktype = jp_i1 ) 198 99 199 100 CALL dom_uniq( zprw, 'T' ) 200 101 DO jj = 1, jpj 201 102 DO ji = 1, jpi 202 jk=mikt(ji,jj) 203 zprt(ji,jj) = tmask(ji,jj,jk) * zprw(ji,jj) ! ! 
unique point mask 103 zprt(ji,jj) = ssmask(ji,jj) * zprw(ji,jj) ! ! unique point mask 204 104 END DO 205 105 END DO ! ! unique point mask 206 CALL iom_rstput( 0, 0, inum 2, 'tmaskutil', zprt, ktype = jp_i1 )106 CALL iom_rstput( 0, 0, inum, 'tmaskutil', zprt, ktype = jp_i1 ) 207 107 CALL dom_uniq( zprw, 'U' ) 208 108 DO jj = 1, jpj 209 109 DO ji = 1, jpi 210 jk=miku(ji,jj) 211 zprt(ji,jj) = umask(ji,jj,jk) * zprw(ji,jj) ! ! unique point mask 110 zprt(ji,jj) = ssumask(ji,jj) * zprw(ji,jj) ! ! unique point mask 212 111 END DO 213 112 END DO 214 CALL iom_rstput( 0, 0, inum 2, 'umaskutil', zprt, ktype = jp_i1 )113 CALL iom_rstput( 0, 0, inum, 'umaskutil', zprt, ktype = jp_i1 ) 215 114 CALL dom_uniq( zprw, 'V' ) 216 115 DO jj = 1, jpj 217 116 DO ji = 1, jpi 218 jk=mikv(ji,jj) 219 zprt(ji,jj) = vmask(ji,jj,jk) * zprw(ji,jj) ! ! unique point mask 117 zprt(ji,jj) = ssvmask(ji,jj) * zprw(ji,jj) ! ! unique point mask 220 118 END DO 221 119 END DO 222 CALL iom_rstput( 0, 0, inum2, 'vmaskutil', zprt, ktype = jp_i1 ) 223 CALL dom_uniq( zprw, 'F' ) 224 DO jj = 1, jpj 225 DO ji = 1, jpi 226 jk=mikf(ji,jj) 227 zprt(ji,jj) = fmask(ji,jj,jk) * zprw(ji,jj) ! ! unique point mask 228 END DO 229 END DO 230 CALL iom_rstput( 0, 0, inum2, 'fmaskutil', zprt, ktype = jp_i1 ) 120 CALL iom_rstput( 0, 0, inum, 'vmaskutil', zprt, ktype = jp_i1 ) 121 !!gm ssfmask has been removed ==>> find another solution to defined fmaskutil 122 !! Here we just remove the output of fmaskutil. 123 ! CALL dom_uniq( zprw, 'F' ) 124 ! DO jj = 1, jpj 125 ! DO ji = 1, jpi 126 ! zprt(ji,jj) = ssfmask(ji,jj) * zprw(ji,jj) ! ! unique point mask 127 ! END DO 128 ! END DO 129 ! CALL iom_rstput( 0, 0, inum, 'fmaskutil', zprt, ktype = jp_i1 ) 130 !!gm 231 131 232 132 ! ! horizontal mesh (inum3) 233 CALL iom_rstput( 0, 0, inum 3, 'glamt', glamt, ktype = jp_r8 ) ! ! latitude234 CALL iom_rstput( 0, 0, inum 3, 'glamu', glamu, ktype = jp_r8 )235 CALL iom_rstput( 0, 0, inum 3, 'glamv', glamv, ktype = jp_r8 )236 CALL iom_rstput( 0, 0, inum 3, 'glamf', glamf, ktype = jp_r8 )237 238 CALL iom_rstput( 0, 0, inum 3, 'gphit', gphit, ktype = jp_r8 ) ! ! longitude239 CALL iom_rstput( 0, 0, inum 3, 'gphiu', gphiu, ktype = jp_r8 )240 CALL iom_rstput( 0, 0, inum 3, 'gphiv', gphiv, ktype = jp_r8 )241 CALL iom_rstput( 0, 0, inum 3, 'gphif', gphif, ktype = jp_r8 )242 243 CALL iom_rstput( 0, 0, inum 3, 'e1t', e1t, ktype = jp_r8 ) ! ! e1 scale factors244 CALL iom_rstput( 0, 0, inum 3, 'e1u', e1u, ktype = jp_r8 )245 CALL iom_rstput( 0, 0, inum 3, 'e1v', e1v, ktype = jp_r8 )246 CALL iom_rstput( 0, 0, inum 3, 'e1f', e1f, ktype = jp_r8 )247 248 CALL iom_rstput( 0, 0, inum 3, 'e2t', e2t, ktype = jp_r8 ) ! ! e2 scale factors249 CALL iom_rstput( 0, 0, inum 3, 'e2u', e2u, ktype = jp_r8 )250 CALL iom_rstput( 0, 0, inum 3, 'e2v', e2v, ktype = jp_r8 )251 CALL iom_rstput( 0, 0, inum 3, 'e2f', e2f, ktype = jp_r8 )252 253 CALL iom_rstput( 0, 0, inum 3, 'ff_f', ff_f, ktype = jp_r8 )! ! coriolis factor254 CALL iom_rstput( 0, 0, inum 3, 'ff_t', ff_t, ktype = jp_r8 ) ! ! coriolis factor133 CALL iom_rstput( 0, 0, inum, 'glamt', glamt, ktype = jp_r8 ) ! ! latitude 134 CALL iom_rstput( 0, 0, inum, 'glamu', glamu, ktype = jp_r8 ) 135 CALL iom_rstput( 0, 0, inum, 'glamv', glamv, ktype = jp_r8 ) 136 CALL iom_rstput( 0, 0, inum, 'glamf', glamf, ktype = jp_r8 ) 137 138 CALL iom_rstput( 0, 0, inum, 'gphit', gphit, ktype = jp_r8 ) ! ! 
longitude 139 CALL iom_rstput( 0, 0, inum, 'gphiu', gphiu, ktype = jp_r8 ) 140 CALL iom_rstput( 0, 0, inum, 'gphiv', gphiv, ktype = jp_r8 ) 141 CALL iom_rstput( 0, 0, inum, 'gphif', gphif, ktype = jp_r8 ) 142 143 CALL iom_rstput( 0, 0, inum, 'e1t', e1t, ktype = jp_r8 ) ! ! e1 scale factors 144 CALL iom_rstput( 0, 0, inum, 'e1u', e1u, ktype = jp_r8 ) 145 CALL iom_rstput( 0, 0, inum, 'e1v', e1v, ktype = jp_r8 ) 146 CALL iom_rstput( 0, 0, inum, 'e1f', e1f, ktype = jp_r8 ) 147 148 CALL iom_rstput( 0, 0, inum, 'e2t', e2t, ktype = jp_r8 ) ! ! e2 scale factors 149 CALL iom_rstput( 0, 0, inum, 'e2u', e2u, ktype = jp_r8 ) 150 CALL iom_rstput( 0, 0, inum, 'e2v', e2v, ktype = jp_r8 ) 151 CALL iom_rstput( 0, 0, inum, 'e2f', e2f, ktype = jp_r8 ) 152 153 CALL iom_rstput( 0, 0, inum, 'ff_f', ff_f, ktype = jp_r8 ) ! ! coriolis factor 154 CALL iom_rstput( 0, 0, inum, 'ff_t', ff_t, ktype = jp_r8 ) 255 155 256 156 ! note that mbkt is set to 1 over land ==> use surface tmask 257 157 zprt(:,:) = ssmask(:,:) * REAL( mbkt(:,:) , wp ) 258 CALL iom_rstput( 0, 0, inum 4, 'mbathy', zprt, ktype = jp_i2) ! ! nb of ocean T-points158 CALL iom_rstput( 0, 0, inum, 'mbathy', zprt, ktype = jp_i4 ) ! ! nb of ocean T-points 259 159 zprt(:,:) = ssmask(:,:) * REAL( mikt(:,:) , wp ) 260 CALL iom_rstput( 0, 0, inum 4, 'misf', zprt, ktype = jp_i2) ! ! nb of ocean T-points160 CALL iom_rstput( 0, 0, inum, 'misf', zprt, ktype = jp_i4 ) ! ! nb of ocean T-points 261 161 zprt(:,:) = ssmask(:,:) * REAL( risfdep(:,:) , wp ) 262 CALL iom_rstput( 0, 0, inum4, 'isfdraft', zprt, ktype = jp_r8 ) ! ! nb of ocean T-points 263 264 IF( ln_sco ) THEN ! s-coordinate 265 CALL iom_rstput( 0, 0, inum4, 'hbatt', hbatt ) 266 CALL iom_rstput( 0, 0, inum4, 'hbatu', hbatu ) 267 CALL iom_rstput( 0, 0, inum4, 'hbatv', hbatv ) 268 CALL iom_rstput( 0, 0, inum4, 'hbatf', hbatf ) 269 ! 270 CALL iom_rstput( 0, 0, inum4, 'gsigt', gsigt ) ! ! scaling coef. 271 CALL iom_rstput( 0, 0, inum4, 'gsigw', gsigw ) 272 CALL iom_rstput( 0, 0, inum4, 'gsi3w', gsi3w ) 273 CALL iom_rstput( 0, 0, inum4, 'esigt', esigt ) 274 CALL iom_rstput( 0, 0, inum4, 'esigw', esigw ) 275 ! 276 CALL iom_rstput( 0, 0, inum4, 'e3t_0', e3t_0 ) ! ! scale factors 277 CALL iom_rstput( 0, 0, inum4, 'e3u_0', e3u_0 ) 278 CALL iom_rstput( 0, 0, inum4, 'e3v_0', e3v_0 ) 279 CALL iom_rstput( 0, 0, inum4, 'e3w_0', e3w_0 ) 280 ! 281 CALL iom_rstput( 0, 0, inum4, 'gdept_1d' , gdept_1d ) ! ! stretched system 282 CALL iom_rstput( 0, 0, inum4, 'gdepw_1d' , gdepw_1d ) 283 CALL iom_rstput( 0, 0, inum4, 'gdept_0', gdept_0, ktype = jp_r8 ) 284 CALL iom_rstput( 0, 0, inum4, 'gdepw_0', gdepw_0, ktype = jp_r8 ) 162 CALL iom_rstput( 0, 0, inum, 'isfdraft', zprt, ktype = jp_r8 ) ! ! nb of ocean T-points 163 ! ! vertical mesh 164 CALL iom_rstput( 0, 0, inum, 'e3t_0', e3t_0, ktype = jp_r8 ) ! ! scale factors 165 CALL iom_rstput( 0, 0, inum, 'e3u_0', e3u_0, ktype = jp_r8 ) 166 CALL iom_rstput( 0, 0, inum, 'e3v_0', e3v_0, ktype = jp_r8 ) 167 CALL iom_rstput( 0, 0, inum, 'e3w_0', e3w_0, ktype = jp_r8 ) 168 ! 169 CALL iom_rstput( 0, 0, inum, 'gdept_1d' , gdept_1d , ktype = jp_r8 ) ! stretched system 170 CALL iom_rstput( 0, 0, inum, 'gdepw_1d' , gdepw_1d , ktype = jp_r8 ) 171 CALL iom_rstput( 0, 0, inum, 'gdept_0' , gdept_0 , ktype = jp_r8 ) 172 CALL iom_rstput( 0, 0, inum, 'gdepw_0' , gdepw_0 , ktype = jp_r8 ) 173 ! 174 IF( ln_sco ) THEN ! s-coordinate stiffness 285 175 CALL dom_stiff( zprt ) 286 CALL iom_rstput( 0, 0, inum 4, 'stiffness', zprt ) !! Max. grid stiffness ratio176 CALL iom_rstput( 0, 0, inum, 'stiffness', zprt ) ! 
Max. grid stiffness ratio 287 177 ENDIF 288 289 IF( ln_zps ) THEN ! z-coordinate - partial steps 290 ! 291 IF( nmsh <= 6 ) THEN ! ! 3D vertical scale factors 292 CALL iom_rstput( 0, 0, inum4, 'e3t_0', e3t_0 ) 293 CALL iom_rstput( 0, 0, inum4, 'e3u_0', e3u_0 ) 294 CALL iom_rstput( 0, 0, inum4, 'e3v_0', e3v_0 ) 295 CALL iom_rstput( 0, 0, inum4, 'e3w_0', e3w_0 ) 296 ELSE ! ! 2D masked bottom ocean scale factors 297 DO jj = 1,jpj 298 DO ji = 1,jpi 299 e3tp(ji,jj) = e3t_0(ji,jj,mbkt(ji,jj)) * ssmask(ji,jj) 300 e3wp(ji,jj) = e3w_0(ji,jj,mbkt(ji,jj)) * ssmask(ji,jj) 301 END DO 302 END DO 303 CALL iom_rstput( 0, 0, inum4, 'e3t_ps', e3tp ) 304 CALL iom_rstput( 0, 0, inum4, 'e3w_ps', e3wp ) 305 END IF 306 ! 307 IF( nmsh <= 3 ) THEN ! ! 3D depth 308 CALL iom_rstput( 0, 0, inum4, 'gdept_0', gdept_0, ktype = jp_r8 ) 309 DO jk = 1,jpk 310 DO jj = 1, jpjm1 311 DO ji = 1, jpim1 ! vector opt. 312 zdepu(ji,jj,jk) = MIN( gdept_0(ji,jj,jk) , gdept_0(ji+1,jj ,jk) ) 313 zdepv(ji,jj,jk) = MIN( gdept_0(ji,jj,jk) , gdept_0(ji ,jj+1,jk) ) 314 END DO 315 END DO 316 END DO 317 CALL lbc_lnk( zdepu, 'U', 1. ) ; CALL lbc_lnk( zdepv, 'V', 1. ) 318 CALL iom_rstput( 0, 0, inum4, 'gdepu', zdepu, ktype = jp_r8 ) 319 CALL iom_rstput( 0, 0, inum4, 'gdepv', zdepv, ktype = jp_r8 ) 320 CALL iom_rstput( 0, 0, inum4, 'gdepw_0', gdepw_0, ktype = jp_r8 ) 321 ELSE ! ! 2D bottom depth 322 DO jj = 1,jpj 323 DO ji = 1,jpi 324 zprt(ji,jj) = gdept_0(ji,jj,mbkt(ji,jj) ) * ssmask(ji,jj) 325 zprw(ji,jj) = gdepw_0(ji,jj,mbkt(ji,jj)+1) * ssmask(ji,jj) 326 END DO 327 END DO 328 CALL iom_rstput( 0, 0, inum4, 'hdept', zprt, ktype = jp_r8 ) 329 CALL iom_rstput( 0, 0, inum4, 'hdepw', zprw, ktype = jp_r8 ) 330 ENDIF 331 ! 332 CALL iom_rstput( 0, 0, inum4, 'gdept_1d', gdept_1d ) ! ! reference z-coord. 333 CALL iom_rstput( 0, 0, inum4, 'gdepw_1d', gdepw_1d ) 334 CALL iom_rstput( 0, 0, inum4, 'e3t_1d' , e3t_1d ) 335 CALL iom_rstput( 0, 0, inum4, 'e3w_1d' , e3w_1d ) 336 ENDIF 337 338 IF( ln_zco ) THEN 339 ! ! z-coordinate - full steps 340 CALL iom_rstput( 0, 0, inum4, 'gdept_1d', gdept_1d ) ! ! depth 341 CALL iom_rstput( 0, 0, inum4, 'gdepw_1d', gdepw_1d ) 342 CALL iom_rstput( 0, 0, inum4, 'e3t_1d' , e3t_1d ) ! ! scale factors 343 CALL iom_rstput( 0, 0, inum4, 'e3w_1d' , e3w_1d ) 344 ENDIF 178 ! 179 ! IF( ll_wd ) CALL iom_rstput( 0, 0, inum, 'ht_0' , ht_0 , ktype = jp_r8 ) 180 345 181 ! ! ============================ 346 !! close the files182 CALL iom_close( inum ) ! close the files 347 183 ! ! ============================ 348 SELECT CASE ( MOD(nmsh, 3) )349 CASE ( 1 )350 CALL iom_close( inum0 )351 CASE ( 2 )352 CALL iom_close( inum1 )353 CALL iom_close( inum2 )354 CASE ( 0 )355 CALL iom_close( inum2 )356 CALL iom_close( inum3 )357 CALL iom_close( inum4 )358 END SELECT359 !360 CALL wrk_dealloc( jpi, jpj, zprt, zprw )361 CALL wrk_dealloc( jpi, jpj, jpk, zdepu, zdepv )362 !363 IF( nn_timing == 1 ) CALL timing_stop('dom_wri')364 !365 184 END SUBROUTINE dom_wri 366 185 … … 375 194 !! 2) check which elements have been changed 376 195 !!---------------------------------------------------------------------- 377 !378 196 CHARACTER(len=1) , INTENT(in ) :: cdgrd ! 379 197 REAL(wp), DIMENSION(:,:), INTENT(inout) :: puniq ! … … 382 200 INTEGER :: ji ! dummy loop indices 383 201 LOGICAL, DIMENSION(SIZE(puniq,1),SIZE(puniq,2),1) :: lldbl ! store whether each point is unique or not 384 REAL(wp), POINTER, DIMENSION(:,:) :: ztstref 385 !!---------------------------------------------------------------------- 386 ! 
387 IF( nn_timing == 1 ) CALL timing_start('dom_uniq') 388 ! 389 CALL wrk_alloc( jpi, jpj, ztstref ) 202 REAL(wp), DIMENSION(jpi,jpj) :: ztstref 203 !!---------------------------------------------------------------------- 390 204 ! 391 205 ! build an array with different values for each element … … 396 210 ! 397 211 puniq(:,:) = ztstref(:,:) ! default definition 398 CALL lbc_lnk( puniq, cdgrd, 1. ) ! apply boundary conditions212 CALL lbc_lnk( 'domwri', puniq, cdgrd, 1. ) ! apply boundary conditions 399 213 lldbl(:,:,1) = puniq(:,:) == ztstref(:,:) ! check which values have been changed 400 214 ! … … 402 216 ! fill only the inner part of the cpu with llbl converted into real 403 217 puniq(nldi:nlei,nldj:nlej) = REAL( COUNT( lldbl(nldi:nlei,nldj:nlej,:), dim = 3 ) , wp ) 404 !405 CALL wrk_dealloc( jpi, jpj, ztstref )406 !407 IF( nn_timing == 1 ) CALL timing_stop('dom_uniq')408 218 ! 409 219 END SUBROUTINE dom_uniq … … 461 271 END DO 462 272 END DO 463 CALL lbc_lnk( zx1, 'T', 1. )273 CALL lbc_lnk( 'domwri', zx1, 'T', 1. ) 464 274 ! 465 275 IF( PRESENT( px1 ) ) px1 = zx1 … … 467 277 zrxmax = MAXVAL( zx1 ) 468 278 ! 469 IF( lk_mpp ) CALL mpp_max(zrxmax ) ! max over the global domain279 CALL mpp_max( 'domwri', zrxmax ) ! max over the global domain 470 280 ! 471 281 IF(lwp) THEN -
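The tmaskutil / umaskutil / vmaskutil fields written above are built with dom_uniq, which flags duplicated (halo and overlap) points by filling the grid with values that are unique by construction, applying the lateral boundary exchange, and marking every point whose value was overwritten. The toy program below is a conceptual, single-process, one-dimensional sketch of that trick and is not part of the changeset: the 8-point row and the explicit cyclic copy stand in for the random reference field and the 2-D lbc_lnk halo exchange used by the real routine.

   PROGRAM uniq_sketch
      ! Conceptual illustration of the dom_uniq trick: give every point a value
      ! that is unique by construction, apply the (here: cyclic east-west)
      ! boundary copy, then flag as duplicated every point that was overwritten.
      IMPLICIT NONE
      INTEGER, PARAMETER :: wp  = SELECTED_REAL_KIND(12,307)   ! working precision (assumed)
      INTEGER, PARAMETER :: jpi = 8                            ! row length incl. 1-point halos (illustrative)
      REAL(wp) :: ztstref(jpi), puniq(jpi)
      INTEGER  :: ji
      !
      DO ji = 1, jpi                    ! unique reference value at each point
         ztstref(ji) = REAL( ji, wp )
      END DO
      puniq(:) = ztstref(:)
      !
      puniq(1)   = puniq(jpi-1)         ! cyclic boundary condition: halo points
      puniq(jpi) = puniq(2)             ! receive the value of the owning interior point
      !
      DO ji = 1, jpi                    ! 1 = unique point, 0 = duplicated (halo) point
         WRITE(*,'(a,i2,a,i2)') ' ji =', ji, '   unique =', MERGE( 1, 0, puniq(ji) == ztstref(ji) )
      END DO
   END PROGRAM uniq_sketch

The same test, performed with lbc_lnk on the 2-D domain decomposition, is what allows the "util" masks in mesh_mask.nc to exclude duplicated rows and columns when global diagnostics are computed from that file.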
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/domzgr.F90
r10725 r10727 17 17 !! 3.4 ! 2012-08 (J. Siddorn) added Siddorn and Furner stretching function 18 18 !! 3.4 ! 2012-12 (R. Bourdalle-Badie and G. Reffray) modify C1D case 19 !! 3.6 ! 2014-11 (P. Mathiot and C. Harris) add ice shelf capabilitye 19 !! 3.6 ! 2014-11 (P. Mathiot and C. Harris) add ice shelf capabilitye 20 20 !! 3.? ! 2015-11 (H. Liu) Modifications for Wetting/Drying 21 21 !!---------------------------------------------------------------------- … … 37 37 USE oce ! ocean variables 38 38 USE dom_oce ! ocean domain 39 USE closea ! closed seas39 ! USE closea ! closed seas 40 40 ! 41 41 USE in_out_manager ! I/O manager … … 45 45 USE wrk_nemo ! Memory allocation 46 46 USE timing ! Timing 47 USE dombat 47 48 48 49 IMPLICIT NONE … … 59 60 REAL(wp) :: rn_rmax ! maximum cut-off r-value allowed (0<rn_rmax<1) 60 61 REAL(wp) :: rn_hc ! Critical depth for transition from sigma to stretched coordinates 62 INTEGER , PUBLIC :: ntopo !: = 0/1 ,compute/read the bathymetry file 63 REAL(wp), PUBLIC :: e3zps_min !: miminum thickness for partial steps (meters) 64 REAL(wp), PUBLIC :: e3zps_rat !: minimum thickness ration for partial steps 65 INTEGER, PUBLIC :: nperio !: type of lateral boundary condition 66 61 67 ! Song and Haidvogel 1994 stretching parameters 62 68 REAL(wp) :: rn_theta ! surface control parameter (0<=rn_theta<=20) … … 115 121 !!---------------------------------------------------------------------- 116 122 ! 117 IF( nn_timing == 1 ) CALL timing_start('dom_zgr')123 ! IF( nn_timing == 1 ) CALL timing_start('dom_zgr') 118 124 ! 119 125 REWIND( numnam_ref ) ! Namelist namzgr in reference namelist : Vertical coordinate … … 183 189 ENDIF 184 190 ! 185 IF( nn_timing == 1 ) CALL timing_stop('dom_zgr')191 ! IF( nn_timing == 1 ) CALL timing_stop('dom_zgr') 186 192 ! 187 193 END SUBROUTINE dom_zgr … … 217 223 !!---------------------------------------------------------------------- 218 224 ! 219 IF( nn_timing == 1 ) CALL timing_start('zgr_z')225 ! IF( nn_timing == 1 ) CALL timing_start('zgr_z') 220 226 ! 221 227 ! Set variables from parameters … … 349 355 END DO 350 356 ! 351 IF( nn_timing == 1 ) CALL timing_stop('zgr_z')357 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_z') 352 358 ! 353 359 END SUBROUTINE zgr_z … … 395 401 !!---------------------------------------------------------------------- 396 402 ! 397 IF( nn_timing == 1 ) CALL timing_start('zgr_bat')403 ! IF( nn_timing == 1 ) CALL timing_start('zgr_bat') 398 404 ! 399 405 IF(lwp) WRITE(numout,*) … … 516 522 ! 517 523 ! ! ================ ! 518 ELSEIF( ntopo == 1 ) THEN ! read in file ! (over the local domain)524 ELSEIF( ntopo == 1 .OR. ntopo ==2 ) THEN ! read in file ! (over the local domain) 519 525 ! ! ================ ! 520 526 ! … … 554 560 ENDIF 555 561 IF( ln_zps .OR. ln_sco ) THEN ! zps or sco : read meter bathymetry 556 CALL iom_open ( 'bathy_meter.nc', inum ) 557 IF ( ln_isfcav ) THEN 558 CALL iom_get ( inum, jpdom_data, 'Bathymetry_isf', bathy, lrowattr=.false. ) 562 #if defined key_agrif 563 IF (agrif_root()) THEN 564 #endif 565 IF( ntopo == 1) THEN 566 CALL iom_open ( 'bathy_meter.nc', inum ) 567 IF ( ln_isfcav ) THEN 568 CALL iom_get ( inum, jpdom_data, 'Bathymetry_isf', bathy, lrowattr=.false. 
) 569 ELSE 570 CALL iom_get ( inum, jpdom_data, 'Bathymetry' , bathy, lrowattr=ln_use_jattr ) 571 END IF 572 CALL iom_close( inum ) 559 573 ELSE 560 CALL iom_get ( inum, jpdom_data, 'Bathymetry' , bathy, lrowattr=ln_use_jattr ) 561 END IF 562 CALL iom_close( inum ) 574 CALL dom_bat 575 ENDIF 576 #if defined key_agrif 577 ELSE 578 IF( ntopo == 1) THEN 579 CALL agrif_create_bathy_meter() 580 ELSE 581 CALL dom_bat 582 ENDIF 583 ENDIF 584 #endif 563 585 ! 564 586 ! initialisation isf variables … … 611 633 ENDIF 612 634 ! 613 IF( nn_closea == 0 ) CALL clo_bat( bathy, mbathy ) !== NO closed seas or lakes ==!635 ! IF( nn_closea == 0 ) CALL clo_bat( bathy, mbathy ) !== NO closed seas or lakes ==! 614 636 ! 615 637 IF ( .not. ln_sco ) THEN !== set a minimum depth ==! … … 624 646 ENDIF 625 647 ! 626 IF( nn_timing == 1 ) CALL timing_stop('zgr_bat')648 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_bat') 627 649 ! 628 650 END SUBROUTINE zgr_bat … … 708 730 !!---------------------------------------------------------------------- 709 731 ! 710 IF( nn_timing == 1 ) CALL timing_start('zgr_bat_ctl')732 ! IF( nn_timing == 1 ) CALL timing_start('zgr_bat_ctl') 711 733 ! 712 734 CALL wrk_alloc( jpi, jpj, zbathy ) … … 738 760 END DO 739 761 END DO 740 IF( lk_mpp ) CALL mpp_sum( icompt )762 ! IF( lk_mpp ) CALL mpp_sum( icompt ) 741 763 IF( icompt == 0 ) THEN 742 764 IF(lwp) WRITE(numout,*)' no isolated ocean grid points' … … 746 768 IF( lk_mpp ) THEN 747 769 zbathy(:,:) = FLOAT( mbathy(:,:) ) 748 CALL lbc_lnk( zbathy, 'T', 1._wp )770 CALL lbc_lnk( 'toto',zbathy, 'T', 1._wp ) 749 771 mbathy(:,:) = INT( zbathy(:,:) ) 750 772 ENDIF … … 784 806 ! ... mono- or macro-tasking: T-point, >0, 2D array, no slab 785 807 zbathy(:,:) = FLOAT( mbathy(:,:) ) 786 CALL lbc_lnk( zbathy, 'T', 1._wp )808 CALL lbc_lnk( 'toto',zbathy, 'T', 1._wp ) 787 809 mbathy(:,:) = INT( zbathy(:,:) ) 788 810 ENDIF … … 805 827 CALL wrk_dealloc( jpi, jpj, zbathy ) 806 828 ! 807 IF( nn_timing == 1 ) CALL timing_stop('zgr_bat_ctl')829 !! IF( nn_timing == 1 ) CALL timing_stop('zgr_bat_ctl') 808 830 ! 809 831 END SUBROUTINE zgr_bat_ctl … … 826 848 !!---------------------------------------------------------------------- 827 849 ! 828 IF( nn_timing == 1 ) CALL timing_start('zgr_bot_level')850 ! IF( nn_timing == 1 ) CALL timing_start('zgr_bot_level') 829 851 ! 830 852 CALL wrk_alloc( jpi, jpj, zmbk ) … … 844 866 END DO 845 867 ! converte into REAL to use lbc_lnk ; impose a min value of 1 as a zero can be set in lbclnk 846 zmbk(:,:) = REAL( mbku(:,:), wp ) ; CALL lbc_lnk( zmbk,'U',1.) ; mbku (:,:) = MAX( INT( zmbk(:,:) ), 1 )847 zmbk(:,:) = REAL( mbkv(:,:), wp ) ; CALL lbc_lnk( zmbk,'V',1.) ; mbkv (:,:) = MAX( INT( zmbk(:,:) ), 1 )868 zmbk(:,:) = REAL( mbku(:,:), wp ) ; CALL lbc_lnk('toto',zmbk,'U',1.) ; mbku (:,:) = MAX( INT( zmbk(:,:) ), 1 ) 869 zmbk(:,:) = REAL( mbkv(:,:), wp ) ; CALL lbc_lnk('toto',zmbk,'V',1.) ; mbkv (:,:) = MAX( INT( zmbk(:,:) ), 1 ) 848 870 ! 849 871 CALL wrk_dealloc( jpi, jpj, zmbk ) 850 872 ! 851 IF( nn_timing == 1 ) CALL timing_stop('zgr_bot_level')873 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_bot_level') 852 874 ! 853 875 END SUBROUTINE zgr_bot_level … … 870 892 !!---------------------------------------------------------------------- 871 893 ! 872 IF( nn_timing == 1 ) CALL timing_start('zgr_top_level')894 ! IF( nn_timing == 1 ) CALL timing_start('zgr_top_level') 873 895 ! 874 896 CALL wrk_alloc( jpi, jpj, zmik ) … … 889 911 890 912 ! 
converte into REAL to use lbc_lnk ; impose a min value of 1 as a zero can be set in lbclnk 891 zmik(:,:) = REAL( miku(:,:), wp ) ; CALL lbc_lnk( zmik,'U',1.) ; miku (:,:) = MAX( INT( zmik(:,:) ), 1 )892 zmik(:,:) = REAL( mikv(:,:), wp ) ; CALL lbc_lnk( zmik,'V',1.) ; mikv (:,:) = MAX( INT( zmik(:,:) ), 1 )893 zmik(:,:) = REAL( mikf(:,:), wp ) ; CALL lbc_lnk( zmik,'F',1.) ; mikf (:,:) = MAX( INT( zmik(:,:) ), 1 )913 zmik(:,:) = REAL( miku(:,:), wp ) ; CALL lbc_lnk('toto',zmik,'U',1.) ; miku (:,:) = MAX( INT( zmik(:,:) ), 1 ) 914 zmik(:,:) = REAL( mikv(:,:), wp ) ; CALL lbc_lnk('toto',zmik,'V',1.) ; mikv (:,:) = MAX( INT( zmik(:,:) ), 1 ) 915 zmik(:,:) = REAL( mikf(:,:), wp ) ; CALL lbc_lnk('toto',zmik,'F',1.) ; mikf (:,:) = MAX( INT( zmik(:,:) ), 1 ) 894 916 ! 895 917 CALL wrk_dealloc( jpi, jpj, zmik ) 896 918 ! 897 IF( nn_timing == 1 ) CALL timing_stop('zgr_top_level')919 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_top_level') 898 920 ! 899 921 END SUBROUTINE zgr_top_level … … 911 933 !!---------------------------------------------------------------------- 912 934 ! 913 IF( nn_timing == 1 ) CALL timing_start('zgr_zco')935 ! IF( nn_timing == 1 ) CALL timing_start('zgr_zco') 914 936 ! 915 937 DO jk = 1, jpk … … 926 948 END DO 927 949 ! 928 IF( nn_timing == 1 ) CALL timing_stop('zgr_zco')950 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_zco') 929 951 ! 930 952 END SUBROUTINE zgr_zco … … 985 1007 !!--------------------------------------------------------------------- 986 1008 ! 987 IF( nn_timing == 1 ) CALL timing_start('zgr_zps')1009 ! IF( nn_timing == 1 ) CALL timing_start('zgr_zps') 988 1010 ! 989 1011 CALL wrk_alloc( jpi,jpj,jpk, zprt ) … … 1118 1140 END IF 1119 1141 1120 CALL lbc_lnk( e3u_0 , 'U', 1._wp ) ; CALL lbc_lnk(e3uw_0, 'U', 1._wp ) ! lateral boundary conditions1121 CALL lbc_lnk( e3v_0 , 'V', 1._wp ) ; CALL lbc_lnk(e3vw_0, 'V', 1._wp )1142 CALL lbc_lnk('toto', e3u_0 , 'U', 1._wp ) ; CALL lbc_lnk('toto', e3uw_0, 'U', 1._wp ) ! lateral boundary conditions 1143 CALL lbc_lnk( 'toto',e3v_0 , 'V', 1._wp ) ; CALL lbc_lnk('toto', e3vw_0, 'V', 1._wp ) 1122 1144 ! 1123 1145 … … 1140 1162 END DO 1141 1163 END DO 1142 CALL lbc_lnk( e3f_0, 'F', 1._wp ) ! Lateral boundary conditions1164 CALL lbc_lnk('toto', e3f_0, 'F', 1._wp ) ! Lateral boundary conditions 1143 1165 ! 1144 1166 DO jk = 1, jpk ! set to z-scale factor if zero (i.e. along closed boundaries) … … 1183 1205 CALL wrk_dealloc( jpi,jpj,jpk, zprt ) 1184 1206 ! 1185 IF( nn_timing == 1 ) CALL timing_stop('zgr_zps')1207 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_zps') 1186 1208 ! 1187 1209 END SUBROUTINE zgr_zps … … 1217 1239 !!--------------------------------------------------------------------- 1218 1240 ! 1219 IF( nn_timing == 1 ) CALL timing_start('zgr_isf')1241 !! IF( nn_timing == 1 ) CALL timing_start('zgr_isf') 1220 1242 ! 1221 1243 CALL wrk_alloc( jpi,jpj, zbathy, zmask, zrisfdep) … … 1264 1286 IF( lk_mpp ) THEN 1265 1287 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1266 CALL lbc_lnk( zbathy, 'T', 1. )1288 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1267 1289 misfdep(:,:) = INT( zbathy(:,:) ) 1268 1290 1269 CALL lbc_lnk( risfdep,'T', 1. )1270 CALL lbc_lnk( bathy, 'T', 1. )1291 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1292 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1271 1293 1272 1294 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1273 CALL lbc_lnk( zbathy, 'T', 1. )1295 CALL lbc_lnk( 'toto',zbathy, 'T', 1. 
) 1274 1296 mbathy(:,:) = INT( zbathy(:,:) ) 1275 1297 ENDIF … … 1385 1407 IF( lk_mpp ) THEN 1386 1408 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1387 CALL lbc_lnk( zbathy, 'T', 1. )1409 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1388 1410 misfdep(:,:) = INT( zbathy(:,:) ) 1389 1411 1390 CALL lbc_lnk( risfdep,'T', 1. )1391 CALL lbc_lnk( bathy, 'T', 1. )1412 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1413 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1392 1414 1393 1415 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1394 CALL lbc_lnk( zbathy, 'T', 1. )1416 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1395 1417 mbathy(:,:) = INT( zbathy(:,:) ) 1396 1418 ENDIF … … 1422 1444 IF( lk_mpp ) THEN 1423 1445 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1424 CALL lbc_lnk( zbathy, 'T', 1. )1446 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1425 1447 misfdep(:,:) = INT( zbathy(:,:) ) 1426 1448 1427 CALL lbc_lnk( risfdep,'T', 1. )1428 CALL lbc_lnk( bathy, 'T', 1. )1449 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1450 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1429 1451 1430 1452 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1431 CALL lbc_lnk( zbathy, 'T', 1. )1453 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1432 1454 mbathy(:,:) = INT( zbathy(:,:) ) 1433 1455 ENDIF … … 1459 1481 IF( lk_mpp ) THEN 1460 1482 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1461 CALL lbc_lnk( zbathy, 'T', 1. )1483 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1462 1484 misfdep(:,:) = INT( zbathy(:,:) ) 1463 1485 1464 CALL lbc_lnk( risfdep,'T', 1. )1465 CALL lbc_lnk( bathy, 'T', 1. )1486 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1487 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1466 1488 1467 1489 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1468 CALL lbc_lnk( zbathy, 'T', 1. )1490 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1469 1491 mbathy(:,:) = INT( zbathy(:,:) ) 1470 1492 ENDIF … … 1496 1518 IF( lk_mpp ) THEN 1497 1519 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1498 CALL lbc_lnk( zbathy, 'T', 1. )1520 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1499 1521 misfdep(:,:) = INT( zbathy(:,:) ) 1500 1522 1501 CALL lbc_lnk( risfdep,'T', 1. )1502 CALL lbc_lnk( bathy, 'T', 1. )1523 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1524 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1503 1525 1504 1526 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1505 CALL lbc_lnk( zbathy, 'T', 1. )1527 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1506 1528 mbathy(:,:) = INT( zbathy(:,:) ) 1507 1529 ENDIF … … 1533 1555 IF( lk_mpp ) THEN 1534 1556 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1535 CALL lbc_lnk( zbathy, 'T', 1. )1557 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1536 1558 misfdep(:,:) = INT( zbathy(:,:) ) 1537 1559 1538 CALL lbc_lnk( risfdep,'T', 1. )1539 CALL lbc_lnk( bathy, 'T', 1. )1560 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1561 CALL lbc_lnk('toto', bathy, 'T', 1. ) 1540 1562 1541 1563 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1542 CALL lbc_lnk( zbathy, 'T', 1. )1564 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1543 1565 mbathy(:,:) = INT( zbathy(:,:) ) 1544 1566 ENDIF … … 1565 1587 IF( lk_mpp ) THEN 1566 1588 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1567 CALL lbc_lnk( zbathy, 'T', 1. )1589 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1568 1590 misfdep(:,:) = INT( zbathy(:,:) ) 1569 1591 1570 CALL lbc_lnk( risfdep,'T', 1. )1571 CALL lbc_lnk( bathy, 'T', 1. )1592 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1593 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1572 1594 1573 1595 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1574 CALL lbc_lnk( zbathy, 'T', 1. )1596 CALL lbc_lnk( 'toto',zbathy, 'T', 1. 
) 1575 1597 mbathy(:,:) = INT( zbathy(:,:) ) 1576 1598 ENDIF … … 1601 1623 IF( lk_mpp ) THEN 1602 1624 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1603 CALL lbc_lnk( zbathy, 'T', 1. )1625 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1604 1626 misfdep(:,:) = INT( zbathy(:,:) ) 1605 1627 1606 CALL lbc_lnk( risfdep, 'T', 1. )1607 CALL lbc_lnk( bathy, 'T', 1. )1628 CALL lbc_lnk( 'toto',risfdep, 'T', 1. ) 1629 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1608 1630 1609 1631 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1610 CALL lbc_lnk( zbathy, 'T', 1. )1632 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1611 1633 mbathy(:,:) = INT( zbathy(:,:) ) 1612 1634 ENDIF … … 1634 1656 IF( lk_mpp ) THEN 1635 1657 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1636 CALL lbc_lnk( zbathy, 'T', 1. )1658 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1637 1659 misfdep(:,:) = INT( zbathy(:,:) ) 1638 1660 1639 CALL lbc_lnk( risfdep, 'T', 1. )1640 CALL lbc_lnk( bathy, 'T', 1. )1661 CALL lbc_lnk( 'toto',risfdep, 'T', 1. ) 1662 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1641 1663 1642 1664 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1643 CALL lbc_lnk( zbathy, 'T', 1. )1665 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1644 1666 mbathy(:,:) = INT( zbathy(:,:) ) 1645 1667 ENDIF … … 1654 1676 IF( lk_mpp ) THEN 1655 1677 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1656 CALL lbc_lnk( zbathy, 'T', 1. )1678 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1657 1679 misfdep(:,:) = INT( zbathy(:,:) ) 1658 1680 1659 CALL lbc_lnk( risfdep, 'T', 1. )1660 CALL lbc_lnk( bathy, 'T', 1. )1681 CALL lbc_lnk('toto', risfdep, 'T', 1. ) 1682 CALL lbc_lnk('toto', bathy, 'T', 1. ) 1661 1683 1662 1684 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1663 CALL lbc_lnk( zbathy, 'T', 1. )1685 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1664 1686 mbathy(:,:) = INT( zbathy(:,:) ) 1665 1687 ENDIF … … 1674 1696 IF( lk_mpp ) THEN 1675 1697 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1676 CALL lbc_lnk( zbathy, 'T', 1. )1698 CALL lbc_lnk('toto', zbathy, 'T', 1. ) 1677 1699 misfdep(:,:) = INT( zbathy(:,:) ) 1678 1700 1679 CALL lbc_lnk( risfdep,'T', 1. )1680 CALL lbc_lnk( bathy, 'T', 1. )1701 CALL lbc_lnk('toto', risfdep,'T', 1. ) 1702 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1681 1703 1682 1704 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1683 CALL lbc_lnk( zbathy, 'T', 1. )1705 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1684 1706 mbathy(:,:) = INT( zbathy(:,:) ) 1685 1707 ENDIF … … 1694 1716 IF( lk_mpp ) THEN 1695 1717 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1696 CALL lbc_lnk( zbathy, 'T', 1. )1718 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1697 1719 misfdep(:,:) = INT( zbathy(:,:) ) 1698 1720 1699 CALL lbc_lnk( risfdep,'T', 1. )1700 CALL lbc_lnk( bathy, 'T', 1. )1721 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1722 CALL lbc_lnk('toto', bathy, 'T', 1. ) 1701 1723 1702 1724 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1703 CALL lbc_lnk( zbathy, 'T', 1. )1725 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1704 1726 mbathy(:,:) = INT( zbathy(:,:) ) 1705 1727 ENDIF … … 1714 1736 IF( lk_mpp ) THEN 1715 1737 zbathy(:,:) = FLOAT( misfdep(:,:) ) 1716 CALL lbc_lnk( zbathy, 'T', 1. )1738 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1717 1739 misfdep(:,:) = INT( zbathy(:,:) ) 1718 1740 1719 CALL lbc_lnk( risfdep,'T', 1. )1720 CALL lbc_lnk( bathy, 'T', 1. )1741 CALL lbc_lnk( 'toto',risfdep,'T', 1. ) 1742 CALL lbc_lnk( 'toto',bathy, 'T', 1. ) 1721 1743 1722 1744 zbathy(:,:) = FLOAT( mbathy(:,:) ) 1723 CALL lbc_lnk( zbathy, 'T', 1. )1745 CALL lbc_lnk( 'toto',zbathy, 'T', 1. ) 1724 1746 mbathy(:,:) = INT( zbathy(:,:) ) 1725 1747 ENDIF … … 1827 1849 ! ... 
on ik / ik-1 1828 1850 e3w_0 (ji,jj,ik ) = e3t_0 (ji,jj,ik) !2._wp * (gdept_0(ji,jj,ik) - gdepw_0(ji,jj,ik)) 1851 gdept_0(ji,jj,ik-1) = gdept_0(ji,jj,ik) - e3w_0(ji,jj,ik) 1829 1852 e3t_0 (ji,jj,ik-1) = gdepw_0(ji,jj,ik) - gdepw_1d(ik-1) 1830 ! The next line isn't required and doesn't affect results - included for consistency with bathymetry code 1831 gdep t_0(ji,jj,ik-1) = gdept_1d(ik-1)1853 e3w_0 (ji,jj,ik-1) = gdept_0(ji,jj,ik-1) - gdept_1d(ik-2) 1854 gdepw_0(ji,jj,ik-1) = gdepw_0(ji,jj,ik) - e3t_0(ji,jj,ik-1) 1832 1855 ENDIF 1833 1856 END DO … … 1857 1880 CALL wrk_dealloc( jpi, jpj, zmisfdep, zmbathy ) 1858 1881 ! 1859 IF( nn_timing == 1 ) CALL timing_stop('zgr_isf')1882 ! IF( nn_timing == 1 ) CALL timing_stop('zgr_isf') 1860 1883 ! 1861 1884 END SUBROUTINE zgr_isf … … 1919 1942 !!---------------------------------------------------------------------- 1920 1943 ! 1921 IF( nn_timing == 1 ) CALL timing_start('zgr_sco')1944 !! IF( nn_timing == 1 ) CALL timing_start('zgr_sco') 1922 1945 ! 1923 1946 CALL wrk_alloc( jpi,jpj, zenv, ztmp, zmsk, zri, zrj, zhbat , ztmpi1, ztmpi2, ztmpj1, ztmpj2 ) … … 2001 2024 2002 2025 ! apply lateral boundary condition CAUTION: keep the value when the lbc field is zero 2003 CALL lbc_lnk( zenv, 'T', 1._wp, 'no0' )2026 CALL lbc_lnk( 'toto',zenv, 'T', 1._wp, 'no0' ) 2004 2027 ! 2005 2028 ! smooth the bathymetry (if required) … … 2055 2078 END DO 2056 2079 END DO 2057 IF( lk_mpp ) CALL mpp_max( zrmax ) ! max over the global domain2080 ! IF( lk_mpp ) CALL mpp_max( zrmax ) ! max over the global domain 2058 2081 ! 2059 2082 IF(lwp)WRITE(numout,*) 'zgr_sco : iter= ',jl, ' rmax= ', zrmax … … 2065 2088 END DO 2066 2089 ! apply lateral boundary condition CAUTION: keep the value when the lbc field is zero 2067 CALL lbc_lnk( zenv, 'T', 1._wp, 'no0' )2090 CALL lbc_lnk( 'toto',zenv, 'T', 1._wp, 'no0' ) 2068 2091 ! ! ================ ! 2069 2092 END DO ! End loop ! … … 2109 2132 ! Apply lateral boundary condition 2110 2133 !!gm ! CAUTION: retain non zero value in the initial file this should be OK for orca cfg, not for EEL 2111 zhbat(:,:) = hbatu(:,:) ; CALL lbc_lnk( hbatu, 'U', 1._wp )2134 zhbat(:,:) = hbatu(:,:) ; CALL lbc_lnk('toto', hbatu, 'U', 1._wp ) 2112 2135 DO jj = 1, jpj 2113 2136 DO ji = 1, jpi … … 2119 2142 END DO 2120 2143 END DO 2121 zhbat(:,:) = hbatv(:,:) ; CALL lbc_lnk( hbatv, 'V', 1._wp )2144 zhbat(:,:) = hbatv(:,:) ; CALL lbc_lnk('toto', hbatv, 'V', 1._wp ) 2122 2145 DO jj = 1, jpj 2123 2146 DO ji = 1, jpi … … 2128 2151 END DO 2129 2152 END DO 2130 zhbat(:,:) = hbatf(:,:) ; CALL lbc_lnk( hbatf, 'F', 1._wp )2153 zhbat(:,:) = hbatf(:,:) ; CALL lbc_lnk('toto', hbatf, 'F', 1._wp ) 2131 2154 DO jj = 1, jpj 2132 2155 DO ji = 1, jpi … … 2176 2199 ENDIF 2177 2200 2178 CALL lbc_lnk( e3t_0 , 'T', 1._wp )2179 CALL lbc_lnk( e3u_0 , 'U', 1._wp )2180 CALL lbc_lnk( e3v_0 , 'V', 1._wp )2181 CALL lbc_lnk( e3f_0 , 'F', 1._wp )2182 CALL lbc_lnk( e3w_0 , 'W', 1._wp )2183 CALL lbc_lnk( e3uw_0, 'U', 1._wp )2184 CALL lbc_lnk( e3vw_0, 'V', 1._wp )2201 CALL lbc_lnk( 'toto',e3t_0 , 'T', 1._wp ) 2202 CALL lbc_lnk( 'toto',e3u_0 , 'U', 1._wp ) 2203 CALL lbc_lnk( 'toto',e3v_0 , 'V', 1._wp ) 2204 CALL lbc_lnk( 'toto',e3f_0 , 'F', 1._wp ) 2205 CALL lbc_lnk( 'toto',e3w_0 , 'W', 1._wp ) 2206 CALL lbc_lnk( 'toto',e3uw_0, 'U', 1._wp ) 2207 CALL lbc_lnk('toto', e3vw_0, 'V', 1._wp ) 2185 2208 ! 2186 2209 WHERE( e3t_0 (:,:,:) == 0._wp ) e3t_0 (:,:,:) = 1._wp … … 2314 2337 CALL wrk_dealloc( jpi, jpj, zenv, ztmp, zmsk, zri, zrj, zhbat , ztmpi1, ztmpi2, ztmpj1, ztmpj2 ) 2315 2338 ! 
2316 IF( nn_timing == 1 ) CALL timing_stop('zgr_sco')2339 !!! IF( nn_timing == 1 ) CALL timing_stop('zgr_sco') 2317 2340 ! 2318 2341 END SUBROUTINE zgr_sco … … 2585 2608 ENDDO 2586 2609 ! 2587 CALL lbc_lnk( e3t_0 ,'T',1.) ; CALL lbc_lnk(e3u_0 ,'T',1.)2588 CALL lbc_lnk( e3v_0 ,'T',1.) ; CALL lbc_lnk(e3f_0 ,'T',1.)2589 CALL lbc_lnk( e3w_0 ,'T',1.)2590 CALL lbc_lnk( e3uw_0,'T',1.) ; CALL lbc_lnk(e3vw_0,'T',1.)2610 CALL lbc_lnk('toto',e3t_0 ,'T',1.) ; CALL lbc_lnk('toto',e3u_0 ,'T',1.) 2611 CALL lbc_lnk('toto',e3v_0 ,'T',1.) ; CALL lbc_lnk('toto',e3f_0 ,'T',1.) 2612 CALL lbc_lnk('toto',e3w_0 ,'T',1.) 2613 CALL lbc_lnk('toto',e3uw_0,'T',1.) ; CALL lbc_lnk('toto',e3vw_0,'T',1.) 2591 2614 ! 2592 2615 CALL wrk_dealloc( jpi,jpj,jpk, z_gsigw3, z_gsigt3, z_gsi3w3 ) -
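Editor's note on the pattern above: the bulk of the changes in this file consist of (a) commenting out the nn_timing and mpp_sum bookkeeping and (b) adding a character string as the first argument of every lbc_lnk halo exchange, filled throughout with the placeholder 'toto'. A minimal sketch of the before/after calling convention, assuming the NEMO 4 generic lbc_lnk interface whose first argument names the calling routine (the routine name 'zexample' and the array zwrk are illustrative, not part of the changeset):

      ! illustrative sketch only: updated lbc_lnk calling convention
      REAL(wp), DIMENSION(jpi,jpj) :: zwrk            ! any 2D field needing a halo exchange
      !
      ! NEMO 3.6 style call, as it appeared before this revision:
      !     CALL lbc_lnk( zwrk, 'T', 1._wp )
      ! NEMO 4 style call, as introduced here: the first argument identifies the caller;
      ! 'toto' in the diff is clearly a placeholder for the real routine name
      CALL lbc_lnk( 'zexample', zwrk, 'T', 1._wp )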
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/in_out_manager.F90
r10725 r10727 18 18 PUBLIC 19 19 20 21 !22 20 !!---------------------------------------------------------------------- 23 21 !! namrun namelist parameters … … 30 28 LOGICAL :: ln_rstart !: start from (F) rest or (T) a restart file 31 29 LOGICAL :: ln_rst_list !: output restarts at list of times (T) or by frequency (F) 32 INTEGER :: nn_no !: job number33 30 INTEGER :: nn_rstctl !: control of the time step (0, 1 or 2) 34 31 INTEGER :: nn_rstssh = 0 !: hand made initilization of ssh or not (1/0) … … 46 43 LOGICAL :: ln_clobber !: clobber (overwrite) an existing file 47 44 INTEGER :: nn_chunksz !: chunksize (bytes) for NetCDF file (works only with iom_nf90 routines) 45 LOGICAL :: ln_xios_read !: use xios to read single file restart 46 INTEGER :: nn_wxios !: write resart using xios 0 - no, 1 - single, 2 - multiple file output 47 INTEGER :: nn_no !: Assimilation cycle 48 49 #if defined key_netcdf4 50 !!---------------------------------------------------------------------- 51 !! namnc4 namelist parameters (key_netcdf4) 52 !!---------------------------------------------------------------------- 53 ! The following four values determine the partitioning of the output fields 54 ! into netcdf4 chunks. They are unrelated to the nn_chunk_sz setting which is 55 ! for runtime optimisation. The individual netcdf4 chunks can be optionally 56 ! gzipped (recommended) leading to significant reductions in I/O volumes 57 ! !!!** variables only used with iom_nf90 routines and key_netcdf4 ** 58 INTEGER :: nn_nchunks_i !: number of chunks required in the i-dimension 59 INTEGER :: nn_nchunks_j !: number of chunks required in the j-dimension 60 INTEGER :: nn_nchunks_k !: number of chunks required in the k-dimension 61 INTEGER :: nn_nchunks_t !: number of chunks required in the t-dimension 62 LOGICAL :: ln_nc4zip !: netcdf4 usage: (T) chunk and compress output using the HDF5 sublayers of netcdf4 63 ! ! (F) ignore chunking request and use the netcdf4 library 64 ! ! to produce netcdf3-compatible files 65 #endif 66 48 67 !$AGRIF_DO_NOT_TREAT 49 68 TYPE(snc4_ctl) :: snc4set !: netcdf4 chunking control structure (always needed for decision making) … … 55 74 56 75 CHARACTER(lc) :: cexper !: experiment name used for output filename 57 INTEGER :: no !: job number58 76 INTEGER :: nrstdt !: control of the time step (0, 1 or 2) 59 77 INTEGER :: nit000 !: index of the first time step … … 71 89 INTEGER :: nitrst !: time step at which restart file should be written 72 90 LOGICAL :: lrst_oce !: logical to control the oce restart write 91 LOGICAL :: lrst_ice !: logical to control the ice restart write 73 92 INTEGER :: numror = 0 !: logical unit for ocean restart (read). Init to 0 is needed for SAS (in daymod.F90) 93 INTEGER :: numrir !: logical unit for ice restart (read) 74 94 INTEGER :: numrow !: logical unit for ocean restart (write) 95 INTEGER :: numriw !: logical unit for ice restart (write) 75 96 INTEGER :: nrst_lst !: number of restart to output next 76 97 … … 78 99 !! 
output monitoring 79 100 !!---------------------------------------------------------------------- 80 LOGICAL :: ln_ctl !: run control for debugging 81 INTEGER :: nn_timing !: run control for timing 82 INTEGER :: nn_diacfl !: flag whether to create CFL diagnostics 83 INTEGER :: nn_print !: level of print (0 no print) 84 INTEGER :: nn_ictls !: Start i indice for the SUM control 85 INTEGER :: nn_ictle !: End i indice for the SUM control 86 INTEGER :: nn_jctls !: Start j indice for the SUM control 87 INTEGER :: nn_jctle !: End j indice for the SUM control 88 INTEGER :: nn_isplt !: number of processors following i 89 INTEGER :: nn_jsplt !: number of processors following j 90 INTEGER :: nn_bench !: benchmark parameter (0/1) 91 INTEGER :: nn_bit_cmp = 0 !: bit reproducibility (0/1) 101 LOGICAL :: ln_ctl !: run control for debugging 102 TYPE :: sn_ctl !: optional use structure for finer control over output selection 103 LOGICAL :: l_config = .FALSE. !: activate/deactivate finer control 104 ! Note if l_config is True then ln_ctl is ignored. 105 ! Otherwise setting ln_ctl True is equivalent to setting 106 ! all the following logicals in this structure True 107 LOGICAL :: l_runstat = .FALSE. !: Produce/do not produce run.stat file (T/F) 108 LOGICAL :: l_trcstat = .FALSE. !: Produce/do not produce tracer.stat file (T/F) 109 LOGICAL :: l_oceout = .FALSE. !: Produce all ocean.outputs (T) or just one (F) 110 LOGICAL :: l_layout = .FALSE. !: Produce all layout.dat files (T) or just one (F) 111 LOGICAL :: l_mppout = .FALSE. !: Produce/do not produce mpp.output_XXXX files (T/F) 112 LOGICAL :: l_mpptop = .FALSE. !: Produce/do not produce mpp.top.output_XXXX files (T/F) 113 ! Optional subsetting of processor report files 114 ! Default settings of 0/1000000/1 should ensure all areas report. 115 ! Set to a more restrictive range to select specific areas 116 INTEGER :: procmin = 0 !: Minimum narea to output 117 INTEGER :: procmax = 1000000 !: Maximum narea to output 118 INTEGER :: procincr = 1 !: narea increment to output 119 INTEGER :: ptimincr = 1 !: timestep increment to output (time.step and run.stat) 120 END TYPE sn_ctl 92 121 122 TYPE (sn_ctl) :: sn_cfctl !: run control structure for selective output 123 LOGICAL :: ln_timing !: run control for timing 124 LOGICAL :: ln_diacfl !: flag whether to create CFL diagnostics 125 INTEGER :: nn_print !: level of print (0 no print) 126 INTEGER :: nn_ictls !: Start i indice for the SUM control 127 INTEGER :: nn_ictle !: End i indice for the SUM control 128 INTEGER :: nn_jctls !: Start j indice for the SUM control 129 INTEGER :: nn_jctle !: End j indice for the SUM control 130 INTEGER :: nn_isplt !: number of processors following i 131 INTEGER :: nn_jsplt !: number of processors following j 132 INTEGER :: nn_bench !: benchmark parameter (0/1) 133 INTEGER :: nn_bit_cmp = 0 !: bit reproducibility (0/1) 93 134 ! 94 INTEGER :: nprint, nictls, nictle, njctls, njctle, isplt, jsplt , nbench!: OLD namelist names135 INTEGER :: nprint, nictls, nictle, njctls, njctle, isplt, jsplt !: OLD namelist names 95 136 96 137 INTEGER :: ijsplt = 1 !: nb of local domain = nb of processors … … 101 142 INTEGER :: numstp = -1 !: logical unit for time step 102 143 INTEGER :: numtime = -1 !: logical unit for timing 103 INTEGER :: numout = 6 !: logical unit for output print; Set to stdout to ensure any early 104 ! 
output can be collected; do not change 144 INTEGER :: numout = 6 !: logical unit for output print; Set to stdout to ensure any 145 INTEGER :: numnul = -1 !: logical unit for /dev/null 146 ! ! early output can be collected; do not change 105 147 INTEGER :: numnam_ref = -1 !: logical unit for reference namelist 106 148 INTEGER :: numnam_cfg = -1 !: logical unit for configuration specific namelist … … 110 152 INTEGER :: numoni = -1 !: logical unit for Output Namelist Ice 111 153 INTEGER :: numevo_ice = -1 !: logical unit for ice variables (temp. evolution) 112 INTEGER :: num sol = -1 !: logical unit for solverstatistics154 INTEGER :: numrun = -1 !: logical unit for run statistics 113 155 INTEGER :: numdct_in = -1 !: logical unit for transports computing 114 156 INTEGER :: numdct_vol = -1 !: logical unit for voulume transports output … … 121 163 !! Run control 122 164 !!---------------------------------------------------------------------- 165 INTEGER :: no_print = 0 !: optional argument of fld_fill (if present, suppress some control print) 123 166 INTEGER :: nstop = 0 !: error flag (=number of reason for a premature stop run) 124 167 INTEGER :: nwarn = 0 !: warning flag (=number of warning found during the run) … … 132 175 LOGICAL :: lwp = .FALSE. !: boolean : true on the 1st processor only .OR. ln_ctl 133 176 LOGICAL :: lsp_area = .TRUE. !: to make a control print over a specific area 177 CHARACTER(lc) :: cxios_context !: context name used in xios 178 CHARACTER(lc) :: crxios_context !: context name used in xios to read restart 179 CHARACTER(lc) :: cwxios_context !: context name used in xios to write restart file 134 180 135 181 !!---------------------------------------------------------------------- 136 182 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 137 !! $Id: in_out_manager.F90 6140 2015-12-21 11:35:23Z timgraham$138 !! Software governed by the CeCILL licen ce (./LICENSE)183 !! $Id: in_out_manager.F90 10570 2019-01-24 15:14:49Z acc $ 184 !! Software governed by the CeCILL license (see ./LICENSE) 139 185 !!===================================================================== 140 186 END MODULE in_out_manager -
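Editor's note: the in_out_manager additions replace the old integer switches (nn_timing, nn_bench) with logicals (ln_timing, ln_diacfl) and introduce the sn_ctl structure for finer control over which MPI areas produce report files via the procmin/procmax/procincr window. A hedged sketch of how that window is intended to be evaluated (the logical ll_report and the surrounding code are illustrative, not part of the changeset):

      LOGICAL :: ll_report          ! does this MPI area write its own report files?
      !
      IF( sn_cfctl%l_config ) THEN          ! finer selection active: ln_ctl is ignored
         ll_report =       narea >= sn_cfctl%procmin                                 &
            &        .AND. narea <= sn_cfctl%procmax                                 &
            &        .AND. MOD( narea - sn_cfctl%procmin, sn_cfctl%procincr ) == 0
      ELSE                                  ! legacy behaviour driven by ln_ctl
         ll_report = ln_ctl
      ENDIF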
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/ioipsl.f90
r6951 r10727 6 6 ! See IOIPSL/IOIPSL_License_CeCILL.txt 7 7 ! 8 USE errioipsl 8 USE errioipsl 9 USE calendar 9 10 USE stringop 10 USE mathelp11 USE getincom12 USE calendar13 11 USE fliocom 14 USE flincom 15 USE histcom 16 USE restcom 12 17 13 END MODULE ioipsl -
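Editor's note: the ioipsl wrapper is slimmed down to the components DOMAINcfg actually uses (error handling, calendar, string utilities and fliocom). The calendar part is what callers such as iom.F90 pull in with USE ioipsl, ONLY : ju2ymds; a small usage sketch, assuming the standard IOIPSL calendar signatures (variable names are illustrative):

      USE ioipsl, ONLY : ymds2ju, ju2ymds    ! calendar conversions kept by this wrapper
      INTEGER  :: iyear, imonth, iday
      REAL(wp) :: zsec, zjulian
      !
      CALL ymds2ju( 1900, 1, 1, 0._wp, zjulian )                      ! date -> julian day
      CALL ju2ymds( zjulian + 365._wp, iyear, imonth, iday, zsec )    ! julian day -> date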
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/iom.F90
r10725 r10727 1 1 MODULE iom 2 !!===================================================================== 2 !!====================================================================== 3 3 !! *** MODULE iom *** 4 4 !! Input/Output manager : Library to read input files 5 !!==================================================================== 5 !!====================================================================== 6 6 !! History : 2.0 ! 2005-12 (J. Belier) Original code 7 7 !! 2.0 ! 2006-02 (S. Masson) Adaptation to NEMO 8 8 !! 3.0 ! 2007-07 (D. Storkey) Changes to iom_gettime 9 9 !! 3.4 ! 2012-12 (R. Bourdalle-Badie and G. Reffray) add C1D case 10 !!-------------------------------------------------------------------- 11 12 !!-------------------------------------------------------------------- 10 !! 3.6 ! 2014-15 DIMG format removed 11 !! 3.6 ! 2015-15 (J. Harle) Added procedure to read REAL attributes 12 !! 4.0 ! 2017-11 (M. Andrejczuk) Extend IOM interface to write any 3D fields 13 !!---------------------------------------------------------------------- 14 15 !!---------------------------------------------------------------------- 13 16 !! iom_open : open a file read only 14 17 !! iom_close : close a file or all files opened by iom 15 18 !! iom_get : read a field (interfaced to several routines) 16 !! iom_gettime : read the time axis cdvar in the file17 19 !! iom_varid : get the id of a variable in a file 18 20 !! iom_rstput : write a field in a restart file (interfaced to several routines) 19 !!-------------------------------------------------------------------- 21 !!---------------------------------------------------------------------- 20 22 USE dom_oce ! ocean space and time domain 21 23 USE lbclnk ! lateal boundary condition / mpp exchanges … … 24 26 USE in_out_manager ! I/O manager 25 27 USE lib_mpp ! MPP library 28 #if defined key_iomput 29 USE sbc_oce , ONLY : nn_fsbc ! ocean space and time domain 30 USE trc_oce , ONLY : nn_dttrc ! !: frequency of step on passive tracers 31 USE icb_oce , ONLY : nclasses, class_num ! !: iceberg classes 32 #if defined key_si3 33 USE ice , ONLY : jpl 34 #endif 35 USE domngb ! ocean space and time domain 36 USE phycst ! physical constants 37 USE dianam ! build name of file 38 USE xios 39 # endif 40 USE ioipsl, ONLY : ju2ymds ! for calendar 41 #if defined key_top 42 USE trc, ONLY : profsed 43 #endif 44 USE lib_fortran 26 45 27 46 IMPLICIT NONE 28 47 PUBLIC ! must be public to be able to access iom_def through iom 29 48 49 #if defined key_iomput 50 LOGICAL, PUBLIC, PARAMETER :: lk_iomput = .TRUE. !: iom_put flag 51 #else 30 52 LOGICAL, PUBLIC, PARAMETER :: lk_iomput = .FALSE. 
!: iom_put flag 31 PUBLIC iom_init, iom_swap, iom_open, iom_close, iom_setkt, iom_varid, iom_get, iom_gettime, iom_rstput, iom_put 32 PUBLIC iom_getatt, iom_use, iom_context_finalize 53 #endif 54 PUBLIC iom_init, iom_swap, iom_open, iom_close, iom_setkt, iom_varid, iom_get 55 PUBLIC iom_chkatt, iom_getatt, iom_putatt, iom_getszuld, iom_rstput, iom_delay_rst, iom_put 56 PUBLIC iom_use, iom_context_finalize 33 57 34 58 PRIVATE iom_rp0d, iom_rp1d, iom_rp2d, iom_rp3d 35 59 PRIVATE iom_g0d, iom_g1d, iom_g2d, iom_g3d, iom_get_123d 36 60 PRIVATE iom_p1d, iom_p2d, iom_p3d 61 #if defined key_iomput 62 PRIVATE iom_set_domain_attr, iom_set_axis_attr, iom_set_field_attr, iom_set_file_attr, iom_get_file_attr, iom_set_grid_attr 63 PRIVATE set_grid, set_grid_bounds, set_scalar, set_xmlatt, set_mooring, iom_update_file_name, iom_sdate 64 PRIVATE iom_set_rst_context, iom_set_rstw_active, iom_set_rstr_active 65 # endif 66 PUBLIC iom_set_rstw_var_active, iom_set_rstw_core, iom_set_rst_vars 37 67 38 68 INTERFACE iom_get … … 40 70 END INTERFACE 41 71 INTERFACE iom_getatt 42 MODULE PROCEDURE iom_g0d_intatt 72 MODULE PROCEDURE iom_g0d_iatt, iom_g1d_iatt, iom_g0d_ratt, iom_g1d_ratt, iom_g0d_catt 73 END INTERFACE 74 INTERFACE iom_putatt 75 MODULE PROCEDURE iom_p0d_iatt, iom_p1d_iatt, iom_p0d_ratt, iom_p1d_ratt, iom_p0d_catt 43 76 END INTERFACE 44 77 INTERFACE iom_rstput 45 78 MODULE PROCEDURE iom_rp0d, iom_rp1d, iom_rp2d, iom_rp3d 46 79 END INTERFACE 47 INTERFACE iom_put48 MODULE PROCEDURE iom_p0d, iom_p1d, iom_p2d, iom_p3d49 END INTERFACE50 80 INTERFACE iom_put 81 MODULE PROCEDURE iom_p0d, iom_p1d, iom_p2d, iom_p3d 82 END INTERFACE iom_put 83 51 84 !!---------------------------------------------------------------------- 52 85 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 53 !! $Id: iom.F90 8572 2017-09-28 08:27:06Z cbricaud$54 !! Software governed by the CeCILL licen ce (./LICENSE)86 !! $Id: iom.F90 10523 2019-01-16 09:36:03Z smasson $ 87 !! Software governed by the CeCILL license (see ./LICENSE) 55 88 !!---------------------------------------------------------------------- 56 57 89 CONTAINS 58 90 59 SUBROUTINE iom_init( cdname )91 SUBROUTINE iom_init( cdname, fname, ld_tmppatch ) 60 92 !!---------------------------------------------------------------------- 61 93 !! *** ROUTINE *** … … 64 96 !! 65 97 !!---------------------------------------------------------------------- 66 CHARACTER(len=*), INTENT(in) :: cdname 67 98 CHARACTER(len=*), INTENT(in) :: cdname 99 CHARACTER(len=*), OPTIONAL, INTENT(in) :: fname 100 LOGICAL , OPTIONAL, INTENT(in) :: ld_tmppatch 101 #if defined key_iomput 102 ! 103 TYPE(xios_duration) :: dtime = xios_duration(0, 0, 0, 0, 0, 0) 104 TYPE(xios_date) :: start_date 105 CHARACTER(len=lc) :: clname 106 INTEGER :: ji, jkmin 107 LOGICAL :: llrst_context ! is context related to restart 108 ! 109 REAL(wp), ALLOCATABLE, DIMENSION(:,:) :: zt_bnds, zw_bnds 110 LOGICAL :: ll_tmppatch = .TRUE. !: seb: patch before we remove periodicity 111 INTEGER :: nldi_save, nlei_save !: and close boundaries in output files 112 INTEGER :: nldj_save, nlej_save !: 113 !!---------------------------------------------------------------------- 114 ! 115 ! seb: patch before we remove periodicity and close boundaries in output files 116 IF( PRESENT(ld_tmppatch) ) THEN ; ll_tmppatch = ld_tmppatch 117 ELSE ; ll_tmppatch = .TRUE. 
118 ENDIF 119 IF ( ll_tmppatch ) THEN 120 nldi_save = nldi ; nlei_save = nlei 121 nldj_save = nldj ; nlej_save = nlej 122 IF( nimpp == 1 ) nldi = 1 123 IF( nimpp + jpi - 1 == jpiglo ) nlei = jpi 124 IF( njmpp == 1 ) nldj = 1 125 IF( njmpp + jpj - 1 == jpjglo ) nlej = jpj 126 ENDIF 127 ! 128 ALLOCATE( zt_bnds(2,jpk), zw_bnds(2,jpk) ) 129 ! 130 clname = cdname 131 IF( TRIM(Agrif_CFixed()) /= '0' ) clname = TRIM(Agrif_CFixed())//"_"//TRIM(cdname) 132 CALL xios_context_initialize(TRIM(clname), mpi_comm_oce) 133 CALL iom_swap( cdname ) 134 llrst_context = (TRIM(cdname) == TRIM(crxios_context) .OR. TRIM(cdname) == TRIM(cwxios_context)) 135 136 ! Calendar type is now defined in xml file 137 SELECT CASE ( nleapy ) ! Choose calendar for IOIPSL 138 CASE ( 1) ; CALL xios_define_calendar( TYPE = "Gregorian", time_origin = xios_date(1900,01,01,00,00,00), & 139 & start_date = xios_date(nyear,nmonth,nday,0,0,0) ) 140 CASE ( 0) ; CALL xios_define_calendar( TYPE = "NoLeap" , time_origin = xios_date(1900,01,01,00,00,00), & 141 & start_date = xios_date(nyear,nmonth,nday,0,0,0) ) 142 CASE (30) ; CALL xios_define_calendar( TYPE = "D360" , time_origin = xios_date(1900,01,01,00,00,00), & 143 & start_date = xios_date(nyear,nmonth,nday,0,0,0) ) 144 END SELECT 145 146 ! horizontal grid definition 147 IF(.NOT.llrst_context) CALL set_scalar 148 ! 149 IF( TRIM(cdname) == TRIM(cxios_context) ) THEN 150 CALL set_grid( "T", glamt, gphit, .FALSE., .FALSE. ) 151 CALL set_grid( "U", glamu, gphiu, .FALSE., .FALSE. ) 152 CALL set_grid( "V", glamv, gphiv, .FALSE., .FALSE. ) 153 CALL set_grid( "W", glamt, gphit, .FALSE., .FALSE. ) 154 CALL set_grid_znl( gphit ) 155 ! 156 IF( ln_cfmeta ) THEN ! Add additional grid metadata 157 CALL iom_set_domain_attr("grid_T", area = e1e2t(nldi:nlei, nldj:nlej)) 158 CALL iom_set_domain_attr("grid_U", area = e1e2u(nldi:nlei, nldj:nlej)) 159 CALL iom_set_domain_attr("grid_V", area = e1e2v(nldi:nlei, nldj:nlej)) 160 CALL iom_set_domain_attr("grid_W", area = e1e2t(nldi:nlei, nldj:nlej)) 161 CALL set_grid_bounds( "T", glamf, gphif, glamt, gphit ) 162 CALL set_grid_bounds( "U", glamv, gphiv, glamu, gphiu ) 163 CALL set_grid_bounds( "V", glamu, gphiu, glamv, gphiv ) 164 CALL set_grid_bounds( "W", glamf, gphif, glamt, gphit ) 165 ENDIF 166 ENDIF 167 ! 168 IF( TRIM(cdname) == TRIM(cxios_context)//"_crs" ) THEN 169 CALL dom_grid_crs ! Save the parent grid information & Switch to coarse grid domain 170 ! 171 CALL set_grid( "T", glamt_crs, gphit_crs, .FALSE., .FALSE. ) 172 CALL set_grid( "U", glamu_crs, gphiu_crs, .FALSE., .FALSE. ) 173 CALL set_grid( "V", glamv_crs, gphiv_crs, .FALSE., .FALSE. ) 174 CALL set_grid( "W", glamt_crs, gphit_crs, .FALSE., .FALSE. ) 175 CALL set_grid_znl( gphit_crs ) 176 ! 177 CALL dom_grid_glo ! Return to parent grid domain 178 ! 179 IF( ln_cfmeta .AND. .NOT. llrst_context) THEN ! 
Add additional grid metadata 180 CALL iom_set_domain_attr("grid_T", area = e1e2t_crs(nldi:nlei, nldj:nlej)) 181 CALL iom_set_domain_attr("grid_U", area = e1u_crs(nldi:nlei, nldj:nlej) * e2u_crs(nldi:nlei, nldj:nlej)) 182 CALL iom_set_domain_attr("grid_V", area = e1v_crs(nldi:nlei, nldj:nlej) * e2v_crs(nldi:nlei, nldj:nlej)) 183 CALL iom_set_domain_attr("grid_W", area = e1e2t_crs(nldi:nlei, nldj:nlej)) 184 CALL set_grid_bounds( "T", glamf_crs, gphif_crs, glamt_crs, gphit_crs ) 185 CALL set_grid_bounds( "U", glamv_crs, gphiv_crs, glamu_crs, gphiu_crs ) 186 CALL set_grid_bounds( "V", glamu_crs, gphiu_crs, glamv_crs, gphiv_crs ) 187 CALL set_grid_bounds( "W", glamf_crs, gphif_crs, glamt_crs, gphit_crs ) 188 ENDIF 189 ENDIF 190 ! 191 ! vertical grid definition 192 IF(.NOT.llrst_context) THEN 193 CALL iom_set_axis_attr( "deptht", paxis = gdept_1d ) 194 CALL iom_set_axis_attr( "depthu", paxis = gdept_1d ) 195 CALL iom_set_axis_attr( "depthv", paxis = gdept_1d ) 196 CALL iom_set_axis_attr( "depthw", paxis = gdepw_1d ) 197 198 ! Add vertical grid bounds 199 jkmin = MIN(2,jpk) ! in case jpk=1 (i.e. sas2D) 200 zt_bnds(2,: ) = gdept_1d(:) 201 zt_bnds(1,jkmin:jpk) = gdept_1d(1:jpkm1) 202 zt_bnds(1,1 ) = gdept_1d(1) - e3w_1d(1) 203 zw_bnds(1,: ) = gdepw_1d(:) 204 zw_bnds(2,1:jpkm1 ) = gdepw_1d(jkmin:jpk) 205 zw_bnds(2,jpk: ) = gdepw_1d(jpk) + e3t_1d(jpk) 206 CALL iom_set_axis_attr( "deptht", bounds=zw_bnds ) 207 CALL iom_set_axis_attr( "depthu", bounds=zw_bnds ) 208 CALL iom_set_axis_attr( "depthv", bounds=zw_bnds ) 209 CALL iom_set_axis_attr( "depthw", bounds=zt_bnds ) 210 ! 211 # if defined key_floats 212 CALL iom_set_axis_attr( "nfloat", (/ (REAL(ji,wp), ji=1,jpnfl) /) ) 213 # endif 214 # if defined key_si3 215 CALL iom_set_axis_attr( "ncatice", (/ (REAL(ji,wp), ji=1,jpl) /) ) 216 ! SIMIP diagnostics (4 main arctic straits) 217 CALL iom_set_axis_attr( "nstrait", (/ (REAL(ji,wp), ji=1,4) /) ) 218 # endif 219 #if defined key_top 220 CALL iom_set_axis_attr( "profsed", paxis = profsed ) 221 #endif 222 CALL iom_set_axis_attr( "icbcla", class_num ) 223 CALL iom_set_axis_attr( "iax_20C", (/ REAL(20,wp) /) ) 224 CALL iom_set_axis_attr( "iax_28C", (/ REAL(28,wp) /) ) 225 ENDIF 226 ! 227 ! automatic definitions of some of the xml attributs 228 IF( TRIM(cdname) == TRIM(crxios_context) ) THEN 229 !set names of the fields in restart file IF using XIOS to read data 230 CALL iom_set_rst_context(.TRUE.) 231 CALL iom_set_rst_vars(rst_rfields) 232 !set which fields are to be read from restart file 233 CALL iom_set_rstr_active() 234 ELSE IF( TRIM(cdname) == TRIM(cwxios_context) ) THEN 235 !set names of the fields in restart file IF using XIOS to write data 236 CALL iom_set_rst_context(.FALSE.) 237 CALL iom_set_rst_vars(rst_wfields) 238 !set which fields are to be written to a restart file 239 CALL iom_set_rstw_active(fname) 240 ELSE 241 CALL set_xmlatt 242 ENDIF 243 ! 244 ! end file definition 245 dtime%second = rdt 246 CALL xios_set_timestep( dtime ) 247 CALL xios_close_context_definition() 248 CALL xios_update_calendar( 0 ) 249 ! 250 DEALLOCATE( zt_bnds, zw_bnds ) 251 ! 252 IF ( ll_tmppatch ) THEN 253 nldi = nldi_save ; nlei = nlei_save 254 nldj = nldj_save ; nlej = nlej_save 255 ENDIF 256 #endif 257 ! 68 258 END SUBROUTINE iom_init 69 259 260 SUBROUTINE iom_set_rstw_var_active(field) 261 !!--------------------------------------------------------------------- 262 !! *** SUBROUTINE iom_set_rstw_var_active *** 263 !! 264 !! 
** Purpose : enable variable in restart file when writing with XIOS 265 !!--------------------------------------------------------------------- 266 CHARACTER(len = *), INTENT(IN) :: field 267 INTEGER :: i 268 LOGICAL :: llis_set 269 CHARACTER(LEN=256) :: clinfo ! info character 270 271 #if defined key_iomput 272 llis_set = .FALSE. 273 274 DO i = 1, max_rst_fields 275 IF(TRIM(rst_wfields(i)%vname) == field) THEN 276 rst_wfields(i)%active = .TRUE. 277 llis_set = .TRUE. 278 EXIT 279 ENDIF 280 ENDDO 281 !Warn if variable is not in defined in rst_wfields 282 IF(.NOT.llis_set) THEN 283 WRITE(ctmp1,*) 'iom_set_rstw_var_active: variable ', field ,' is available for writing but not defined' 284 CALL ctl_stop( 'iom_set_rstw_var_active:', ctmp1 ) 285 ENDIF 286 #else 287 clinfo = 'iom_set_rstw_var_active: key_iomput is needed to use XIOS restart read/write functionality' 288 CALL ctl_stop('STOP', TRIM(clinfo)) 289 #endif 290 291 END SUBROUTINE iom_set_rstw_var_active 292 293 SUBROUTINE iom_set_rstr_active() 294 !!--------------------------------------------------------------------- 295 !! *** SUBROUTINE iom_set_rstr_active *** 296 !! 297 !! ** Purpose : define file name in XIOS context for reading restart file, 298 !! enable variables present in restart file for reading with XIOS 299 !!--------------------------------------------------------------------- 300 301 !sets enabled = .TRUE. for each field in restart file 302 CHARACTER(len=256) :: rst_file 303 304 #if defined key_iomput 305 TYPE(xios_field) :: field_hdl 306 TYPE(xios_file) :: file_hdl 307 TYPE(xios_filegroup) :: filegroup_hdl 308 INTEGER :: i 309 CHARACTER(lc) :: clpath 310 311 clpath = TRIM(cn_ocerst_indir) 312 IF( clpath(LEN_TRIM(clpath):) /= '/' ) clpath = TRIM(clpath) // '/' 313 IF( TRIM(Agrif_CFixed()) == '0' ) THEN 314 rst_file = TRIM(clpath)//TRIM(cn_ocerst_in) 315 ELSE 316 rst_file = TRIM(clpath)//'1_'//TRIM(cn_ocerst_in) 317 ENDIF 318 !set name of the restart file and enable available fields 319 if(lwp) WRITE(numout,*) 'Setting restart filename (for XIOS) to: ',rst_file 320 CALL xios_get_handle("file_definition", filegroup_hdl ) 321 CALL xios_add_child(filegroup_hdl, file_hdl, 'rrestart') 322 CALL xios_set_file_attr( "rrestart", name=trim(rst_file), type="one_file", & 323 par_access="collective", enabled=.TRUE., mode="read", & 324 output_freq=xios_timestep) 325 !define variables for restart context 326 DO i = 1, max_rst_fields 327 IF( TRIM(rst_rfields(i)%vname) /= "NO_NAME") THEN 328 IF( iom_varid( numror, TRIM(rst_rfields(i)%vname), ldstop = .FALSE. 
) > 0 ) THEN 329 CALL xios_add_child(file_hdl, field_hdl, TRIM(rst_rfields(i)%vname)) 330 SELECT CASE (TRIM(rst_rfields(i)%grid)) 331 CASE ("grid_N_3D") 332 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_rfields(i)%vname), & 333 domain_ref="grid_N", axis_ref="nav_lev", operation = "instant") 334 CASE ("grid_N") 335 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_rfields(i)%vname), & 336 domain_ref="grid_N", operation = "instant") 337 CASE ("grid_vector") 338 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_rfields(i)%vname), & 339 axis_ref="nav_lev", operation = "instant") 340 CASE ("grid_scalar") 341 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_rfields(i)%vname), & 342 scalar_ref = "grid_scalar", operation = "instant") 343 END SELECT 344 IF(lwp) WRITE(numout,*) 'XIOS read: ', TRIM(rst_rfields(i)%vname), ' enabled in ', TRIM(rst_file) 345 ENDIF 346 ENDIF 347 END DO 348 #endif 349 END SUBROUTINE iom_set_rstr_active 350 351 SUBROUTINE iom_set_rstw_core(cdmdl) 352 !!--------------------------------------------------------------------- 353 !! *** SUBROUTINE iom_set_rstw_core *** 354 !! 355 !! ** Purpose : set variables which are always in restart file 356 !!--------------------------------------------------------------------- 357 CHARACTER (len=*), INTENT (IN) :: cdmdl ! model OPA or SAS 358 CHARACTER(LEN=256) :: clinfo ! info character 359 #if defined key_iomput 360 IF(cdmdl == "OPA") THEN 361 !from restart.F90 362 CALL iom_set_rstw_var_active("rdt") 363 IF ( .NOT. ln_diurnal_only ) THEN 364 CALL iom_set_rstw_var_active('ub' ) 365 CALL iom_set_rstw_var_active('vb' ) 366 CALL iom_set_rstw_var_active('tb' ) 367 CALL iom_set_rstw_var_active('sb' ) 368 CALL iom_set_rstw_var_active('sshb') 369 ! 370 CALL iom_set_rstw_var_active('un' ) 371 CALL iom_set_rstw_var_active('vn' ) 372 CALL iom_set_rstw_var_active('tn' ) 373 CALL iom_set_rstw_var_active('sn' ) 374 CALL iom_set_rstw_var_active('sshn') 375 CALL iom_set_rstw_var_active('rhop') 376 ! extra variable needed for the ice sheet coupling 377 IF ( ln_iscpl ) THEN 378 CALL iom_set_rstw_var_active('tmask') 379 CALL iom_set_rstw_var_active('umask') 380 CALL iom_set_rstw_var_active('vmask') 381 CALL iom_set_rstw_var_active('smask') 382 CALL iom_set_rstw_var_active('e3t_n') 383 CALL iom_set_rstw_var_active('e3u_n') 384 CALL iom_set_rstw_var_active('e3v_n') 385 CALL iom_set_rstw_var_active('gdepw_n') 386 END IF 387 ENDIF 388 IF(ln_diurnal) CALL iom_set_rstw_var_active('Dsst') 389 !from trasbc.F90 390 CALL iom_set_rstw_var_active('sbc_hc_b') 391 CALL iom_set_rstw_var_active('sbc_sc_b') 392 ENDIF 393 #else 394 clinfo = 'iom_set_rstw_core: key_iomput is needed to use XIOS restart read/write functionality' 395 CALL ctl_stop('STOP', TRIM(clinfo)) 396 #endif 397 END SUBROUTINE iom_set_rstw_core 398 399 SUBROUTINE iom_set_rst_vars(fields) 400 !!--------------------------------------------------------------------- 401 !! *** SUBROUTINE iom_set_rst_vars *** 402 !! 403 !! ** Purpose : Fill array fields with the information about all 404 !! possible variables and corresponding grids definition 405 !! 
for reading/writing restart with XIOS 406 !!--------------------------------------------------------------------- 407 TYPE(RST_FIELD), INTENT(INOUT) :: fields(max_rst_fields) 408 INTEGER :: i 409 410 i = 0 411 i = i + 1; fields(i)%vname="rdt"; fields(i)%grid="grid_scalar" 412 i = i + 1; fields(i)%vname="un"; fields(i)%grid="grid_N_3D" 413 i = i + 1; fields(i)%vname="ub"; fields(i)%grid="grid_N_3D" 414 i = i + 1; fields(i)%vname="vn"; fields(i)%grid="grid_N_3D" 415 i = i + 1; fields(i)%vname="vb"; fields(i)%grid="grid_N_3D" 416 i = i + 1; fields(i)%vname="tn"; fields(i)%grid="grid_N_3D" 417 i = i + 1; fields(i)%vname="tb"; fields(i)%grid="grid_N_3D" 418 i = i + 1; fields(i)%vname="sn"; fields(i)%grid="grid_N_3D" 419 i = i + 1; fields(i)%vname="sb"; fields(i)%grid="grid_N_3D" 420 i = i + 1; fields(i)%vname="sshn"; fields(i)%grid="grid_N" 421 i = i + 1; fields(i)%vname="sshb"; fields(i)%grid="grid_N" 422 i = i + 1; fields(i)%vname="rhop"; fields(i)%grid="grid_N_3D" 423 i = i + 1; fields(i)%vname="kt"; fields(i)%grid="grid_scalar" 424 i = i + 1; fields(i)%vname="ndastp"; fields(i)%grid="grid_scalar" 425 i = i + 1; fields(i)%vname="adatrj"; fields(i)%grid="grid_scalar" 426 i = i + 1; fields(i)%vname="utau_b"; fields(i)%grid="grid_N" 427 i = i + 1; fields(i)%vname="vtau_b"; fields(i)%grid="grid_N" 428 i = i + 1; fields(i)%vname="qns_b"; fields(i)%grid="grid_N" 429 i = i + 1; fields(i)%vname="emp_b"; fields(i)%grid="grid_N" 430 i = i + 1; fields(i)%vname="sfx_b"; fields(i)%grid="grid_N" 431 i = i + 1; fields(i)%vname="en" ; fields(i)%grid="grid_N_3D" 432 i = i + 1; fields(i)%vname="avt_k"; fields(i)%grid="grid_N_3D" 433 i = i + 1; fields(i)%vname="avm_k"; fields(i)%grid="grid_N_3D" 434 i = i + 1; fields(i)%vname="dissl"; fields(i)%grid="grid_N_3D" 435 i = i + 1; fields(i)%vname="sbc_hc_b"; fields(i)%grid="grid_N" 436 i = i + 1; fields(i)%vname="sbc_sc_b"; fields(i)%grid="grid_N" 437 i = i + 1; fields(i)%vname="qsr_hc_b"; fields(i)%grid="grid_N_3D" 438 i = i + 1; fields(i)%vname="fraqsr_1lev"; fields(i)%grid="grid_N" 439 i = i + 1; fields(i)%vname="greenland_icesheet_mass" 440 fields(i)%grid="grid_scalar" 441 i = i + 1; fields(i)%vname="greenland_icesheet_timelapsed" 442 fields(i)%grid="grid_scalar" 443 i = i + 1; fields(i)%vname="greenland_icesheet_mass_roc" 444 fields(i)%grid="grid_scalar" 445 i = i + 1; fields(i)%vname="antarctica_icesheet_mass" 446 fields(i)%grid="grid_scalar" 447 i = i + 1; fields(i)%vname="antarctica_icesheet_timelapsed" 448 fields(i)%grid="grid_scalar" 449 i = i + 1; fields(i)%vname="antarctica_icesheet_mass_roc" 450 fields(i)%grid="grid_scalar" 451 i = i + 1; fields(i)%vname="frc_v"; fields(i)%grid="grid_scalar" 452 i = i + 1; fields(i)%vname="frc_t"; fields(i)%grid="grid_scalar" 453 i = i + 1; fields(i)%vname="frc_s"; fields(i)%grid="grid_scalar" 454 i = i + 1; fields(i)%vname="frc_wn_t"; fields(i)%grid="grid_scalar" 455 i = i + 1; fields(i)%vname="frc_wn_s"; fields(i)%grid="grid_scalar" 456 i = i + 1; fields(i)%vname="ssh_ini"; fields(i)%grid="grid_N" 457 i = i + 1; fields(i)%vname="e3t_ini"; fields(i)%grid="grid_N_3D" 458 i = i + 1; fields(i)%vname="hc_loc_ini"; fields(i)%grid="grid_N_3D" 459 i = i + 1; fields(i)%vname="sc_loc_ini"; fields(i)%grid="grid_N_3D" 460 i = i + 1; fields(i)%vname="ssh_hc_loc_ini"; fields(i)%grid="grid_N" 461 i = i + 1; fields(i)%vname="ssh_sc_loc_ini"; fields(i)%grid="grid_N" 462 i = i + 1; fields(i)%vname="tilde_e3t_b"; fields(i)%grid="grid_N" 463 i = i + 1; fields(i)%vname="tilde_e3t_n"; fields(i)%grid="grid_N" 464 i = i + 1; 
fields(i)%vname="hdiv_lf"; fields(i)%grid="grid_N" 465 i = i + 1; fields(i)%vname="ub2_b"; fields(i)%grid="grid_N" 466 i = i + 1; fields(i)%vname="vb2_b"; fields(i)%grid="grid_N" 467 i = i + 1; fields(i)%vname="sshbb_e"; fields(i)%grid="grid_N" 468 i = i + 1; fields(i)%vname="ubb_e"; fields(i)%grid="grid_N" 469 i = i + 1; fields(i)%vname="vbb_e"; fields(i)%grid="grid_N" 470 i = i + 1; fields(i)%vname="sshb_e"; fields(i)%grid="grid_N" 471 i = i + 1; fields(i)%vname="ub_e"; fields(i)%grid="grid_N" 472 i = i + 1; fields(i)%vname="vb_e"; fields(i)%grid="grid_N" 473 i = i + 1; fields(i)%vname="fwf_isf_b"; fields(i)%grid="grid_N" 474 i = i + 1; fields(i)%vname="isf_sc_b"; fields(i)%grid="grid_N" 475 i = i + 1; fields(i)%vname="isf_hc_b"; fields(i)%grid="grid_N" 476 i = i + 1; fields(i)%vname="ssh_ibb"; fields(i)%grid="grid_N" 477 i = i + 1; fields(i)%vname="rnf_b"; fields(i)%grid="grid_N" 478 i = i + 1; fields(i)%vname="rnf_hc_b"; fields(i)%grid="grid_N" 479 i = i + 1; fields(i)%vname="rnf_sc_b"; fields(i)%grid="grid_N" 480 i = i + 1; fields(i)%vname="nn_fsbc"; fields(i)%grid="grid_scalar" 481 i = i + 1; fields(i)%vname="ssu_m"; fields(i)%grid="grid_N" 482 i = i + 1; fields(i)%vname="ssv_m"; fields(i)%grid="grid_N" 483 i = i + 1; fields(i)%vname="sst_m"; fields(i)%grid="grid_N" 484 i = i + 1; fields(i)%vname="sss_m"; fields(i)%grid="grid_N" 485 i = i + 1; fields(i)%vname="ssh_m"; fields(i)%grid="grid_N" 486 i = i + 1; fields(i)%vname="e3t_m"; fields(i)%grid="grid_N" 487 i = i + 1; fields(i)%vname="frq_m"; fields(i)%grid="grid_N" 488 i = i + 1; fields(i)%vname="avmb"; fields(i)%grid="grid_vector" 489 i = i + 1; fields(i)%vname="avtb"; fields(i)%grid="grid_vector" 490 i = i + 1; fields(i)%vname="ub2_i_b"; fields(i)%grid="grid_N" 491 i = i + 1; fields(i)%vname="vb2_i_b"; fields(i)%grid="grid_N" 492 i = i + 1; fields(i)%vname="ntime"; fields(i)%grid="grid_scalar" 493 i = i + 1; fields(i)%vname="Dsst"; fields(i)%grid="grid_scalar" 494 i = i + 1; fields(i)%vname="tmask"; fields(i)%grid="grid_N_3D" 495 i = i + 1; fields(i)%vname="umask"; fields(i)%grid="grid_N_3D" 496 i = i + 1; fields(i)%vname="vmask"; fields(i)%grid="grid_N_3D" 497 i = i + 1; fields(i)%vname="smask"; fields(i)%grid="grid_N_3D" 498 i = i + 1; fields(i)%vname="gdepw_n"; fields(i)%grid="grid_N_3D" 499 i = i + 1; fields(i)%vname="e3t_n"; fields(i)%grid="grid_N_3D" 500 i = i + 1; fields(i)%vname="e3u_n"; fields(i)%grid="grid_N_3D" 501 i = i + 1; fields(i)%vname="e3v_n"; fields(i)%grid="grid_N_3D" 502 i = i + 1; fields(i)%vname="surf_ini"; fields(i)%grid="grid_N" 503 i = i + 1; fields(i)%vname="e3t_b"; fields(i)%grid="grid_N_3D" 504 i = i + 1; fields(i)%vname="hmxl_n"; fields(i)%grid="grid_N_3D" 505 i = i + 1; fields(i)%vname="un_bf"; fields(i)%grid="grid_N" 506 i = i + 1; fields(i)%vname="vn_bf"; fields(i)%grid="grid_N" 507 i = i + 1; fields(i)%vname="hbl"; fields(i)%grid="grid_N" 508 i = i + 1; fields(i)%vname="hbli"; fields(i)%grid="grid_N" 509 i = i + 1; fields(i)%vname="wn"; fields(i)%grid="grid_N_3D" 510 511 IF( i-1 > max_rst_fields) THEN 512 WRITE(ctmp1,*) 'E R R O R : iom_set_rst_vars SIZE of RST_FIELD array is too small' 513 CALL ctl_stop( 'iom_set_rst_vars:', ctmp1 ) 514 ENDIF 515 END SUBROUTINE iom_set_rst_vars 516 517 518 SUBROUTINE iom_set_rstw_active(cdrst_file) 519 !!--------------------------------------------------------------------- 520 !! *** SUBROUTINE iom_set_rstw_active *** 521 !! 522 !! ** Purpose : define file name in XIOS context for writing restart 523 !! 
enable variables present in restart file for writing 524 !!--------------------------------------------------------------------- 525 !sets enabled = .TRUE. for each field in restart file 526 CHARACTER(len=*) :: cdrst_file 527 #if defined key_iomput 528 TYPE(xios_field) :: field_hdl 529 TYPE(xios_file) :: file_hdl 530 TYPE(xios_filegroup) :: filegroup_hdl 531 INTEGER :: i 532 CHARACTER(lc) :: clpath 533 534 !set name of the restart file and enable available fields 535 IF(lwp) WRITE(numout,*) 'Setting restart filename (for XIOS write) to: ',cdrst_file 536 CALL xios_get_handle("file_definition", filegroup_hdl ) 537 CALL xios_add_child(filegroup_hdl, file_hdl, 'wrestart') 538 IF(nxioso.eq.1) THEN 539 CALL xios_set_file_attr( "wrestart", type="one_file", enabled=.TRUE.,& 540 mode="write", output_freq=xios_timestep) 541 if(lwp) write(numout,*) 'OPEN ', trim(cdrst_file), ' in one_file mode' 542 ELSE 543 CALL xios_set_file_attr( "wrestart", type="multiple_file", enabled=.TRUE.,& 544 mode="write", output_freq=xios_timestep) 545 if(lwp) write(numout,*) 'OPEN ', trim(cdrst_file), ' in multiple_file mode' 546 ENDIF 547 CALL xios_set_file_attr( "wrestart", name=trim(cdrst_file)) 548 !define fields for restart context 549 DO i = 1, max_rst_fields 550 IF( rst_wfields(i)%active ) THEN 551 CALL xios_add_child(file_hdl, field_hdl, TRIM(rst_wfields(i)%vname)) 552 SELECT CASE (TRIM(rst_wfields(i)%grid)) 553 CASE ("grid_N_3D") 554 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_wfields(i)%vname), & 555 domain_ref="grid_N", axis_ref="nav_lev", prec = 8, operation = "instant") 556 CASE ("grid_N") 557 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_wfields(i)%vname), & 558 domain_ref="grid_N", prec = 8, operation = "instant") 559 CASE ("grid_vector") 560 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_wfields(i)%vname), & 561 axis_ref="nav_lev", prec = 8, operation = "instant") 562 CASE ("grid_scalar") 563 CALL xios_set_attr (field_hdl, enabled = .TRUE., name = TRIM(rst_wfields(i)%vname), & 564 scalar_ref = "grid_scalar", prec = 8, operation = "instant") 565 END SELECT 566 ENDIF 567 END DO 568 #endif 569 END SUBROUTINE iom_set_rstw_active 570 571 SUBROUTINE iom_set_rst_context(ld_rstr) 572 !!--------------------------------------------------------------------- 573 !! *** SUBROUTINE iom_set_rst_context *** 574 !! 575 !! ** Purpose : Define domain, axis and grid for restart (read/write) 576 !! context 577 !! 578 !!--------------------------------------------------------------------- 579 LOGICAL, INTENT(IN) :: ld_rstr 580 !ld_rstr is true for restart context. There is no need to define grid for 581 !restart read, because it's read from file 582 #if defined key_iomput 583 TYPE(xios_domaingroup) :: domaingroup_hdl 584 TYPE(xios_domain) :: domain_hdl 585 TYPE(xios_axisgroup) :: axisgroup_hdl 586 TYPE(xios_axis) :: axis_hdl 587 TYPE(xios_scalar) :: scalar_hdl 588 TYPE(xios_scalargroup) :: scalargroup_hdl 589 590 CALL xios_get_handle("domain_definition",domaingroup_hdl) 591 CALL xios_add_child(domaingroup_hdl, domain_hdl, "grid_N") 592 CALL set_grid("N", glamt, gphit, .TRUE., ld_rstr) 593 594 CALL xios_get_handle("axis_definition",axisgroup_hdl) 595 CALL xios_add_child(axisgroup_hdl, axis_hdl, "nav_lev") 596 !AGRIF fails to compile when unit= is in call to xios_set_axis_attr 597 ! 
CALL xios_set_axis_attr( "nav_lev", long_name="Vertical levels", unit="m", positive="down") 598 CALL xios_set_axis_attr( "nav_lev", long_name="Vertical levels in meters", positive="down") 599 CALL iom_set_axis_attr( "nav_lev", paxis = gdept_1d ) 600 601 CALL xios_get_handle("scalar_definition", scalargroup_hdl) 602 CALL xios_add_child(scalargroup_hdl, scalar_hdl, "grid_scalar") 603 #endif 604 END SUBROUTINE iom_set_rst_context 70 605 71 606 SUBROUTINE iom_swap( cdname ) … … 76 611 !!--------------------------------------------------------------------- 77 612 CHARACTER(len=*), INTENT(in) :: cdname 613 #if defined key_iomput 614 TYPE(xios_context) :: nemo_hdl 615 616 IF( TRIM(Agrif_CFixed()) == '0' ) THEN 617 CALL xios_get_handle(TRIM(cdname),nemo_hdl) 618 ELSE 619 CALL xios_get_handle(TRIM(Agrif_CFixed())//"_"//TRIM(cdname),nemo_hdl) 620 ENDIF 621 ! 622 CALL xios_set_current_context(nemo_hdl) 623 #endif 78 624 ! 79 625 END SUBROUTINE iom_swap 80 626 81 627 82 SUBROUTINE iom_open( cdname, kiomid, ldwrt, kdom, kiolib, ldstop, ldiof)628 SUBROUTINE iom_open( cdname, kiomid, ldwrt, kdom, lagrif, ldstop, ldiof, kdlev ) 83 629 !!--------------------------------------------------------------------- 84 630 !! *** SUBROUTINE iom_open *** … … 90 636 LOGICAL , INTENT(in ), OPTIONAL :: ldwrt ! open in write modeb (default = .FALSE.) 91 637 INTEGER , INTENT(in ), OPTIONAL :: kdom ! Type of domain to be written (default = jpdom_local_noovlap) 92 INTEGER , INTENT(in ), OPTIONAL :: kiolib ! library used to open the file (default = jpnf90)93 638 LOGICAL , INTENT(in ), OPTIONAL :: ldstop ! stop if open to read a non-existing file (default = .TRUE.) 639 LOGICAL , INTENT(in ), OPTIONAL :: lagrif ! add 1_ prefix for AGRIF (default = .TRUE. 94 640 LOGICAL , INTENT(in ), OPTIONAL :: ldiof ! Interp On the Fly, needed for AGRIF (default = .FALSE.) 95 641 INTEGER , INTENT(in ), OPTIONAL :: kdlev ! number of vertical levels 642 ! 96 643 CHARACTER(LEN=256) :: clname ! the name of the file based on cdname [[+clcpu]+clcpu] 97 644 CHARACTER(LEN=256) :: cltmpn ! tempory name to store clname (in writting mode) 98 CHARACTER(LEN=10) :: clsuffix ! ".nc" or ".dimg"645 CHARACTER(LEN=10) :: clsuffix ! ".nc" 99 646 CHARACTER(LEN=15) :: clcpu ! the cpu number (max jpmax_digits digits) 100 647 CHARACTER(LEN=256) :: clinfo ! info character … … 104 651 LOGICAL :: llstop ! local definition of ldstop 105 652 LOGICAL :: lliof ! local definition of ldiof 106 INTEGER :: iolib ! library do we use to open the file653 LOGICAL :: llagrif ! local definition of lagrif 107 654 INTEGER :: icnt ! counter for digits in clcpu (max = jpmax_digits) 108 655 INTEGER :: iln, ils ! lengths of character … … 137 684 ELSE ; llstop = .TRUE. 138 685 ENDIF 139 ! what library do we use to open the file?140 IF( PRESENT( kiolib) ) THEN ; iolib = kiolib141 ELSE ; iolib = jpnf90686 ! do we add agrif suffix 687 IF( PRESENT(lagrif) ) THEN ; llagrif = lagrif 688 ELSE ; llagrif = .TRUE. 142 689 ENDIF 143 690 ! are we using interpolation on the fly? … … 147 694 ! do we read the overlap 148 695 ! ugly patch SM+JMM+RB to overwrite global definition in some cases 149 llnoov = (jpni * jpnj ) == jpnij .AND. .NOT. lk_agrif 696 llnoov = (jpni * jpnj ) == jpnij .AND. .NOT. lk_agrif 150 697 ! create the file name by added, if needed, TRIM(Agrif_CFixed()) and TRIM(clsuffix) 151 698 ! ============= 152 699 clname = trim(cdname) 153 IF ( .NOT. Agrif_Root() .AND. .NOT. lliof ) THEN700 IF ( .NOT. Agrif_Root() .AND. .NOT. lliof .AND. 
llagrif) THEN 154 701 iln = INDEX(clname,'/') 155 702 cltmpn = clname(1:iln) … … 158 705 ENDIF 159 706 ! which suffix should we use? 160 SELECT CASE (iolib) 161 CASE (jpnf90 ) ; clsuffix = '.nc' 162 CASE DEFAULT ; clsuffix = '' 163 END SELECT 707 clsuffix = '.nc' 164 708 ! Add the suffix if needed 165 709 iln = LEN_TRIM(clname) … … 173 717 IF( .NOT.llok ) THEN 174 718 ! we try to add the cpu number to the name 175 WRITE(clcpu,*) narea-1 719 WRITE(clcpu,*) narea-1 720 176 721 clcpu = TRIM(ADJUSTL(clcpu)) 177 722 iln = INDEX(clname,TRIM(clsuffix), back = .TRUE.) … … 186 731 icnt = icnt + 1 187 732 END DO 733 ELSE 734 lxios_sini = .TRUE. 188 735 ENDIF 189 736 IF( llwrt ) THEN … … 220 767 END SELECT 221 768 ENDIF 222 ! Open the NetCDF or RSTDIMGfile769 ! Open the NetCDF file 223 770 ! ============= 224 771 ! do we have some free file identifier? … … 243 790 ENDIF 244 791 IF( istop == nstop ) THEN ! no error within this routine 245 SELECT CASE (iolib) 246 CASE (jpnf90 ) ; CALL iom_nf90_open( clname, kiomid, llwrt, llok, idompar ) 247 CASE DEFAULT 248 END SELECT 792 CALL iom_nf90_open( clname, kiomid, llwrt, llok, idompar, kdlev = kdlev ) 249 793 ENDIF 250 794 ! … … 279 823 DO jf = i_s, i_e 280 824 IF( iom_file(jf)%nfid > 0 ) THEN 281 SELECT CASE (iom_file(jf)%iolib) 282 CASE (jpnf90 ) ; CALL iom_nf90_close( jf ) 283 CASE DEFAULT 284 END SELECT 825 CALL iom_nf90_close( jf ) 285 826 iom_file(jf)%nfid = 0 ! free the id 286 827 IF( PRESENT(kiomid) ) kiomid = 0 ! return 0 as id to specify that the file was closed … … 304 845 INTEGER , INTENT(in ) :: kiomid ! file Identifier 305 846 CHARACTER(len=*) , INTENT(in ) :: cdvar ! name of the variable 306 INTEGER, DIMENSION(:), INTENT( out), OPTIONAL :: kdimsz ! size of the dimensions847 INTEGER, DIMENSION(:), INTENT( out), OPTIONAL :: kdimsz ! size of each dimension 307 848 INTEGER, INTENT( out), OPTIONAL :: kndims ! size of the dimensions 308 849 LOGICAL , INTENT(in ), OPTIONAL :: ldstop ! stop if looking for non-existing variable (default = .TRUE.) … … 335 876 iiv = iiv + 1 336 877 IF( iiv <= jpmax_vars ) THEN 337 SELECT CASE (iom_file(kiomid)%iolib) 338 CASE (jpnf90 ) ; iom_varid = iom_nf90_varid ( kiomid, cdvar, iiv, kdimsz, kndims ) 339 CASE DEFAULT 340 END SELECT 878 iom_varid = iom_nf90_varid( kiomid, cdvar, iiv, kdimsz, kndims ) 341 879 ELSE 342 880 CALL ctl_stop( trim(clinfo), 'Too many variables in the file '//iom_file(kiomid)%name, & 343 & 881 & 'increase the parameter jpmax_vars') 344 882 ENDIF 345 883 IF( llstop .AND. iom_varid == -1 ) CALL ctl_stop( TRIM(clinfo)//' not found' ) … … 348 886 IF( PRESENT(kdimsz) ) THEN 349 887 i_nvd = iom_file(kiomid)%ndims(iiv) 350 IF( i_nvd == size(kdimsz) ) THEN351 kdimsz( :) = iom_file(kiomid)%dimsz(1:i_nvd,iiv)888 IF( i_nvd <= size(kdimsz) ) THEN 889 kdimsz(1:i_nvd) = iom_file(kiomid)%dimsz(1:i_nvd,iiv) 352 890 ELSE 353 891 WRITE(ctmp1,*) i_nvd, size(kdimsz) … … 366 904 !! INTERFACE iom_get 367 905 !!---------------------------------------------------------------------- 368 SUBROUTINE iom_g0d( kiomid, cdvar, pvar, ktime )906 SUBROUTINE iom_g0d( kiomid, cdvar, pvar, ktime, ldxios ) 369 907 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 370 908 CHARACTER(len=*), INTENT(in ) :: cdvar ! Name of the variable 371 909 REAL(wp) , INTENT( out) :: pvar ! read field 372 910 INTEGER , INTENT(in ), OPTIONAL :: ktime ! record number 911 LOGICAL , INTENT(in ), OPTIONAL :: ldxios ! use xios to read restart 373 912 ! 374 913 INTEGER :: idvar ! variable id … … 378 917 CHARACTER(LEN=100) :: clname ! 
file name 379 918 CHARACTER(LEN=1) :: cldmspc ! 380 ! 381 itime = 1 382 IF( PRESENT(ktime) ) itime = ktime 383 ! 384 clname = iom_file(kiomid)%name 385 clinfo = ' iom_g0d, file: '//trim(clname)//', var: '//trim(cdvar) 386 ! 387 IF( kiomid > 0 ) THEN 388 idvar = iom_varid( kiomid, cdvar ) 389 IF( iom_file(kiomid)%nfid > 0 .AND. idvar > 0 ) THEN 390 idmspc = iom_file ( kiomid )%ndims( idvar ) 391 IF( iom_file(kiomid)%luld(idvar) ) idmspc = idmspc - 1 392 WRITE(cldmspc , fmt='(i1)') idmspc 393 IF( idmspc > 0 ) CALL ctl_stop( TRIM(clinfo), 'When reading to a 0D array, we do not accept data', & 394 & 'with 1 or more spatial dimensions: '//cldmspc//' were found.' , & 395 & 'Use ncwa -a to suppress the unnecessary dimensions' ) 396 SELECT CASE (iom_file(kiomid)%iolib) 397 CASE (jpnf90 ) ; CALL iom_nf90_get( kiomid, idvar, pvar, itime ) 398 CASE DEFAULT 399 END SELECT 919 LOGICAL :: llxios 920 ! 921 llxios = .FALSE. 922 IF( PRESENT(ldxios) ) llxios = ldxios 923 924 IF(.NOT.llxios) THEN ! read data using default library 925 itime = 1 926 IF( PRESENT(ktime) ) itime = ktime 927 ! 928 clname = iom_file(kiomid)%name 929 clinfo = ' iom_g0d, file: '//trim(clname)//', var: '//trim(cdvar) 930 ! 931 IF( kiomid > 0 ) THEN 932 idvar = iom_varid( kiomid, cdvar ) 933 IF( iom_file(kiomid)%nfid > 0 .AND. idvar > 0 ) THEN 934 idmspc = iom_file ( kiomid )%ndims( idvar ) 935 IF( iom_file(kiomid)%luld(idvar) ) idmspc = idmspc - 1 936 WRITE(cldmspc , fmt='(i1)') idmspc 937 IF( idmspc > 0 ) CALL ctl_stop( TRIM(clinfo), 'When reading to a 0D array, we do not accept data', & 938 & 'with 1 or more spatial dimensions: '//cldmspc//' were found.' , & 939 & 'Use ncwa -a to suppress the unnecessary dimensions' ) 940 CALL iom_nf90_get( kiomid, idvar, pvar, itime ) 941 ENDIF 400 942 ENDIF 943 ELSE 944 #if defined key_iomput 945 IF(lwp) WRITE(numout,*) 'XIOS RST READ (0D): ', trim(cdvar) 946 CALL iom_swap( TRIM(crxios_context) ) 947 CALL xios_recv_field( trim(cdvar), pvar) 948 CALL iom_swap( TRIM(cxios_context) ) 949 #else 950 WRITE(ctmp1,*) 'Can not use XIOS in iom_g0d, file: '//trim(clname)//', var:'//trim(cdvar) 951 CALL ctl_stop( 'iom_g0d', ctmp1 ) 952 #endif 401 953 ENDIF 402 954 END SUBROUTINE iom_g0d 403 955 404 SUBROUTINE iom_g1d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount )956 SUBROUTINE iom_g1d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount, ldxios ) 405 957 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 406 958 INTEGER , INTENT(in ) :: kdom ! Type of domain to be read … … 410 962 INTEGER , INTENT(in ), DIMENSION(1), OPTIONAL :: kstart ! start axis position of the reading 411 963 INTEGER , INTENT(in ), DIMENSION(1), OPTIONAL :: kcount ! number of points in each axis 964 LOGICAL , INTENT(in ), OPTIONAL :: ldxios ! read data using XIOS 412 965 ! 413 966 IF( kiomid > 0 ) THEN 414 967 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_get_123d( kiomid, kdom , cdvar , pv_r1d=pvar, & 415 & ktime=ktime, kstart=kstart, kcount=kcount ) 968 & ktime=ktime, kstart=kstart, kcount=kcount, & 969 & ldxios=ldxios ) 416 970 ENDIF 417 971 END SUBROUTINE iom_g1d 418 972 419 SUBROUTINE iom_g2d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount, lrowattr 973 SUBROUTINE iom_g2d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount, lrowattr, ldxios) 420 974 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 421 975 INTEGER , INTENT(in ) :: kdom ! Type of domain to be read … … 429 983 ! called open_ocean_jstart to set the start 430 984 ! value for the 2nd dimension (netcdf only) 985 LOGICAL , INTENT(in ), OPTIONAL :: ldxios ! 
read data using XIOS 431 986 ! 432 987 IF( kiomid > 0 ) THEN 433 988 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_get_123d( kiomid, kdom , cdvar , pv_r2d=pvar, & 434 989 & ktime=ktime, kstart=kstart, kcount=kcount, & 435 & lrowattr=lrowattr 990 & lrowattr=lrowattr, ldxios=ldxios) 436 991 ENDIF 437 992 END SUBROUTINE iom_g2d 438 993 439 SUBROUTINE iom_g3d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount, lrowattr )994 SUBROUTINE iom_g3d( kiomid, kdom, cdvar, pvar, ktime, kstart, kcount, lrowattr, ldxios ) 440 995 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 441 996 INTEGER , INTENT(in ) :: kdom ! Type of domain to be read … … 449 1004 ! called open_ocean_jstart to set the start 450 1005 ! value for the 2nd dimension (netcdf only) 1006 LOGICAL , INTENT(in ), OPTIONAL :: ldxios ! read data using XIOS 451 1007 ! 452 1008 IF( kiomid > 0 ) THEN 453 1009 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_get_123d( kiomid, kdom , cdvar , pv_r3d=pvar, & 454 1010 & ktime=ktime, kstart=kstart, kcount=kcount, & 455 & lrowattr=lrowattr )1011 & lrowattr=lrowattr, ldxios=ldxios ) 456 1012 ENDIF 457 1013 END SUBROUTINE iom_g3d … … 461 1017 & pv_r1d, pv_r2d, pv_r3d, & 462 1018 & ktime , kstart, kcount, & 463 & lrowattr 1019 & lrowattr, ldxios ) 464 1020 !!----------------------------------------------------------------------- 465 1021 !! *** ROUTINE iom_get_123d *** … … 482 1038 ! called open_ocean_jstart to set the start 483 1039 ! value for the 2nd dimension (netcdf only) 484 ! 1040 LOGICAL , INTENT(in ), OPTIONAL :: ldxios ! use XIOS to read restart 1041 ! 1042 LOGICAL :: llxios ! local definition for XIOS read 485 1043 LOGICAL :: llnoov ! local definition to read overlap 486 1044 LOGICAL :: luse_jattr ! local definition to read open_ocean_jstart file attribute … … 506 1064 CHARACTER(LEN=256) :: clname ! file name 507 1065 CHARACTER(LEN=1) :: clrankpv, cldmspc ! 1066 LOGICAL :: ll_depth_spec ! T => if kstart, kcount present then *only* use values for 3rd spatial dimension. 1067 INTEGER :: inlev ! number of levels for 3D data 1068 REAL(wp) :: gma, gmi 508 1069 !--------------------------------------------------------------------- 509 1070 ! 510 clname = iom_file(kiomid)%name ! esier to read 511 clinfo = ' iom_get_123d, file: '//trim(clname)//', var: '//trim(cdvar) 512 ! local definition of the domain ? 1071 inlev = -1 1072 IF( PRESENT(pv_r3d) ) inlev = SIZE(pv_r3d, 3) 1073 ! 1074 llxios = .FALSE. 1075 if(PRESENT(ldxios)) llxios = ldxios 1076 idvar = iom_varid( kiomid, cdvar ) 513 1077 idom = kdom 514 ! do we read the overlap 515 ! ugly patch SM+JMM+RB to overwrite global definition in some cases 516 llnoov = (jpni * jpnj ) == jpnij .AND. .NOT. lk_agrif 517 ! check kcount and kstart optionals parameters... 518 IF( PRESENT(kcount) .AND. (.NOT. PRESENT(kstart)) ) CALL ctl_stop(trim(clinfo), 'kcount present needs kstart present') 519 IF( PRESENT(kstart) .AND. (.NOT. PRESENT(kcount)) ) CALL ctl_stop(trim(clinfo), 'kstart present needs kcount present') 520 IF( PRESENT(kstart) .AND. idom /= jpdom_unknown ) CALL ctl_stop(trim(clinfo), 'kstart present needs kdom = jpdom_unknown') 521 522 luse_jattr = .false. 523 IF( PRESENT(lrowattr) ) THEN 524 IF( lrowattr .AND. idom /= jpdom_data ) CALL ctl_stop(trim(clinfo), 'lrowattr present and true needs kdom = jpdom_data') 525 IF( lrowattr .AND. idom == jpdom_data ) luse_jattr = .true. 526 ENDIF 527 IF( luse_jattr ) THEN 528 SELECT CASE (iom_file(kiomid)%iolib) 529 CASE (jpnf90 ) 530 ! Ok 531 CASE DEFAULT 532 END SELECT 533 ENDIF 534 535 ! 
Search for the variable in the data base (eventually actualize data) 536 istop = nstop 537 idvar = iom_varid( kiomid, cdvar ) 538 ! 539 IF( idvar > 0 ) THEN 540 ! to write iom_file(kiomid)%dimsz in a shorter way ! 541 idimsz(:) = iom_file(kiomid)%dimsz(:, idvar) 542 inbdim = iom_file(kiomid)%ndims(idvar) ! number of dimensions in the file 543 idmspc = inbdim ! number of spatial dimensions in the file 544 IF( iom_file(kiomid)%luld(idvar) ) idmspc = inbdim - 1 545 IF( idmspc > 3 ) CALL ctl_stop(trim(clinfo), 'the file has more than 3 spatial dimensions this case is not coded...') 1078 ! 1079 IF(.NOT.llxios) THEN 1080 clname = iom_file(kiomid)%name ! esier to read 1081 clinfo = ' iom_get_123d, file: '//trim(clname)//', var: '//trim(cdvar) 1082 ! local definition of the domain ? 1083 ! do we read the overlap 1084 ! ugly patch SM+JMM+RB to overwrite global definition in some cases 1085 llnoov = (jpni * jpnj ) == jpnij .AND. .NOT. lk_agrif 1086 ! check kcount and kstart optionals parameters... 1087 IF( PRESENT(kcount) .AND. (.NOT. PRESENT(kstart)) ) CALL ctl_stop(trim(clinfo), 'kcount present needs kstart present') 1088 IF( PRESENT(kstart) .AND. (.NOT. PRESENT(kcount)) ) CALL ctl_stop(trim(clinfo), 'kstart present needs kcount present') 1089 IF( PRESENT(kstart) .AND. idom /= jpdom_unknown .AND. idom /= jpdom_autoglo_xy ) & 1090 & CALL ctl_stop(trim(clinfo), 'kstart present needs kdom = jpdom_unknown or kdom = jpdom_autoglo_xy') 1091 1092 luse_jattr = .false. 1093 IF( PRESENT(lrowattr) ) THEN 1094 IF( lrowattr .AND. idom /= jpdom_data ) CALL ctl_stop(trim(clinfo), 'lrowattr present and true needs kdom = jpdom_data') 1095 IF( lrowattr .AND. idom == jpdom_data ) luse_jattr = .true. 1096 ENDIF 1097 1098 ! Search for the variable in the data base (eventually actualize data) 1099 istop = nstop 546 1100 ! 547 ! update idom definition... 548 ! Identify the domain in case of jpdom_auto(glo/dta) definition 549 IF( idom == jpdom_autoglo .OR. idom == jpdom_autodta ) THEN 550 IF( idom == jpdom_autoglo ) THEN ; idom = jpdom_global 551 ELSE ; idom = jpdom_data 1101 IF( idvar > 0 ) THEN 1102 ! to write iom_file(kiomid)%dimsz in a shorter way ! 1103 idimsz(:) = iom_file(kiomid)%dimsz(:, idvar) 1104 inbdim = iom_file(kiomid)%ndims(idvar) ! number of dimensions in the file 1105 idmspc = inbdim ! number of spatial dimensions in the file 1106 IF( iom_file(kiomid)%luld(idvar) ) idmspc = inbdim - 1 1107 IF( idmspc > 3 ) CALL ctl_stop(trim(clinfo), 'the file has more than 3 spatial dimensions this case is not coded...') 1108 ! 1109 ! update idom definition... 1110 ! Identify the domain in case of jpdom_auto(glo/dta) definition 1111 IF( idom == jpdom_autoglo_xy ) THEN 1112 ll_depth_spec = .TRUE. 1113 idom = jpdom_autoglo 1114 ELSE 1115 ll_depth_spec = .FALSE. 552 1116 ENDIF 553 ind1 = INDEX( clname, '_', back = .TRUE. ) + 1 554 ind2 = INDEX( clname, '.', back = .TRUE. ) - 1 555 IF( ind2 > ind1 ) THEN ; IF( VERIFY( clname(ind1:ind2), '0123456789' ) == 0 ) idom = jpdom_local ; ENDIF 556 ENDIF 557 ! Identify the domain in case of jpdom_local definition 558 IF( idom == jpdom_local ) THEN 559 IF( idimsz(1) == jpi .AND. idimsz(2) == jpj ) THEN ; idom = jpdom_local_full 560 ELSEIF( idimsz(1) == nlci .AND. idimsz(2) == nlcj ) THEN ; idom = jpdom_local_noextra 561 ELSEIF( idimsz(1) == (nlei - nldi + 1) .AND. idimsz(2) == (nlej - nldj + 1) ) THEN ; idom = jpdom_local_noovlap 562 ELSE ; CALL ctl_stop( trim(clinfo), 'impossible to identify the local domain' ) 1117 IF( idom == jpdom_autoglo .OR. 
idom == jpdom_autodta ) THEN 1118 IF( idom == jpdom_autoglo ) THEN ; idom = jpdom_global 1119 ELSE ; idom = jpdom_data 1120 ENDIF 1121 ind1 = INDEX( clname, '_', back = .TRUE. ) + 1 1122 ind2 = INDEX( clname, '.', back = .TRUE. ) - 1 1123 IF( ind2 > ind1 ) THEN ; IF( VERIFY( clname(ind1:ind2), '0123456789' ) == 0 ) idom = jpdom_local ; ENDIF 563 1124 ENDIF 564 ENDIF 565 ! 566 ! check the consistency between input array and data rank in the file 567 ! 568 ! initializations 569 itime = 1 570 IF( PRESENT(ktime) ) itime = ktime 571 572 irankpv = 1 * COUNT( (/PRESENT(pv_r1d)/) ) + 2 * COUNT( (/PRESENT(pv_r2d)/) ) + 3 * COUNT( (/PRESENT(pv_r3d)/) ) 573 WRITE(clrankpv, fmt='(i1)') irankpv 574 WRITE(cldmspc , fmt='(i1)') idmspc 575 ! 576 IF( idmspc < irankpv ) THEN 577 CALL ctl_stop( TRIM(clinfo), 'The file has only '//cldmspc//' spatial dimension', & 578 & 'it is impossible to read a '//clrankpv//'D array from this file...' ) 579 ELSEIF( idmspc == irankpv ) THEN 580 IF( PRESENT(pv_r1d) .AND. idom /= jpdom_unknown ) & 581 & CALL ctl_stop( TRIM(clinfo), 'case not coded...You must use jpdom_unknown' ) 582 ELSEIF( idmspc > irankpv ) THEN 583 IF( PRESENT(pv_r2d) .AND. itime == 1 .AND. idimsz(3) == 1 .AND. idmspc == 3 ) THEN 584 CALL ctl_warn( trim(clinfo), '2D array but 3 spatial dimensions for the data...' , & 585 & 'As the size of the z dimension is 1 and as we try to read the first record, ', & 586 & 'we accept this case, even if there is a possible mix-up between z and time dimension' ) 587 idmspc = idmspc - 1 588 ELSE 589 CALL ctl_stop( TRIM(clinfo), 'To keep iom lisibility, when reading a '//clrankpv//'D array,' , & 590 & 'we do not accept data with '//cldmspc//' spatial dimensions', & 591 & 'Use ncwa -a to suppress the unnecessary dimensions' ) 1125 ! Identify the domain in case of jpdom_local definition 1126 IF( idom == jpdom_local ) THEN 1127 IF( idimsz(1) == jpi .AND. idimsz(2) == jpj ) THEN ; idom = jpdom_local_full 1128 ELSEIF( idimsz(1) == nlci .AND. idimsz(2) == nlcj ) THEN ; idom = jpdom_local_noextra 1129 ELSEIF( idimsz(1) == (nlei - nldi + 1) .AND. idimsz(2) == (nlej - nldj + 1) ) THEN ; idom = jpdom_local_noovlap 1130 ELSE ; CALL ctl_stop( trim(clinfo), 'impossible to identify the local domain' ) 592 1131 ENDIF 593 ENDIF 594 595 ! 596 ! definition of istart and icnt 597 ! 598 icnt (:) = 1 599 istart(:) = 1 600 istart(idmspc+1) = itime 601 602 IF( PRESENT(kstart) ) THEN ; istart(1:idmspc) = kstart(1:idmspc) ; icnt(1:idmspc) = kcount(1:idmspc) 603 ELSE 604 IF( idom == jpdom_unknown ) THEN ; icnt(1:idmspc) = idimsz(1:idmspc) 605 ELSE 606 IF( .NOT. PRESENT(pv_r1d) ) THEN ! not a 1D array 607 IF( idom == jpdom_data ) THEN 608 jstartrow = 1 609 IF( luse_jattr ) THEN 610 CALL iom_getatt(kiomid, 'open_ocean_jstart', jstartrow ) ! -999 is returned if the attribute is not found 611 jstartrow = MAX(1,jstartrow) 1132 ENDIF 1133 ! 1134 ! check the consistency between input array and data rank in the file 1135 ! 1136 ! initializations 1137 itime = 1 1138 IF( PRESENT(ktime) ) itime = ktime 1139 ! 1140 irankpv = 1 * COUNT( (/PRESENT(pv_r1d)/) ) + 2 * COUNT( (/PRESENT(pv_r2d)/) ) + 3 * COUNT( (/PRESENT(pv_r3d)/) ) 1141 WRITE(clrankpv, fmt='(i1)') irankpv 1142 WRITE(cldmspc , fmt='(i1)') idmspc 1143 ! 1144 IF( idmspc < irankpv ) THEN 1145 CALL ctl_stop( TRIM(clinfo), 'The file has only '//cldmspc//' spatial dimension', & 1146 & 'it is impossible to read a '//clrankpv//'D array from this file...' ) 1147 ELSEIF( idmspc == irankpv ) THEN 1148 IF( PRESENT(pv_r1d) .AND. 
idom /= jpdom_unknown ) & 1149 & CALL ctl_stop( TRIM(clinfo), 'case not coded...You must use jpdom_unknown' ) 1150 ELSEIF( idmspc > irankpv ) THEN 1151 IF( PRESENT(pv_r2d) .AND. itime == 1 .AND. idimsz(3) == 1 .AND. idmspc == 3 ) THEN 1152 CALL ctl_warn( trim(clinfo), '2D array but 3 spatial dimensions for the data...' , & 1153 & 'As the size of the z dimension is 1 and as we try to read the first record, ', & 1154 & 'we accept this case, even if there is a possible mix-up between z and time dimension' ) 1155 idmspc = idmspc - 1 1156 ELSE 1157 CALL ctl_stop( TRIM(clinfo), 'To keep iom lisibility, when reading a '//clrankpv//'D array,' , & 1158 & 'we do not accept data with '//cldmspc//' spatial dimensions', & 1159 & 'Use ncwa -a to suppress the unnecessary dimensions' ) 1160 ENDIF 1161 ENDIF 1162 ! 1163 ! definition of istart and icnt 1164 ! 1165 icnt (:) = 1 1166 istart(:) = 1 1167 istart(idmspc+1) = itime 1168 1169 IF( PRESENT(kstart) .AND. .NOT. ll_depth_spec ) THEN 1170 istart(1:idmspc) = kstart(1:idmspc) 1171 icnt (1:idmspc) = kcount(1:idmspc) 1172 ELSE 1173 IF(idom == jpdom_unknown ) THEN 1174 icnt(1:idmspc) = idimsz(1:idmspc) 1175 ELSE 1176 IF( .NOT. PRESENT(pv_r1d) ) THEN ! not a 1D array 1177 IF( idom == jpdom_data ) THEN 1178 jstartrow = 1 1179 IF( luse_jattr ) THEN 1180 CALL iom_getatt(kiomid, 'open_ocean_jstart', jstartrow ) ! -999 is returned if the attribute is not found 1181 jstartrow = MAX(1,jstartrow) 1182 ENDIF 1183 istart(1:2) = (/ mig(1), mjg(1) + jstartrow - 1 /) ! icnt(1:2) done below 1184 ELSEIF( idom == jpdom_global ) THEN ; istart(1:2) = (/ nimpp , njmpp /) ! icnt(1:2) done below 612 1185 ENDIF 613 istart(1:2) = (/ mig(1), mjg(1) + jstartrow - 1 /) ! icnt(1:2) done below 614 ELSEIF( idom == jpdom_global ) THEN ; istart(1:2) = (/ nimpp , njmpp /) ! icnt(1:2) done below 615 ENDIF 616 ! we do not read the overlap -> we start to read at nldi, nldj 1186 ! we do not read the overlap -> we start to read at nldi, nldj 617 1187 ! JMM + SM: ugly patch before getting the new version of lib_mpp) 618 1188 ! IF( idom /= jpdom_local_noovlap ) istart(1:2) = istart(1:2) + (/ nldi - 1, nldj - 1 /) 619 IF( llnoov .AND. idom /= jpdom_local_noovlap ) istart(1:2) = istart(1:2) + (/ nldi - 1, nldj - 1 /)1189 IF( llnoov .AND. idom /= jpdom_local_noovlap ) istart(1:2) = istart(1:2) + (/ nldi - 1, nldj - 1 /) 620 1190 ! we do not read the overlap and the extra-halos -> from nldi to nlei and from nldj to nlej 621 1191 ! JMM + SM: ugly patch before getting the new version of lib_mpp) 622 1192 ! icnt(1:2) = (/ nlei - nldi + 1, nlej - nldj + 1 /) 623 IF( llnoov ) THEN ; icnt(1:2) = (/ nlei - nldi + 1, nlej - nldj + 1 /) 624 ELSE ; icnt(1:2) = (/ nlci , nlcj /) 625 ENDIF 626 IF( PRESENT(pv_r3d) ) THEN 627 IF( idom == jpdom_data ) THEN ; icnt(3) = jpkdta 628 ELSE ; icnt(3) = jpk 1193 IF( llnoov ) THEN ; icnt(1:2) = (/ nlei - nldi + 1, nlej - nldj + 1 /) 1194 ELSE ; icnt(1:2) = (/ nlci , nlcj /) 1195 ENDIF 1196 IF( PRESENT(pv_r3d) ) THEN 1197 IF( idom == jpdom_data ) THEN ; icnt(3) = inlev 1198 ELSEIF( ll_depth_spec .AND. PRESENT(kstart) ) THEN ; istart(3) = kstart(3) ; icnt(3) = kcount(3) 1199 ELSE ; icnt(3) = inlev 1200 ENDIF 629 1201 ENDIF 630 1202 ENDIF 631 1203 ENDIF 632 1204 ENDIF 633 ENDIF 634 635 ! check that istart and icnt can be used with this file 636 !- 637 DO jl = 1, jpmax_dims 638 itmp = istart(jl)+icnt(jl)-1 639 IF( itmp > idimsz(jl) .AND. 
idimsz(jl) /= 0 ) THEN 640 WRITE( ctmp1, FMT="('(istart(', i1, ') + icnt(', i1, ') - 1) = ', i5)" ) jl, jl, itmp 641 WRITE( ctmp2, FMT="(' is larger than idimsz(', i1,') = ', i5)" ) jl, idimsz(jl) 642 CALL ctl_stop( trim(clinfo), 'start and count too big regarding to the size of the data, ', ctmp1, ctmp2 ) 643 ENDIF 644 END DO 645 646 ! check that icnt matches the input array 647 !- 648 IF( idom == jpdom_unknown ) THEN 649 IF( irankpv == 1 ) ishape(1:1) = SHAPE(pv_r1d) 650 IF( irankpv == 2 ) ishape(1:2) = SHAPE(pv_r2d) 651 IF( irankpv == 3 ) ishape(1:3) = SHAPE(pv_r3d) 652 ctmp1 = 'd' 653 ELSE 654 IF( irankpv == 2 ) THEN 1205 1206 ! check that istart and icnt can be used with this file 1207 !- 1208 DO jl = 1, jpmax_dims 1209 itmp = istart(jl)+icnt(jl)-1 1210 IF( itmp > idimsz(jl) .AND. idimsz(jl) /= 0 ) THEN 1211 WRITE( ctmp1, FMT="('(istart(', i1, ') + icnt(', i1, ') - 1) = ', i5)" ) jl, jl, itmp 1212 WRITE( ctmp2, FMT="(' is larger than idimsz(', i1,') = ', i5)" ) jl, idimsz(jl) 1213 CALL ctl_stop( trim(clinfo), 'start and count too big regarding to the size of the data, ', ctmp1, ctmp2 ) 1214 ENDIF 1215 END DO 1216 1217 ! check that icnt matches the input array 1218 !- 1219 IF( idom == jpdom_unknown ) THEN 1220 IF( irankpv == 1 ) ishape(1:1) = SHAPE(pv_r1d) 1221 IF( irankpv == 2 ) ishape(1:2) = SHAPE(pv_r2d) 1222 IF( irankpv == 3 ) ishape(1:3) = SHAPE(pv_r3d) 1223 ctmp1 = 'd' 1224 ELSE 1225 IF( irankpv == 2 ) THEN 655 1226 ! JMM + SM: ugly patch before getting the new version of lib_mpp) 656 1227 ! ishape(1:2) = SHAPE(pv_r2d(nldi:nlei,nldj:nlej )) ; ctmp1 = 'd(nldi:nlei,nldj:nlej)' 657 IF( llnoov ) THEN ; ishape(1:2)=SHAPE(pv_r2d(nldi:nlei,nldj:nlej )) ; ctmp1='d(nldi:nlei,nldj:nlej)' 658 ELSE ; ishape(1:2)=SHAPE(pv_r2d(1 :nlci,1 :nlcj )) ; ctmp1='d(1:nlci,1:nlcj)' 1228 IF( llnoov ) THEN ; ishape(1:2)=SHAPE(pv_r2d(nldi:nlei,nldj:nlej )) ; ctmp1='d(nldi:nlei,nldj:nlej)' 1229 ELSE ; ishape(1:2)=SHAPE(pv_r2d(1 :nlci,1 :nlcj )) ; ctmp1='d(1:nlci,1:nlcj)' 1230 ENDIF 1231 ENDIF 1232 IF( irankpv == 3 ) THEN 1233 ! JMM + SM: ugly patch before getting the new version of lib_mpp) 1234 ! ishape(1:3) = SHAPE(pv_r3d(nldi:nlei,nldj:nlej,:)) ; ctmp1 = 'd(nldi:nlei,nldj:nlej,:)' 1235 IF( llnoov ) THEN ; ishape(1:3)=SHAPE(pv_r3d(nldi:nlei,nldj:nlej,:)) ; ctmp1='d(nldi:nlei,nldj:nlej,:)' 1236 ELSE ; ishape(1:3)=SHAPE(pv_r3d(1 :nlci,1 :nlcj,:)) ; ctmp1='d(1:nlci,1:nlcj,:)' 1237 ENDIF 659 1238 ENDIF 660 1239 ENDIF 661 IF( irankpv == 3 ) THEN 662 ! JMM + SM: ugly patch before getting the new version of lib_mpp) 663 ! ishape(1:3) = SHAPE(pv_r3d(nldi:nlei,nldj:nlej,:)) ; ctmp1 = 'd(nldi:nlei,nldj:nlej,:)' 664 IF( llnoov ) THEN ; ishape(1:3)=SHAPE(pv_r3d(nldi:nlei,nldj:nlej,:)) ; ctmp1='d(nldi:nlei,nldj:nlej,:)' 665 ELSE ; ishape(1:3)=SHAPE(pv_r3d(1 :nlci,1 :nlcj,:)) ; ctmp1='d(1:nlci,1:nlcj,:)' 666 ENDIF 667 ENDIF 1240 1241 DO jl = 1, irankpv 1242 WRITE( ctmp2, FMT="(', ', i1,'): ', i5,' /= icnt(', i1,'):', i5)" ) jl, ishape(jl), jl, icnt(jl) 1243 IF( ishape(jl) /= icnt(jl) ) CALL ctl_stop( TRIM(clinfo), 'size(pv_r'//clrankpv//TRIM(ctmp1)//TRIM(ctmp2) ) 1244 END DO 1245 668 1246 ENDIF 669 670 DO jl = 1, irankpv 671 WRITE( ctmp2, FMT="(', ', i1,'): ', i5,' /= icnt(', i1,'):', i5)" ) jl, ishape(jl), jl, icnt(jl) 672 IF( ishape(jl) /= icnt(jl) ) CALL ctl_stop( TRIM(clinfo), 'size(pv_r'//clrankpv//TRIM(ctmp1)//TRIM(ctmp2) ) 673 END DO 674 675 ENDIF 676 677 ! read the data 678 !- 679 IF( idvar > 0 .AND. istop == nstop ) THEN ! no additional errors until this point... 680 ! 1247 1248 ! 
read the data 1249 !- 1250 IF( idvar > 0 .AND. istop == nstop ) THEN ! no additional errors until this point... 1251 ! 681 1252 ! find the right index of the array to be read 682 1253 ! JMM + SM: ugly patch before getting the new version of lib_mpp) … … 684 1255 ! ELSE ; ix1 = 1 ; ix2 = icnt(1) ; iy1 = 1 ; iy2 = icnt(2) 685 1256 ! ENDIF 686 IF( llnoov ) THEN 687 IF( idom /= jpdom_unknown ) THEN ; ix1 = nldi ; ix2 = nlei ; iy1 = nldj ; iy2 = nlej 688 ELSE ; ix1 = 1 ; ix2 = icnt(1) ; iy1 = 1 ; iy2 = icnt(2) 1257 IF( llnoov ) THEN 1258 IF( idom /= jpdom_unknown ) THEN ; ix1 = nldi ; ix2 = nlei ; iy1 = nldj ; iy2 = nlej 1259 ELSE ; ix1 = 1 ; ix2 = icnt(1) ; iy1 = 1 ; iy2 = icnt(2) 1260 ENDIF 1261 ELSE 1262 IF( idom /= jpdom_unknown ) THEN ; ix1 = 1 ; ix2 = nlci ; iy1 = 1 ; iy2 = nlcj 1263 ELSE ; ix1 = 1 ; ix2 = icnt(1) ; iy1 = 1 ; iy2 = icnt(2) 1264 ENDIF 1265 ENDIF 1266 1267 CALL iom_nf90_get( kiomid, idvar, inbdim, istart, icnt, ix1, ix2, iy1, iy2, pv_r1d, pv_r2d, pv_r3d ) 1268 1269 IF( istop == nstop ) THEN ! no additional errors until this point... 1270 IF(lwp) WRITE(numout,"(10x,' read ',a,' (rec: ',i6,') in ',a,' ok')") TRIM(cdvar), itime, TRIM(iom_file(kiomid)%name) 1271 1272 !--- overlap areas and extra hallows (mpp) 1273 IF( PRESENT(pv_r2d) .AND. idom /= jpdom_unknown ) THEN 1274 CALL lbc_lnk( 'iom', pv_r2d,'Z',-999.,'no0' ) 1275 ELSEIF( PRESENT(pv_r3d) .AND. idom /= jpdom_unknown ) THEN 1276 ! this if could be simplified with the new lbc_lnk that works with any size of the 3rd dimension 1277 IF( icnt(3) == inlev ) THEN 1278 CALL lbc_lnk( 'iom', pv_r3d,'Z',-999.,'no0' ) 1279 ELSE ! put some arbitrary value (a call to lbc_lnk will be done later...) 1280 DO jj = nlcj+1, jpj ; pv_r3d(1:nlci, jj, :) = pv_r3d(1:nlci, nlej, :) ; END DO 1281 DO ji = nlci+1, jpi ; pv_r3d(ji , : , :) = pv_r3d(nlei , : , :) ; END DO 1282 ENDIF 1283 ENDIF 1284 ! 1285 ELSE 1286 ! return if istop == nstop is false 1287 RETURN 689 1288 ENDIF 690 1289 ELSE 691 IF( idom /= jpdom_unknown ) THEN ; ix1 = 1 ; ix2 = nlci ; iy1 = 1 ; iy2 = nlcj 692 ELSE ; ix1 = 1 ; ix2 = icnt(1) ; iy1 = 1 ; iy2 = icnt(2) 693 ENDIF 694 ENDIF 695 696 SELECT CASE (iom_file(kiomid)%iolib) 697 CASE (jpnf90 ) ; CALL iom_nf90_get( kiomid, idvar, inbdim, istart, icnt, ix1, ix2, iy1, iy2, & 698 & pv_r1d, pv_r2d, pv_r3d ) 699 CASE DEFAULT 700 END SELECT 701 702 IF( istop == nstop ) THEN ! no additional errors until this point... 703 IF(lwp) WRITE(numout,"(10x,' read ',a,' (rec: ',i6,') in ',a,' ok')") TRIM(cdvar), itime, TRIM(iom_file(kiomid)%name) 704 705 !--- overlap areas and extra hallows (mpp) 706 IF( PRESENT(pv_r2d) .AND. idom /= jpdom_unknown ) THEN 707 CALL lbc_lnk( pv_r2d,'Z',-999.,'no0' ) 708 ELSEIF( PRESENT(pv_r3d) .AND. idom /= jpdom_unknown ) THEN 709 ! this if could be simplified with the new lbc_lnk that works with any size of the 3rd dimension 710 IF( icnt(3) == jpk ) THEN 711 CALL lbc_lnk( pv_r3d,'Z',-999.,'no0' ) 712 ELSE ! put some arbitrary value (a call to lbc_lnk will be done later...) 713 DO jj = nlcj+1, jpj ; pv_r3d(1:nlci, jj, :) = pv_r3d(1:nlci, nlej, :) ; END DO 714 DO ji = nlci+1, jpi ; pv_r3d(ji , : , :) = pv_r3d(nlei , : , :) ; END DO 715 ENDIF 716 ENDIF 717 718 !--- Apply scale_factor and offset 719 zscf = iom_file(kiomid)%scf(idvar) ! scale factor 720 zofs = iom_file(kiomid)%ofs(idvar) ! offset 721 IF( PRESENT(pv_r1d) ) THEN 722 IF( zscf /= 1. ) pv_r1d(:) = pv_r1d(:) * zscf 723 IF( zofs /= 0. ) pv_r1d(:) = pv_r1d(:) + zofs 724 ELSEIF( PRESENT(pv_r2d) ) THEN 725 !CDIR COLLAPSE 726 IF( zscf /= 1.) 
pv_r2d(:,:) = pv_r2d(:,:) * zscf 727 !CDIR COLLAPSE 728 IF( zofs /= 0.) pv_r2d(:,:) = pv_r2d(:,:) + zofs 729 ELSEIF( PRESENT(pv_r3d) ) THEN 730 !CDIR COLLAPSE 731 IF( zscf /= 1.) pv_r3d(:,:,:) = pv_r3d(:,:,:) * zscf 732 !CDIR COLLAPSE 733 IF( zofs /= 0.) pv_r3d(:,:,:) = pv_r3d(:,:,:) + zofs 734 ENDIF 735 ! 1290 ! return if statment idvar > 0 .AND. istop == nstop is false 1291 RETURN 736 1292 ENDIF 737 1293 ! 1294 ELSE ! read using XIOS. Only if KEY_IOMPUT is defined 1295 #if defined key_iomput 1296 !would be good to be able to check which context is active and swap only if current is not restart 1297 CALL iom_swap( TRIM(crxios_context) ) 1298 IF( PRESENT(pv_r3d) ) THEN 1299 pv_r3d(:, :, :) = 0. 1300 if(lwp) write(numout,*) 'XIOS RST READ (3D): ',trim(cdvar) 1301 CALL xios_recv_field( trim(cdvar), pv_r3d) 1302 IF(idom /= jpdom_unknown ) then 1303 CALL lbc_lnk( 'iom', pv_r3d,'Z',-999.,'no0' ) 1304 ENDIF 1305 ELSEIF( PRESENT(pv_r2d) ) THEN 1306 pv_r2d(:, :) = 0. 1307 if(lwp) write(numout,*) 'XIOS RST READ (2D): ', trim(cdvar) 1308 CALL xios_recv_field( trim(cdvar), pv_r2d) 1309 IF(idom /= jpdom_unknown ) THEN 1310 CALL lbc_lnk('iom', pv_r2d,'Z',-999.,'no0') 1311 ENDIF 1312 ELSEIF( PRESENT(pv_r1d) ) THEN 1313 pv_r1d(:) = 0. 1314 if(lwp) write(numout,*) 'XIOS RST READ (1D): ', trim(cdvar) 1315 CALL xios_recv_field( trim(cdvar), pv_r1d) 1316 ENDIF 1317 CALL iom_swap( TRIM(cxios_context) ) 1318 #else 1319 istop = istop + 1 1320 clinfo = 'Can not use XIOS in iom_get_123d, file: '//trim(clname)//', var:'//trim(cdvar) 1321 #endif 1322 ENDIF 1323 !some final adjustments 1324 ! C1D case : always call lbc_lnk to replicate the central value over the whole 3X3 domain 1325 1326 !--- Apply scale_factor and offset 1327 zscf = iom_file(kiomid)%scf(idvar) ! scale factor 1328 zofs = iom_file(kiomid)%ofs(idvar) ! offset 1329 IF( PRESENT(pv_r1d) ) THEN 1330 IF( zscf /= 1. ) pv_r1d(:) = pv_r1d(:) * zscf 1331 IF( zofs /= 0. ) pv_r1d(:) = pv_r1d(:) + zofs 1332 ELSEIF( PRESENT(pv_r2d) ) THEN 1333 IF( zscf /= 1.) pv_r2d(:,:) = pv_r2d(:,:) * zscf 1334 IF( zofs /= 0.) pv_r2d(:,:) = pv_r2d(:,:) + zofs 1335 ELSEIF( PRESENT(pv_r3d) ) THEN 1336 IF( zscf /= 1.) pv_r3d(:,:,:) = pv_r3d(:,:,:) * zscf 1337 IF( zofs /= 0.) pv_r3d(:,:,:) = pv_r3d(:,:,:) + zofs 738 1338 ENDIF 739 1339 ! … … 741 1341 742 1342 743 SUBROUTINE iom_gettime( kiomid, ptime, cdvar, kntime, cdunits, cdcalendar ) 744 !!-------------------------------------------------------------------- 745 !! *** SUBROUTINE iom_gettime *** 746 !! 747 !! ** Purpose : read the time axis cdvar in the file 748 !!-------------------------------------------------------------------- 749 INTEGER , INTENT(in ) :: kiomid ! file Identifier 750 REAL(wp), DIMENSION(:) , INTENT( out) :: ptime ! the time axis 751 CHARACTER(len=*), OPTIONAL , INTENT(in ) :: cdvar ! time axis name 752 INTEGER , OPTIONAL , INTENT( out) :: kntime ! number of times in file 753 CHARACTER(len=*), OPTIONAL , INTENT( out) :: cdunits ! units attribute of time coordinate 754 CHARACTER(len=*), OPTIONAL , INTENT( out) :: cdcalendar ! calendar attribute of 755 ! 756 INTEGER, DIMENSION(1) :: kdimsz 757 INTEGER :: idvar ! id of the variable 758 CHARACTER(LEN=32) :: tname ! local name of time coordinate 759 CHARACTER(LEN=100) :: clinfo ! info character 760 !--------------------------------------------------------------------- 761 ! 
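The scale_factor / add_offset unpacking performed at the end of iom_get_123d above follows the usual netCDF packing convention: the value handed back to the caller is raw * scale_factor + add_offset, and the multiplication and addition are skipped when the attributes hold their neutral values (1 and 0). A minimal, self-contained sketch of that unpacking step; the program and variable names are illustrative only and not part of iom.F90:

      PROGRAM demo_unpack
         IMPLICIT NONE
         INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)     ! double precision, comparable to NEMO's wp
         REAL(wp) :: zraw(2,2), zscf, zofs
         zraw = RESHAPE( (/ 1._wp, 2._wp, 3._wp, 4._wp /), (/ 2, 2 /) )
         zscf = 0.01_wp       ! scale_factor as it would be read from the file
         zofs = 273.15_wp     ! add_offset   as it would be read from the file
         IF( zscf /= 1._wp ) zraw(:,:) = zraw(:,:) * zscf          ! skip the no-op case, as iom_get_123d does
         IF( zofs /= 0._wp ) zraw(:,:) = zraw(:,:) + zofs
         WRITE(*,*) 'unpacked values: ', zraw
      END PROGRAM demo_unpack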
762 IF ( PRESENT(cdvar) ) THEN 763 tname = cdvar 764 ELSE 765 tname = iom_file(kiomid)%uldname 766 ENDIF 1343 FUNCTION iom_getszuld ( kiomid ) 1344 !!----------------------------------------------------------------------- 1345 !! *** FUNCTION iom_getszuld *** 1346 !! 1347 !! ** Purpose : get the size of the unlimited dimension in a file 1348 !! (return -1 if not found) 1349 !!----------------------------------------------------------------------- 1350 INTEGER, INTENT(in ) :: kiomid ! file Identifier 1351 ! 1352 INTEGER :: iom_getszuld 1353 !!----------------------------------------------------------------------- 1354 iom_getszuld = -1 767 1355 IF( kiomid > 0 ) THEN 768 clinfo = 'iom_gettime, file: '//trim(iom_file(kiomid)%name)//', var: '//trim(tname) 769 IF ( PRESENT(kntime) ) THEN 770 idvar = iom_varid( kiomid, tname, kdimsz = kdimsz ) 771 kntime = kdimsz(1) 772 ELSE 773 idvar = iom_varid( kiomid, tname ) 774 ENDIF 775 ! 776 ptime(:) = 0. ! default definition 777 IF( idvar > 0 ) THEN 778 IF( iom_file(kiomid)%ndims(idvar) == 1 ) THEN 779 IF( iom_file(kiomid)%luld(idvar) ) THEN 780 IF( iom_file(kiomid)%dimsz(1,idvar) <= size(ptime) ) THEN 781 SELECT CASE (iom_file(kiomid)%iolib) 782 CASE (jpnf90 ) ; CALL iom_nf90_gettime( kiomid, idvar, ptime, cdunits, cdcalendar ) 783 CASE DEFAULT 784 END SELECT 785 ELSE 786 WRITE(ctmp1,*) 'error with the size of ptime ',size(ptime),iom_file(kiomid)%dimsz(1,idvar) 787 CALL ctl_stop( trim(clinfo), trim(ctmp1) ) 788 ENDIF 789 ELSE 790 CALL ctl_stop( trim(clinfo), 'variable dimension is not unlimited... use iom_get' ) 791 ENDIF 792 ELSE 793 CALL ctl_stop( trim(clinfo), 'the variable has more than 1 dimension' ) 794 ENDIF 795 ELSE 796 CALL ctl_stop( trim(clinfo), 'variable not found in '//iom_file(kiomid)%name ) 797 ENDIF 798 ENDIF 799 ! 800 END SUBROUTINE iom_gettime 801 1356 IF( iom_file(kiomid)%iduld > 0 ) iom_getszuld = iom_file(kiomid)%lenuld 1357 ENDIF 1358 END FUNCTION iom_getszuld 1359 1360 1361 !!---------------------------------------------------------------------- 1362 !! INTERFACE iom_chkatt 1363 !!---------------------------------------------------------------------- 1364 SUBROUTINE iom_chkatt( kiomid, cdatt, llok, ksize, cdvar ) 1365 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1366 CHARACTER(len=*), INTENT(in ) :: cdatt ! Name of the attribute 1367 LOGICAL , INTENT( out) :: llok ! Error code 1368 INTEGER , INTENT( out), OPTIONAL :: ksize ! Size of the attribute array 1369 CHARACTER(len=*), INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1370 ! 1371 IF( kiomid > 0 ) THEN 1372 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_chkatt( kiomid, cdatt, llok, ksize=ksize, cdvar=cdvar ) 1373 ENDIF 1374 ! 1375 END SUBROUTINE iom_chkatt 802 1376 803 1377 !!---------------------------------------------------------------------- 804 1378 !! INTERFACE iom_getatt 805 1379 !!---------------------------------------------------------------------- 806 SUBROUTINE iom_g0d_intatt( kiomid, cdatt, pvar ) 807 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 808 CHARACTER(len=*), INTENT(in ) :: cdatt ! Name of the attribute 809 INTEGER , INTENT( out) :: pvar ! read field 1380 SUBROUTINE iom_g0d_iatt( kiomid, cdatt, katt0d, cdvar ) 1381 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1382 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1383 INTEGER , INTENT( out) :: katt0d ! read field 1384 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 810 1385 ! 
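The iom_g0d_iatt / iom_g1d_iatt / iom_g0d_ratt / ... specifics in this block are type- and rank-specific implementations behind a single generic name (iom_getatt, declared with an INTERFACE block in the module header, which is not part of this hunk), so callers use one name and the compiler selects the right routine from the argument types. A minimal, self-contained sketch of that generic-interface pattern, with made-up names:

      MODULE demo_generic_att                    ! illustrative module, not part of iom.F90
         IMPLICIT NONE
         INTERFACE get_att                       ! one generic name for several specifics
            MODULE PROCEDURE get_att_int, get_att_real
         END INTERFACE get_att
      CONTAINS
         SUBROUTINE get_att_int( cdatt, katt )
            CHARACTER(len=*), INTENT(in ) :: cdatt
            INTEGER         , INTENT(out) :: katt
            katt = 0   ;   WRITE(*,*) 'integer attribute: ', cdatt
         END SUBROUTINE get_att_int
         SUBROUTINE get_att_real( cdatt, patt )
            CHARACTER(len=*), INTENT(in ) :: cdatt
            REAL            , INTENT(out) :: patt
            patt = 0.  ;   WRITE(*,*) 'real attribute: ', cdatt
         END SUBROUTINE get_att_real
      END MODULE demo_generic_att

      PROGRAM demo_generic_att_use
         USE demo_generic_att
         IMPLICIT NONE
         INTEGER :: ival
         REAL    :: rval
         CALL get_att( 'nlev', ival )            ! resolves to get_att_int
         CALL get_att( 'rdt' , rval )            ! resolves to get_att_real
      END PROGRAM demo_generic_att_use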
811 1386 IF( kiomid > 0 ) THEN 812 IF( iom_file(kiomid)%nfid > 0 ) THEN 813 SELECT CASE (iom_file(kiomid)%iolib) 814 CASE (jpnf90 ) ; CALL iom_nf90_getatt( kiomid, cdatt, pvar ) 815 CASE DEFAULT 816 END SELECT 817 ENDIF 818 ENDIF 819 END SUBROUTINE iom_g0d_intatt 1387 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_getatt( kiomid, cdatt, katt0d = katt0d, cdvar=cdvar ) 1388 ENDIF 1389 END SUBROUTINE iom_g0d_iatt 1390 1391 SUBROUTINE iom_g1d_iatt( kiomid, cdatt, katt1d, cdvar ) 1392 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1393 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1394 INTEGER, DIMENSION(:) , INTENT( out) :: katt1d ! read field 1395 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1396 ! 1397 IF( kiomid > 0 ) THEN 1398 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_getatt( kiomid, cdatt, katt1d = katt1d, cdvar=cdvar ) 1399 ENDIF 1400 END SUBROUTINE iom_g1d_iatt 1401 1402 SUBROUTINE iom_g0d_ratt( kiomid, cdatt, patt0d, cdvar ) 1403 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1404 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1405 REAL(wp) , INTENT( out) :: patt0d ! read field 1406 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1407 ! 1408 IF( kiomid > 0 ) THEN 1409 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_getatt( kiomid, cdatt, patt0d = patt0d, cdvar=cdvar ) 1410 ENDIF 1411 END SUBROUTINE iom_g0d_ratt 1412 1413 SUBROUTINE iom_g1d_ratt( kiomid, cdatt, patt1d, cdvar ) 1414 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1415 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1416 REAL(wp), DIMENSION(:), INTENT( out) :: patt1d ! read field 1417 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1418 ! 1419 IF( kiomid > 0 ) THEN 1420 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_getatt( kiomid, cdatt, patt1d = patt1d, cdvar=cdvar ) 1421 ENDIF 1422 END SUBROUTINE iom_g1d_ratt 1423 1424 SUBROUTINE iom_g0d_catt( kiomid, cdatt, cdatt0d, cdvar ) 1425 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1426 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1427 CHARACTER(len=*) , INTENT( out) :: cdatt0d ! read field 1428 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1429 ! 1430 IF( kiomid > 0 ) THEN 1431 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_getatt( kiomid, cdatt, cdatt0d = cdatt0d, cdvar=cdvar ) 1432 ENDIF 1433 END SUBROUTINE iom_g0d_catt 1434 1435 1436 !!---------------------------------------------------------------------- 1437 !! INTERFACE iom_putatt 1438 !!---------------------------------------------------------------------- 1439 SUBROUTINE iom_p0d_iatt( kiomid, cdatt, katt0d, cdvar ) 1440 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1441 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1442 INTEGER , INTENT(in ) :: katt0d ! written field 1443 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1444 ! 1445 IF( kiomid > 0 ) THEN 1446 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_putatt( kiomid, cdatt, katt0d = katt0d, cdvar=cdvar ) 1447 ENDIF 1448 END SUBROUTINE iom_p0d_iatt 1449 1450 SUBROUTINE iom_p1d_iatt( kiomid, cdatt, katt1d, cdvar ) 1451 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1452 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1453 INTEGER, DIMENSION(:) , INTENT(in ) :: katt1d ! written field 1454 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1455 ! 
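These iom_getatt / iom_putatt wrappers pass their OPTIONAL cdvar argument straight through to the iom_nf90_* routines with the keyword form cdvar=cdvar. Standard Fortran permits this: when the actual argument is itself an absent optional, the corresponding dummy in the called routine simply arrives as not PRESENT. The same rule, combined with the local-default idiom used for the ldxios flags in iom_g0d and the iom_rstput routines, in a minimal self-contained sketch (all names are illustrative):

      PROGRAM demo_optional_forwarding
         IMPLICIT NONE
         CALL outer( 5 )                          ! no variable name supplied
         CALL outer( 7, cdvar='votemper' )        ! variable name supplied
      CONTAINS
         SUBROUTINE outer( kval, cdvar )
            INTEGER         , INTENT(in)           :: kval
            CHARACTER(len=*), INTENT(in), OPTIONAL :: cdvar
            CALL inner( kval, cdvar=cdvar )       ! an absent optional may be forwarded as-is
         END SUBROUTINE outer
         SUBROUTINE inner( kval, cdvar )
            INTEGER         , INTENT(in)           :: kval
            CHARACTER(len=*), INTENT(in), OPTIONAL :: cdvar
            CHARACTER(len=16) :: clvar
            clvar = 'unnamed'                     ! local default, same idiom as llx/ldxios
            IF( PRESENT(cdvar) ) clvar = cdvar
            WRITE(*,*) kval, ' for variable ', TRIM(clvar)
         END SUBROUTINE inner
      END PROGRAM demo_optional_forwarding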
1456 IF( kiomid > 0 ) THEN 1457 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_putatt( kiomid, cdatt, katt1d = katt1d, cdvar=cdvar ) 1458 ENDIF 1459 END SUBROUTINE iom_p1d_iatt 1460 1461 SUBROUTINE iom_p0d_ratt( kiomid, cdatt, patt0d, cdvar ) 1462 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1463 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1464 REAL(wp) , INTENT(in ) :: patt0d ! written field 1465 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1466 ! 1467 IF( kiomid > 0 ) THEN 1468 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_putatt( kiomid, cdatt, patt0d = patt0d, cdvar=cdvar ) 1469 ENDIF 1470 END SUBROUTINE iom_p0d_ratt 1471 1472 SUBROUTINE iom_p1d_ratt( kiomid, cdatt, patt1d, cdvar ) 1473 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1474 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1475 REAL(wp), DIMENSION(:), INTENT(in ) :: patt1d ! written field 1476 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1477 ! 1478 IF( kiomid > 0 ) THEN 1479 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_putatt( kiomid, cdatt, patt1d = patt1d, cdvar=cdvar ) 1480 ENDIF 1481 END SUBROUTINE iom_p1d_ratt 1482 1483 SUBROUTINE iom_p0d_catt( kiomid, cdatt, cdatt0d, cdvar ) 1484 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 1485 CHARACTER(len=*) , INTENT(in ) :: cdatt ! Name of the attribute 1486 CHARACTER(len=*) , INTENT(in ) :: cdatt0d ! written field 1487 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! Name of the variable 1488 ! 1489 IF( kiomid > 0 ) THEN 1490 IF( iom_file(kiomid)%nfid > 0 ) CALL iom_nf90_putatt( kiomid, cdatt, cdatt0d = cdatt0d, cdvar=cdvar ) 1491 ENDIF 1492 END SUBROUTINE iom_p0d_catt 820 1493 821 1494 … … 823 1496 !! INTERFACE iom_rstput 824 1497 !!---------------------------------------------------------------------- 825 SUBROUTINE iom_rp0d( kt, kwrite, kiomid, cdvar, pvar, ktype )1498 SUBROUTINE iom_rp0d( kt, kwrite, kiomid, cdvar, pvar, ktype, ldxios ) 826 1499 INTEGER , INTENT(in) :: kt ! ocean time-step 827 1500 INTEGER , INTENT(in) :: kwrite ! writing time-step … … 830 1503 REAL(wp) , INTENT(in) :: pvar ! written field 831 1504 INTEGER , INTENT(in), OPTIONAL :: ktype ! variable external type 1505 LOGICAL, OPTIONAL :: ldxios ! xios write flag 1506 LOGICAL :: llx ! local xios write flag 832 1507 INTEGER :: ivid ! variable id 833 IF( kiomid > 0 ) THEN 834 IF( iom_file(kiomid)%nfid > 0 ) THEN 835 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 836 SELECT CASE (iom_file(kiomid)%iolib) 837 CASE (jpnf90 ) ; CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r0d = pvar ) 838 CASE DEFAULT 839 END SELECT 1508 1509 llx = .FALSE. 1510 IF(PRESENT(ldxios)) llx = ldxios 1511 IF( llx ) THEN 1512 #ifdef key_iomput 1513 IF( kt == kwrite ) THEN 1514 IF(lwp) write(numout,*) 'RESTART: write (XIOS 0D) ',trim(cdvar) 1515 CALL xios_send_field(trim(cdvar), pvar) 1516 ENDIF 1517 #endif 1518 ELSE 1519 IF( kiomid > 0 ) THEN 1520 IF( iom_file(kiomid)%nfid > 0 ) THEN 1521 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 1522 CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r0d = pvar ) 1523 ENDIF 840 1524 ENDIF 841 1525 ENDIF 842 1526 END SUBROUTINE iom_rp0d 843 1527 844 SUBROUTINE iom_rp1d( kt, kwrite, kiomid, cdvar, pvar, ktype )1528 SUBROUTINE iom_rp1d( kt, kwrite, kiomid, cdvar, pvar, ktype, ldxios ) 845 1529 INTEGER , INTENT(in) :: kt ! ocean time-step 846 1530 INTEGER , INTENT(in) :: kwrite ! 
writing time-step … … 849 1533 REAL(wp) , INTENT(in), DIMENSION( :) :: pvar ! written field 850 1534 INTEGER , INTENT(in), OPTIONAL :: ktype ! variable external type 1535 LOGICAL, OPTIONAL :: ldxios ! xios write flag 1536 LOGICAL :: llx ! local xios write flag 851 1537 INTEGER :: ivid ! variable id 852 IF( kiomid > 0 ) THEN 853 IF( iom_file(kiomid)%nfid > 0 ) THEN 854 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 855 SELECT CASE (iom_file(kiomid)%iolib) 856 CASE (jpnf90 ) ; CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r1d = pvar ) 857 CASE DEFAULT 858 END SELECT 1538 1539 llx = .FALSE. 1540 IF(PRESENT(ldxios)) llx = ldxios 1541 IF( llx ) THEN 1542 #ifdef key_iomput 1543 IF( kt == kwrite ) THEN 1544 IF(lwp) write(numout,*) 'RESTART: write (XIOS 1D) ',trim(cdvar) 1545 CALL xios_send_field(trim(cdvar), pvar) 1546 ENDIF 1547 #endif 1548 ELSE 1549 IF( kiomid > 0 ) THEN 1550 IF( iom_file(kiomid)%nfid > 0 ) THEN 1551 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 1552 CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r1d = pvar ) 1553 ENDIF 859 1554 ENDIF 860 1555 ENDIF 861 1556 END SUBROUTINE iom_rp1d 862 1557 863 SUBROUTINE iom_rp2d( kt, kwrite, kiomid, cdvar, pvar, ktype )1558 SUBROUTINE iom_rp2d( kt, kwrite, kiomid, cdvar, pvar, ktype, ldxios ) 864 1559 INTEGER , INTENT(in) :: kt ! ocean time-step 865 1560 INTEGER , INTENT(in) :: kwrite ! writing time-step … … 868 1563 REAL(wp) , INTENT(in), DIMENSION(:, : ) :: pvar ! written field 869 1564 INTEGER , INTENT(in), OPTIONAL :: ktype ! variable external type 1565 LOGICAL, OPTIONAL :: ldxios ! xios write flag 1566 LOGICAL :: llx 870 1567 INTEGER :: ivid ! variable id 871 IF( kiomid > 0 ) THEN 872 IF( iom_file(kiomid)%nfid > 0 ) THEN 873 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 874 SELECT CASE (iom_file(kiomid)%iolib) 875 CASE (jpnf90 ) ; CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r2d = pvar ) 876 CASE DEFAULT 877 END SELECT 1568 1569 llx = .FALSE. 1570 IF(PRESENT(ldxios)) llx = ldxios 1571 IF( llx ) THEN 1572 #ifdef key_iomput 1573 IF( kt == kwrite ) THEN 1574 IF(lwp) write(numout,*) 'RESTART: write (XIOS 2D) ',trim(cdvar) 1575 CALL xios_send_field(trim(cdvar), pvar) 1576 ENDIF 1577 #endif 1578 ELSE 1579 IF( kiomid > 0 ) THEN 1580 IF( iom_file(kiomid)%nfid > 0 ) THEN 1581 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 1582 CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r2d = pvar ) 1583 ENDIF 878 1584 ENDIF 879 1585 ENDIF 880 1586 END SUBROUTINE iom_rp2d 881 1587 882 SUBROUTINE iom_rp3d( kt, kwrite, kiomid, cdvar, pvar, ktype )1588 SUBROUTINE iom_rp3d( kt, kwrite, kiomid, cdvar, pvar, ktype, ldxios ) 883 1589 INTEGER , INTENT(in) :: kt ! ocean time-step 884 1590 INTEGER , INTENT(in) :: kwrite ! writing time-step … … 887 1593 REAL(wp) , INTENT(in), DIMENSION(:,:,:) :: pvar ! written field 888 1594 INTEGER , INTENT(in), OPTIONAL :: ktype ! variable external type 1595 LOGICAL, OPTIONAL :: ldxios ! xios write flag 1596 LOGICAL :: llx ! local xios write flag 889 1597 INTEGER :: ivid ! variable id 890 IF( kiomid > 0 ) THEN 891 IF( iom_file(kiomid)%nfid > 0 ) THEN 892 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 893 SELECT CASE (iom_file(kiomid)%iolib) 894 CASE (jpnf90 ) ; CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r3d = pvar ) 895 CASE DEFAULT 896 END SELECT 1598 1599 llx = .FALSE. 
1600 IF(PRESENT(ldxios)) llx = ldxios 1601 IF( llx ) THEN 1602 #ifdef key_iomput 1603 IF( kt == kwrite ) THEN 1604 IF(lwp) write(numout,*) 'RESTART: write (XIOS 3D) ',trim(cdvar) 1605 CALL xios_send_field(trim(cdvar), pvar) 1606 ENDIF 1607 #endif 1608 ELSE 1609 IF( kiomid > 0 ) THEN 1610 IF( iom_file(kiomid)%nfid > 0 ) THEN 1611 ivid = iom_varid( kiomid, cdvar, ldstop = .FALSE. ) 1612 CALL iom_nf90_rstput( kt, kwrite, kiomid, cdvar, ivid, ktype, pv_r3d = pvar ) 1613 ENDIF 897 1614 ENDIF 898 1615 ENDIF 899 1616 END SUBROUTINE iom_rp3d 900 1617 1618 1619 SUBROUTINE iom_delay_rst( cdaction, cdcpnt, kncid ) 1620 !!--------------------------------------------------------------------- 1621 !! Routine iom_delay_rst: used read/write restart related to mpp_delay 1622 !! 1623 !!--------------------------------------------------------------------- 1624 CHARACTER(len=*), INTENT(in ) :: cdaction ! 1625 CHARACTER(len=*), INTENT(in ) :: cdcpnt 1626 INTEGER , INTENT(in ) :: kncid 1627 ! 1628 INTEGER :: ji 1629 INTEGER :: indim 1630 LOGICAL :: llattexist 1631 REAL(wp), ALLOCATABLE, DIMENSION(:) :: zreal1d 1632 !!--------------------------------------------------------------------- 1633 ! 1634 ! =================================== 1635 IF( TRIM(cdaction) == 'READ' ) THEN ! read restart related to mpp_delay ! 1636 ! =================================== 1637 DO ji = 1, nbdelay 1638 IF ( c_delaycpnt(ji) == cdcpnt ) THEN 1639 CALL iom_chkatt( kncid, 'DELAY_'//c_delaylist(ji), llattexist, indim ) 1640 IF( llattexist ) THEN 1641 ALLOCATE( todelay(ji)%z1d(indim) ) 1642 CALL iom_getatt( kncid, 'DELAY_'//c_delaylist(ji), todelay(ji)%z1d(:) ) 1643 ndelayid(ji) = 0 ! set to 0 to specify that the value was read in the restart 1644 ENDIF 1645 ENDIF 1646 END DO 1647 ! ==================================== 1648 ELSE ! write restart related to mpp_delay ! 1649 ! ==================================== 1650 DO ji = 1, nbdelay ! save only ocean delayed global communication variables 1651 IF ( c_delaycpnt(ji) == cdcpnt ) THEN 1652 IF( ASSOCIATED(todelay(ji)%z1d) ) THEN 1653 CALL mpp_delay_rcv(ji) ! make sure %z1d is received 1654 CALL iom_putatt( kncid, 'DELAY_'//c_delaylist(ji), todelay(ji)%z1d(:) ) 1655 ENDIF 1656 ENDIF 1657 END DO 1658 ! 1659 ENDIF 1660 1661 END SUBROUTINE iom_delay_rst 1662 1663 901 1664 902 1665 !!---------------------------------------------------------------------- … … 907 1670 REAL(wp) , INTENT(in) :: pfield0d 908 1671 REAL(wp) , DIMENSION(jpi,jpj) :: zz ! masson 1672 #if defined key_iomput 1673 zz(:,:)=pfield0d 1674 CALL xios_send_field(cdname, zz) 1675 !CALL xios_send_field(cdname, (/pfield0d/)) 1676 #else 909 1677 IF( .FALSE. ) WRITE(numout,*) cdname, pfield0d ! useless test to avoid compilation warnings 1678 #endif 910 1679 END SUBROUTINE iom_p0d 911 1680 … … 913 1682 CHARACTER(LEN=*) , INTENT(in) :: cdname 914 1683 REAL(wp), DIMENSION(:), INTENT(in) :: pfield1d 1684 #if defined key_iomput 1685 CALL xios_send_field( cdname, RESHAPE( (/pfield1d/), (/1,1,SIZE(pfield1d)/) ) ) 1686 #else 915 1687 IF( .FALSE. ) WRITE(numout,*) cdname, pfield1d ! useless test to avoid compilation warnings 1688 #endif 916 1689 END SUBROUTINE iom_p1d 917 1690 … … 919 1692 CHARACTER(LEN=*) , INTENT(in) :: cdname 920 1693 REAL(wp), DIMENSION(:,:), INTENT(in) :: pfield2d 1694 #if defined key_iomput 1695 CALL xios_send_field(cdname, pfield2d) 1696 #else 921 1697 IF( .FALSE. ) WRITE(numout,*) cdname, pfield2d ! 
useless test to avoid compilation warnings 1698 #endif 922 1699 END SUBROUTINE iom_p2d 923 1700 … … 925 1702 CHARACTER(LEN=*) , INTENT(in) :: cdname 926 1703 REAL(wp), DIMENSION(:,:,:), INTENT(in) :: pfield3d 1704 #if defined key_iomput 1705 CALL xios_send_field( cdname, pfield3d ) 1706 #else 927 1707 IF( .FALSE. ) WRITE(numout,*) cdname, pfield3d ! useless test to avoid compilation warnings 1708 #endif 928 1709 END SUBROUTINE iom_p3d 1710 1711 #if defined key_iomput 929 1712 !!---------------------------------------------------------------------- 930 931 1713 !! 'key_iomput' XIOS interface 1714 !!---------------------------------------------------------------------- 1715 1716 SUBROUTINE iom_set_domain_attr( cdid, ni_glo, nj_glo, ibegin, jbegin, ni, nj, & 1717 & data_dim, data_ibegin, data_ni, data_jbegin, data_nj, lonvalue, latvalue, mask, & 1718 & nvertex, bounds_lon, bounds_lat, area ) 1719 !!---------------------------------------------------------------------- 1720 !!---------------------------------------------------------------------- 1721 CHARACTER(LEN=*) , INTENT(in) :: cdid 1722 INTEGER , OPTIONAL, INTENT(in) :: ni_glo, nj_glo, ibegin, jbegin, ni, nj 1723 INTEGER , OPTIONAL, INTENT(in) :: data_dim, data_ibegin, data_ni, data_jbegin, data_nj 1724 INTEGER , OPTIONAL, INTENT(in) :: nvertex 1725 REAL(wp), DIMENSION(:) , OPTIONAL, INTENT(in) :: lonvalue, latvalue 1726 REAL(wp), DIMENSION(:,:), OPTIONAL, INTENT(in) :: bounds_lon, bounds_lat, area 1727 LOGICAL , DIMENSION(:) , OPTIONAL, INTENT(in) :: mask 1728 !!---------------------------------------------------------------------- 1729 ! 1730 IF( xios_is_valid_domain (cdid) ) THEN 1731 CALL xios_set_domain_attr ( cdid, ni_glo=ni_glo, nj_glo=nj_glo, ibegin=ibegin, jbegin=jbegin, ni=ni, nj=nj, & 1732 & data_dim=data_dim, data_ibegin=data_ibegin, data_ni=data_ni, data_jbegin=data_jbegin, data_nj=data_nj , & 1733 & lonvalue_1D=lonvalue, latvalue_1D=latvalue, mask_1D=mask, nvertex=nvertex, bounds_lon_1D=bounds_lon, & 1734 & bounds_lat_1D=bounds_lat, area=area, type='curvilinear') 1735 ENDIF 1736 IF( xios_is_valid_domaingroup(cdid) ) THEN 1737 CALL xios_set_domaingroup_attr( cdid, ni_glo=ni_glo, nj_glo=nj_glo, ibegin=ibegin, jbegin=jbegin, ni=ni, nj=nj, & 1738 & data_dim=data_dim, data_ibegin=data_ibegin, data_ni=data_ni, data_jbegin=data_jbegin, data_nj=data_nj , & 1739 & lonvalue_1D=lonvalue, latvalue_1D=latvalue, mask_1D=mask, nvertex=nvertex, bounds_lon_1D=bounds_lon, & 1740 & bounds_lat_1D=bounds_lat, area=area, type='curvilinear' ) 1741 ENDIF 1742 ! 1743 CALL xios_solve_inheritance() 1744 ! 1745 END SUBROUTINE iom_set_domain_attr 1746 1747 1748 SUBROUTINE iom_set_zoom_domain_attr( cdid, ibegin, jbegin, ni, nj ) 1749 !!---------------------------------------------------------------------- 1750 !!---------------------------------------------------------------------- 1751 CHARACTER(LEN=*), INTENT(in) :: cdid 1752 INTEGER , INTENT(in) :: ibegin, jbegin, ni, nj 1753 ! 1754 TYPE(xios_gridgroup) :: gridgroup_hdl 1755 TYPE(xios_grid) :: grid_hdl 1756 TYPE(xios_domain) :: domain_hdl 1757 TYPE(xios_axis) :: axis_hdl 1758 CHARACTER(LEN=64) :: cldomrefid ! domain_ref name 1759 CHARACTER(len=1) :: cl1 ! last character of this name 1760 !!---------------------------------------------------------------------- 1761 ! 1762 IF( xios_is_valid_zoom_domain(cdid) ) THEN 1763 ! define the zoom_domain attributs 1764 CALL xios_set_zoom_domain_attr( cdid, ibegin=ibegin, jbegin=jbegin, ni=ni, nj=nj ) 1765 ! 
define a new 2D grid with this new domain 1766 CALL xios_get_handle("grid_definition", gridgroup_hdl ) 1767 CALL xios_add_child(gridgroup_hdl, grid_hdl, TRIM(cdid)//'_2D' ) ! add a new 2D grid to grid_definition 1768 CALL xios_add_child(grid_hdl, domain_hdl, TRIM(cdid) ) ! add its domain 1769 ! define a new 3D grid with this new domain 1770 CALL xios_add_child(gridgroup_hdl, grid_hdl, TRIM(cdid)//'_3D' ) ! add a new 3D grid to grid_definition 1771 CALL xios_add_child(grid_hdl, domain_hdl, TRIM(cdid) ) ! add its domain 1772 ! vertical axis 1773 cl1 = cdid(LEN_TRIM(cdid):) ! last letter of cdid 1774 cl1 = CHAR(ICHAR(cl1)+32) ! from upper to lower case 1775 CALL xios_add_child(grid_hdl, axis_hdl, 'depth'//cl1) ! add its axis 1776 ENDIF 1777 ! 1778 END SUBROUTINE iom_set_zoom_domain_attr 1779 1780 1781 SUBROUTINE iom_set_axis_attr( cdid, paxis, bounds ) 1782 !!---------------------------------------------------------------------- 1783 !!---------------------------------------------------------------------- 1784 CHARACTER(LEN=*) , INTENT(in) :: cdid 1785 REAL(wp), DIMENSION(:) , OPTIONAL, INTENT(in) :: paxis 1786 REAL(wp), DIMENSION(:,:), OPTIONAL, INTENT(in) :: bounds 1787 !!---------------------------------------------------------------------- 1788 IF( PRESENT(paxis) ) THEN 1789 IF( xios_is_valid_axis (cdid) ) CALL xios_set_axis_attr ( cdid, n_glo=SIZE(paxis), value=paxis ) 1790 IF( xios_is_valid_axisgroup(cdid) ) CALL xios_set_axisgroup_attr( cdid, n_glo=SIZE(paxis), value=paxis ) 1791 ENDIF 1792 IF( xios_is_valid_axis (cdid) ) CALL xios_set_axis_attr ( cdid, bounds=bounds ) 1793 IF( xios_is_valid_axisgroup(cdid) ) CALL xios_set_axisgroup_attr( cdid, bounds=bounds ) 1794 CALL xios_solve_inheritance() 1795 END SUBROUTINE iom_set_axis_attr 1796 1797 1798 SUBROUTINE iom_set_field_attr( cdid, freq_op, freq_offset ) 1799 !!---------------------------------------------------------------------- 1800 !!---------------------------------------------------------------------- 1801 CHARACTER(LEN=*) , INTENT(in) :: cdid 1802 TYPE(xios_duration), OPTIONAL, INTENT(in) :: freq_op 1803 TYPE(xios_duration), OPTIONAL, INTENT(in) :: freq_offset 1804 !!---------------------------------------------------------------------- 1805 IF( xios_is_valid_field (cdid) ) CALL xios_set_field_attr ( cdid, freq_op=freq_op, freq_offset=freq_offset ) 1806 IF( xios_is_valid_fieldgroup(cdid) ) CALL xios_set_fieldgroup_attr( cdid, freq_op=freq_op, freq_offset=freq_offset ) 1807 CALL xios_solve_inheritance() 1808 END SUBROUTINE iom_set_field_attr 1809 1810 1811 SUBROUTINE iom_set_file_attr( cdid, name, name_suffix ) 1812 !!---------------------------------------------------------------------- 1813 !!---------------------------------------------------------------------- 1814 CHARACTER(LEN=*) , INTENT(in) :: cdid 1815 CHARACTER(LEN=*),OPTIONAL , INTENT(in) :: name, name_suffix 1816 !!---------------------------------------------------------------------- 1817 IF( xios_is_valid_file (cdid) ) CALL xios_set_file_attr ( cdid, name=name, name_suffix=name_suffix ) 1818 IF( xios_is_valid_filegroup(cdid) ) CALL xios_set_filegroup_attr( cdid, name=name, name_suffix=name_suffix ) 1819 CALL xios_solve_inheritance() 1820 END SUBROUTINE iom_set_file_attr 1821 1822 1823 SUBROUTINE iom_get_file_attr( cdid, name, name_suffix, output_freq ) 1824 !!---------------------------------------------------------------------- 1825 !!---------------------------------------------------------------------- 1826 CHARACTER(LEN=*) , INTENT(in ) :: cdid 1827 
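In iom_set_zoom_domain_attr above, the vertical-axis name is built by lower-casing the last letter of the domain id with cl1 = CHAR(ICHAR(cl1)+32); the +32 shift is the distance between upper- and lower-case letters in the ASCII collating sequence, so the trick presumes an ASCII-compatible character set. A minimal self-contained sketch of that conversion (the names below are illustrative):

      PROGRAM demo_lowercase_last
         IMPLICIT NONE
         CHARACTER(len=8) :: clid = 'znl_T'
         CHARACTER(len=1) :: cl1
         cl1 = clid( LEN_TRIM(clid) : LEN_TRIM(clid) )                     ! last letter of the id: 'T'
         IF( cl1 >= 'A' .AND. cl1 <= 'Z' ) cl1 = CHAR( ICHAR(cl1) + 32 )   ! 'T' -> 't' (ASCII assumed)
         WRITE(*,*) 'axis name: ', 'depth'//cl1                            ! -> deptht
      END PROGRAM demo_lowercase_last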
CHARACTER(LEN=*),OPTIONAL , INTENT(out) :: name, name_suffix 1828 TYPE(xios_duration), OPTIONAL , INTENT(out) :: output_freq 1829 LOGICAL :: llexist1,llexist2,llexist3 1830 !--------------------------------------------------------------------- 1831 IF( PRESENT( name ) ) name = '' ! default values 1832 IF( PRESENT( name_suffix ) ) name_suffix = '' 1833 IF( PRESENT( output_freq ) ) output_freq = xios_duration(0,0,0,0,0,0) 1834 IF( xios_is_valid_file (cdid) ) THEN 1835 CALL xios_solve_inheritance() 1836 CALL xios_is_defined_file_attr ( cdid, name = llexist1, name_suffix = llexist2, output_freq = llexist3) 1837 IF(llexist1) CALL xios_get_file_attr ( cdid, name = name ) 1838 IF(llexist2) CALL xios_get_file_attr ( cdid, name_suffix = name_suffix ) 1839 IF(llexist3) CALL xios_get_file_attr ( cdid, output_freq = output_freq ) 1840 ENDIF 1841 IF( xios_is_valid_filegroup(cdid) ) THEN 1842 CALL xios_solve_inheritance() 1843 CALL xios_is_defined_filegroup_attr( cdid, name = llexist1, name_suffix = llexist2, output_freq = llexist3) 1844 IF(llexist1) CALL xios_get_filegroup_attr( cdid, name = name ) 1845 IF(llexist2) CALL xios_get_filegroup_attr( cdid, name_suffix = name_suffix ) 1846 IF(llexist3) CALL xios_get_filegroup_attr( cdid, output_freq = output_freq ) 1847 ENDIF 1848 END SUBROUTINE iom_get_file_attr 1849 1850 1851 SUBROUTINE iom_set_grid_attr( cdid, mask ) 1852 !!---------------------------------------------------------------------- 1853 !!---------------------------------------------------------------------- 1854 CHARACTER(LEN=*) , INTENT(in) :: cdid 1855 LOGICAL, DIMENSION(:,:,:), OPTIONAL, INTENT(in) :: mask 1856 !!---------------------------------------------------------------------- 1857 IF( xios_is_valid_grid (cdid) ) CALL xios_set_grid_attr ( cdid, mask_3D=mask ) 1858 IF( xios_is_valid_gridgroup(cdid) ) CALL xios_set_gridgroup_attr( cdid, mask_3D=mask ) 1859 CALL xios_solve_inheritance() 1860 END SUBROUTINE iom_set_grid_attr 1861 1862 SUBROUTINE iom_setkt( kt, cdname ) 1863 !!---------------------------------------------------------------------- 1864 !!---------------------------------------------------------------------- 1865 INTEGER , INTENT(in) :: kt 1866 CHARACTER(LEN=*), INTENT(in) :: cdname 1867 !!---------------------------------------------------------------------- 1868 CALL iom_swap( cdname ) ! swap to cdname context 1869 CALL xios_update_calendar(kt) 1870 IF( cdname /= TRIM(cxios_context) ) CALL iom_swap( TRIM(cxios_context) ) ! return back to nemo context 1871 END SUBROUTINE iom_setkt 1872 1873 SUBROUTINE iom_context_finalize( cdname ) 1874 !!---------------------------------------------------------------------- 1875 !!---------------------------------------------------------------------- 1876 CHARACTER(LEN=*), INTENT(in) :: cdname 1877 CHARACTER(LEN=120) :: clname 1878 !!---------------------------------------------------------------------- 1879 clname = cdname 1880 IF( TRIM(Agrif_CFixed()) .NE. '0' ) clname = TRIM(Agrif_CFixed())//"_"//clname 1881 IF( xios_is_valid_context(clname) ) THEN 1882 CALL iom_swap( cdname ) ! swap to cdname context 1883 CALL xios_context_finalize() ! finalize the context 1884 IF( cdname /= TRIM(cxios_context) ) CALL iom_swap( TRIM(cxios_context) ) ! return back to nemo context 1885 ENDIF 1886 ! 1887 END SUBROUTINE iom_context_finalize 1888 1889 1890 SUBROUTINE set_grid( cdgrd, plon, plat, ldxios, ldrxios ) 1891 !!---------------------------------------------------------------------- 1892 !! *** ROUTINE set_grid *** 1893 !! 1894 !! 
** Purpose : define horizontal grids 1895 !!---------------------------------------------------------------------- 1896 CHARACTER(LEN=1) , INTENT(in) :: cdgrd 1897 REAL(wp), DIMENSION(jpi,jpj), INTENT(in) :: plon 1898 REAL(wp), DIMENSION(jpi,jpj), INTENT(in) :: plat 1899 ! 1900 INTEGER :: ni, nj 1901 REAL(wp), DIMENSION(jpi,jpj,jpk) :: zmask 1902 LOGICAL, INTENT(IN) :: ldxios, ldrxios 1903 !!---------------------------------------------------------------------- 1904 ! 1905 ni = nlei-nldi+1 1906 nj = nlej-nldj+1 1907 ! 1908 CALL iom_set_domain_attr("grid_"//cdgrd, ni_glo=jpiglo, nj_glo=jpjglo, ibegin=nimpp+nldi-2, jbegin=njmpp+nldj-2, ni=ni, nj=nj) 1909 CALL iom_set_domain_attr("grid_"//cdgrd, data_dim=2, data_ibegin = 1-nldi, data_ni = jpi, data_jbegin = 1-nldj, data_nj = jpj) 1910 !don't define lon and lat for restart reading context. 1911 IF ( .NOT.ldrxios ) & 1912 CALL iom_set_domain_attr("grid_"//cdgrd, lonvalue = RESHAPE(plon(nldi:nlei, nldj:nlej),(/ ni*nj /)), & 1913 & latvalue = RESHAPE(plat(nldi:nlei, nldj:nlej),(/ ni*nj /))) 1914 ! 1915 IF ( ln_mskland .AND. (.NOT.ldxios) ) THEN 1916 ! mask land points, keep values on coast line -> specific mask for U, V and W points 1917 SELECT CASE ( cdgrd ) 1918 CASE('T') ; zmask(:,:,:) = tmask(:,:,:) 1919 CASE('U') ; zmask(2:jpim1,:,:) = tmask(2:jpim1,:,:) + tmask(3:jpi,:,:) ; CALL lbc_lnk( 'iom', zmask, 'U', 1. ) 1920 CASE('V') ; zmask(:,2:jpjm1,:) = tmask(:,2:jpjm1,:) + tmask(:,3:jpj,:) ; CALL lbc_lnk( 'iom', zmask, 'V', 1. ) 1921 CASE('W') ; zmask(:,:,2:jpk ) = tmask(:,:,1:jpkm1) + tmask(:,:,2:jpk) ; zmask(:,:,1) = tmask(:,:,1) 1922 END SELECT 1923 ! 1924 CALL iom_set_domain_attr( "grid_"//cdgrd , mask = RESHAPE(zmask(nldi:nlei,nldj:nlej,1),(/ni*nj /)) /= 0. ) 1925 CALL iom_set_grid_attr ( "grid_"//cdgrd//"_3D", mask = RESHAPE(zmask(nldi:nlei,nldj:nlej,:),(/ni,nj,jpk/)) /= 0. ) 1926 ENDIF 1927 ! 1928 END SUBROUTINE set_grid 1929 1930 1931 SUBROUTINE set_grid_bounds( cdgrd, plon_cnr, plat_cnr, plon_pnt, plat_pnt ) 1932 !!---------------------------------------------------------------------- 1933 !! *** ROUTINE set_grid_bounds *** 1934 !! 1935 !! ** Purpose : define horizontal grid corners 1936 !! 1937 !!---------------------------------------------------------------------- 1938 CHARACTER(LEN=1) , INTENT(in) :: cdgrd 1939 REAL(wp), DIMENSION(jpi,jpj) , INTENT(in) :: plon_cnr, plat_cnr ! Lat/lon coord. of a contiguous vertex of cell (i,j) 1940 REAL(wp), DIMENSION(jpi,jpj), OPTIONAL, INTENT(in) :: plon_pnt, plat_pnt ! Lat/lon coord. of the point of cell (i,j) 1941 ! 1942 INTEGER :: ji, jj, jn, ni, nj 1943 INTEGER :: icnr, jcnr ! Offset such that the vertex coordinate (i+icnr,j+jcnr) 1944 ! ! represents the bottom-left corner of cell (i,j) 1945 REAL(wp), ALLOCATABLE, DIMENSION(:,:,:,:) :: z_bnds ! Lat/lon coordinates of the vertices of cell (i,j) 1946 REAL(wp), ALLOCATABLE, DIMENSION(:,:) :: z_fld ! Working array to determine where to rotate cells 1947 REAL(wp), ALLOCATABLE, DIMENSION(:,:) :: z_rot ! Lat/lon working array for rotation of cells 1948 !!---------------------------------------------------------------------- 1949 ! 1950 ALLOCATE( z_bnds(4,jpi,jpj,2), z_fld(jpi,jpj), z_rot(4,2) ) 1951 ! 1952 ! Offset of coordinate representing bottom-left corner 1953 SELECT CASE ( TRIM(cdgrd) ) 1954 CASE ('T', 'W') ; icnr = -1 ; jcnr = -1 1955 CASE ('U') ; icnr = 0 ; jcnr = -1 1956 CASE ('V') ; icnr = -1 ; jcnr = 0 1957 END SELECT 1958 ! 1959 ni = nlei-nldi+1 ! Dimensions of subdomain interior 1960 nj = nlej-nldj+1 1961 ! 
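set_grid above hands XIOS flattened copies of the subdomain interior, e.g. lonvalue = RESHAPE(plon(nldi:nlei,nldj:nlej), (/ ni*nj /)). RESHAPE traverses the section in Fortran array-element order (first index varying fastest), so interior point (ji,jj) lands at position ji + (jj-1)*ni of the 1-D vector. A small self-contained sketch of that flattening; the sizes and names are illustrative:

      PROGRAM demo_flatten_interior
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 6, jpj = 5                            ! local domain including halos
         INTEGER, PARAMETER :: nldi = 2, nlei = 5, nldj = 2, nlej = 4      ! interior bounds
         INTEGER :: ji, jj, ni, nj
         REAL    :: plon(jpi,jpj)
         REAL, ALLOCATABLE :: zlon1d(:)
         DO jj = 1, jpj
            DO ji = 1, jpi
               plon(ji,jj) = 100.*REAL(jj) + REAL(ji)                      ! recognisable fake longitudes
            END DO
         END DO
         ni = nlei - nldi + 1
         nj = nlej - nldj + 1
         zlon1d = RESHAPE( plon(nldi:nlei,nldj:nlej), (/ ni*nj /) )        ! column-major flattening
         WRITE(*,*) zlon1d( 1 + (2-1)*ni ), ' equals ', plon(nldi,nldj+1)  ! interior point (ji=1,jj=2)
      END PROGRAM demo_flatten_interior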
1962 z_fld(:,:) = 1._wp 1963 CALL lbc_lnk( 'iom', z_fld, cdgrd, -1. ) ! Working array for location of northfold 1964 ! 1965 ! Cell vertices that can be defined 1966 DO jj = 2, jpjm1 1967 DO ji = 2, jpim1 1968 z_bnds(1,ji,jj,1) = plat_cnr(ji+icnr, jj+jcnr ) ! Bottom-left 1969 z_bnds(2,ji,jj,1) = plat_cnr(ji+icnr+1,jj+jcnr ) ! Bottom-right 1970 z_bnds(3,ji,jj,1) = plat_cnr(ji+icnr+1,jj+jcnr+1) ! Top-right 1971 z_bnds(4,ji,jj,1) = plat_cnr(ji+icnr, jj+jcnr+1) ! Top-left 1972 z_bnds(1,ji,jj,2) = plon_cnr(ji+icnr, jj+jcnr ) ! Bottom-left 1973 z_bnds(2,ji,jj,2) = plon_cnr(ji+icnr+1,jj+jcnr ) ! Bottom-right 1974 z_bnds(3,ji,jj,2) = plon_cnr(ji+icnr+1,jj+jcnr+1) ! Top-right 1975 z_bnds(4,ji,jj,2) = plon_cnr(ji+icnr, jj+jcnr+1) ! Top-left 1976 END DO 1977 END DO 1978 ! 1979 ! Cell vertices on boundries 1980 DO jn = 1, 4 1981 CALL lbc_lnk( 'iom', z_bnds(jn,:,:,1), cdgrd, 1., pval=999._wp ) 1982 CALL lbc_lnk( 'iom', z_bnds(jn,:,:,2), cdgrd, 1., pval=999._wp ) 1983 END DO 1984 ! 1985 ! Zero-size cells at closed boundaries if cell points provided, 1986 ! otherwise they are closed cells with unrealistic bounds 1987 IF( PRESENT(plon_pnt) .AND. PRESENT(plat_pnt) ) THEN 1988 IF( (nbondi == -1 .OR. nbondi == 2) .AND. .NOT. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6) ) THEN 1989 DO jn = 1, 4 ! (West or jpni = 1), closed E-W 1990 z_bnds(jn,1,:,1) = plat_pnt(1,:) ; z_bnds(jn,1,:,2) = plon_pnt(1,:) 1991 END DO 1992 ENDIF 1993 IF( (nbondi == 1 .OR. nbondi == 2) .AND. .NOT. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6) ) THEN 1994 DO jn = 1, 4 ! (East or jpni = 1), closed E-W 1995 z_bnds(jn,nlci,:,1) = plat_pnt(nlci,:) ; z_bnds(jn,nlci,:,2) = plon_pnt(nlci,:) 1996 END DO 1997 ENDIF 1998 IF( nbondj == -1 .OR. (nbondj == 2 .AND. jperio /= 2) ) THEN 1999 DO jn = 1, 4 ! South or (jpnj = 1, not symmetric) 2000 z_bnds(jn,:,1,1) = plat_pnt(:,1) ; z_bnds(jn,:,1,2) = plon_pnt(:,1) 2001 END DO 2002 ENDIF 2003 IF( (nbondj == 1 .OR. nbondj == 2) .AND. jperio < 3 ) THEN 2004 DO jn = 1, 4 ! (North or jpnj = 1), no north fold 2005 z_bnds(jn,:,nlcj,1) = plat_pnt(:,nlcj) ; z_bnds(jn,:,nlcj,2) = plon_pnt(:,nlcj) 2006 END DO 2007 ENDIF 2008 ENDIF 2009 ! 2010 IF( (nbondj == 1 .OR. nbondj == 2) .AND. jperio >= 3 ) THEN ! Rotate cells at the north fold 2011 DO jj = 1, jpj 2012 DO ji = 1, jpi 2013 IF( z_fld(ji,jj) == -1. ) THEN 2014 z_rot(1,:) = z_bnds(3,ji,jj,:) ; z_rot(2,:) = z_bnds(4,ji,jj,:) 2015 z_rot(3,:) = z_bnds(1,ji,jj,:) ; z_rot(4,:) = z_bnds(2,ji,jj,:) 2016 z_bnds(:,ji,jj,:) = z_rot(:,:) 2017 ENDIF 2018 END DO 2019 END DO 2020 ELSE IF( nbondj == 2 .AND. jperio == 2 ) THEN ! Invert cells at the symmetric equator 2021 DO ji = 1, jpi 2022 z_rot(1:2,:) = z_bnds(3:4,ji,1,:) 2023 z_rot(3:4,:) = z_bnds(1:2,ji,1,:) 2024 z_bnds(:,ji,1,:) = z_rot(:,:) 2025 END DO 2026 ENDIF 2027 ! 2028 CALL iom_set_domain_attr("grid_"//cdgrd, bounds_lat = RESHAPE(z_bnds(:,nldi:nlei,nldj:nlej,1),(/ 4,ni*nj /)), & 2029 & bounds_lon = RESHAPE(z_bnds(:,nldi:nlei,nldj:nlej,2),(/ 4,ni*nj /)), nvertex=4 ) 2030 ! 2031 DEALLOCATE( z_bnds, z_fld, z_rot ) 2032 ! 2033 END SUBROUTINE set_grid_bounds 2034 2035 2036 SUBROUTINE set_grid_znl( plat ) 2037 !!---------------------------------------------------------------------- 2038 !! *** ROUTINE set_grid_znl *** 2039 !! 2040 !! ** Purpose : define grids for zonal mean 2041 !! 2042 !!---------------------------------------------------------------------- 2043 REAL(wp), DIMENSION(jpi,jpj), INTENT(in) :: plat 2044 ! 
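In set_grid_bounds above, the (icnr, jcnr) offsets pick which element of the corner-coordinate array serves as the bottom-left vertex of cell (ji,jj) for the given grid staggering, and the four vertices are then stored counter-clockwise from that corner. A reduced, self-contained sketch of the interior loop, ignoring halos, closed boundaries and the north-fold rotation (sizes and values below are illustrative):

      PROGRAM demo_cell_corners
         IMPLICIT NONE
         INTEGER, PARAMETER :: jpi = 5, jpj = 4
         INTEGER :: ji, jj, icnr, jcnr
         REAL    :: plat_cnr(jpi,jpj)                 ! latitude of one corner point per cell
         REAL    :: z_bnds(4,jpi,jpj)                 ! the four vertex latitudes of each cell
         DO jj = 1, jpj
            DO ji = 1, jpi
               plat_cnr(ji,jj) = REAL(jj) + 0.1*REAL(ji)         ! fake corner latitudes
            END DO
         END DO
         icnr = -1   ;   jcnr = -1                    ! offsets for a T (or W) cell, as in set_grid_bounds
         DO jj = 2, jpj-1
            DO ji = 2, jpi-1
               z_bnds(1,ji,jj) = plat_cnr(ji+icnr  ,jj+jcnr  )   ! bottom-left
               z_bnds(2,ji,jj) = plat_cnr(ji+icnr+1,jj+jcnr  )   ! bottom-right
               z_bnds(3,ji,jj) = plat_cnr(ji+icnr+1,jj+jcnr+1)   ! top-right
               z_bnds(4,ji,jj) = plat_cnr(ji+icnr  ,jj+jcnr+1)   ! top-left
            END DO
         END DO
         WRITE(*,*) 'corners of cell (2,2): ', z_bnds(:,2,2)
      END PROGRAM demo_cell_corners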
2045 INTEGER :: ni, nj, ix, iy 2046 REAL(wp), DIMENSION(:), ALLOCATABLE :: zlon 2047 !!---------------------------------------------------------------------- 2048 ! 2049 ni=nlei-nldi+1 ! define zonal mean domain (jpj*jpk) 2050 nj=nlej-nldj+1 2051 ALLOCATE( zlon(ni*nj) ) ; zlon(:) = 0._wp 2052 ! 2053 CALL dom_ngb( -168.53, 65.03, ix, iy, 'T' ) ! i-line that passes through Bering Strait: Reference latitude (used in plots) 2054 ! CALL dom_ngb( 180., 90., ix, iy, 'T' ) ! i-line that passes near the North Pole : Reference latitude (used in plots) 2055 CALL iom_set_domain_attr("gznl", ni_glo=jpiglo, nj_glo=jpjglo, ibegin=nimpp+nldi-2, jbegin=njmpp+nldj-2, ni=ni, nj=nj) 2056 CALL iom_set_domain_attr("gznl", data_dim=2, data_ibegin = 1-nldi, data_ni = jpi, data_jbegin = 1-nldj, data_nj = jpj) 2057 CALL iom_set_domain_attr("gznl", lonvalue = zlon, & 2058 & latvalue = RESHAPE(plat(nldi:nlei, nldj:nlej),(/ ni*nj /))) 2059 CALL iom_set_zoom_domain_attr("znl_T", ibegin=ix-1, jbegin=0, ni=1, nj=jpjglo) 2060 CALL iom_set_zoom_domain_attr("znl_W", ibegin=ix-1, jbegin=0, ni=1, nj=jpjglo) 2061 ! 2062 CALL iom_update_file_name('ptr') 2063 ! 2064 END SUBROUTINE set_grid_znl 2065 2066 2067 SUBROUTINE set_scalar 2068 !!---------------------------------------------------------------------- 2069 !! *** ROUTINE set_scalar *** 2070 !! 2071 !! ** Purpose : define fake grids for scalar point 2072 !! 2073 !!---------------------------------------------------------------------- 2074 REAL(wp), DIMENSION(1) :: zz = 1. 2075 !!---------------------------------------------------------------------- 2076 ! 2077 CALL iom_set_domain_attr('scalarpoint', ni_glo=jpnij, nj_glo=1, ibegin=narea-1, jbegin=0, ni=1, nj=1) 2078 CALL iom_set_domain_attr('scalarpoint', data_dim=2, data_ibegin = 1, data_ni = 1, data_jbegin = 1, data_nj = 1) 2079 ! 2080 zz = REAL( narea, wp ) 2081 CALL iom_set_domain_attr('scalarpoint', lonvalue=zz, latvalue=zz) 2082 ! 2083 END SUBROUTINE set_scalar 2084 2085 2086 SUBROUTINE set_xmlatt 2087 !!---------------------------------------------------------------------- 2088 !! *** ROUTINE set_xmlatt *** 2089 !! 2090 !! ** Purpose : automatic definitions of some of the xml attributs... 2091 !! 2092 !!---------------------------------------------------------------------- 2093 CHARACTER(len=1),DIMENSION( 3) :: clgrd ! suffix name 2094 CHARACTER(len=256) :: clsuff ! suffix name 2095 CHARACTER(len=1) :: cl1 ! 1 character 2096 CHARACTER(len=2) :: cl2 ! 2 characters 2097 CHARACTER(len=3) :: cl3 ! 3 characters 2098 INTEGER :: ji, jg ! loop counters 2099 INTEGER :: ix, iy ! i-,j- index 2100 REAL(wp) ,DIMENSION(11) :: zlontao ! longitudes of tao moorings 2101 REAL(wp) ,DIMENSION( 7) :: zlattao ! latitudes of tao moorings 2102 REAL(wp) ,DIMENSION( 4) :: zlonrama ! longitudes of rama moorings 2103 REAL(wp) ,DIMENSION(11) :: zlatrama ! latitudes of rama moorings 2104 REAL(wp) ,DIMENSION( 3) :: zlonpira ! longitudes of pirata moorings 2105 REAL(wp) ,DIMENSION( 9) :: zlatpira ! latitudes of pirata moorings 2106 TYPE(xios_duration) :: f_op, f_of 2107 !!---------------------------------------------------------------------- 2108 ! 2109 ! 
frequency of the call of iom_put (attribut: freq_op) 2110 f_op%timestep = 1 ; f_of%timestep = 0 ; CALL iom_set_field_attr('field_definition', freq_op=f_op, freq_offset=f_of) 2111 f_op%timestep = 2 ; f_of%timestep = 0 ; CALL iom_set_field_attr('trendT_even' , freq_op=f_op, freq_offset=f_of) 2112 f_op%timestep = 2 ; f_of%timestep = -1 ; CALL iom_set_field_attr('trendT_odd' , freq_op=f_op, freq_offset=f_of) 2113 f_op%timestep = nn_fsbc ; f_of%timestep = 0 ; CALL iom_set_field_attr('SBC' , freq_op=f_op, freq_offset=f_of) 2114 f_op%timestep = nn_fsbc ; f_of%timestep = 0 ; CALL iom_set_field_attr('SBC_scalar' , freq_op=f_op, freq_offset=f_of) 2115 f_op%timestep = nn_dttrc ; f_of%timestep = 0 ; CALL iom_set_field_attr('ptrc_T' , freq_op=f_op, freq_offset=f_of) 2116 f_op%timestep = nn_dttrc ; f_of%timestep = 0 ; CALL iom_set_field_attr('diad_T' , freq_op=f_op, freq_offset=f_of) 2117 2118 ! output file names (attribut: name) 2119 DO ji = 1, 9 2120 WRITE(cl1,'(i1)') ji 2121 CALL iom_update_file_name('file'//cl1) 2122 END DO 2123 DO ji = 1, 99 2124 WRITE(cl2,'(i2.2)') ji 2125 CALL iom_update_file_name('file'//cl2) 2126 END DO 2127 DO ji = 1, 999 2128 WRITE(cl3,'(i3.3)') ji 2129 CALL iom_update_file_name('file'//cl3) 2130 END DO 2131 2132 ! Zooms... 2133 clgrd = (/ 'T', 'U', 'W' /) 2134 DO jg = 1, SIZE(clgrd) ! grid type 2135 cl1 = clgrd(jg) 2136 ! Equatorial section (attributs: jbegin, ni, name_suffix) 2137 CALL dom_ngb( 0., 0., ix, iy, cl1 ) 2138 CALL iom_set_zoom_domain_attr('Eq'//cl1, ibegin=0, jbegin=iy-1, ni=jpiglo, nj=1 ) 2139 CALL iom_get_file_attr ('Eq'//cl1, name_suffix = clsuff ) 2140 CALL iom_set_file_attr ('Eq'//cl1, name_suffix = TRIM(clsuff)//'_Eq') 2141 CALL iom_update_file_name('Eq'//cl1) 2142 END DO 2143 ! TAO moorings (attributs: ibegin, jbegin, name_suffix) 2144 zlontao = (/ 137.0, 147.0, 156.0, 165.0, -180.0, -170.0, -155.0, -140.0, -125.0, -110.0, -95.0 /) 2145 zlattao = (/ -8.0, -5.0, -2.0, 0.0, 2.0, 5.0, 8.0 /) 2146 CALL set_mooring( zlontao, zlattao ) 2147 ! RAMA moorings (attributs: ibegin, jbegin, name_suffix) 2148 zlonrama = (/ 55.0, 67.0, 80.5, 90.0 /) 2149 zlatrama = (/ -16.0, -12.0, -8.0, -4.0, -1.5, 0.0, 1.5, 4.0, 8.0, 12.0, 15.0 /) 2150 CALL set_mooring( zlonrama, zlatrama ) 2151 ! PIRATA moorings (attributs: ibegin, jbegin, name_suffix) 2152 zlonpira = (/ -38.0, -23.0, -10.0 /) 2153 zlatpira = (/ -19.0, -14.0, -8.0, 0.0, 4.0, 8.0, 12.0, 15.0, 20.0 /) 2154 CALL set_mooring( zlonpira, zlatpira ) 2155 ! 2156 END SUBROUTINE set_xmlatt 2157 2158 2159 SUBROUTINE set_mooring( plon, plat ) 2160 !!---------------------------------------------------------------------- 2161 !! *** ROUTINE set_mooring *** 2162 !! 2163 !! ** Purpose : automatic definitions of moorings xml attributs... 2164 !! 2165 !!---------------------------------------------------------------------- 2166 REAL(wp), DIMENSION(:), INTENT(in) :: plon, plat ! longitudes/latitudes oft the mooring 2167 ! 2168 !!$ CHARACTER(len=1),DIMENSION(4) :: clgrd = (/ 'T', 'U', 'V', 'W' /) ! suffix name 2169 CHARACTER(len=1),DIMENSION(1) :: clgrd = (/ 'T' /) ! suffix name 2170 CHARACTER(len=256) :: clname ! file name 2171 CHARACTER(len=256) :: clsuff ! suffix name 2172 CHARACTER(len=1) :: cl1 ! 1 character 2173 CHARACTER(len=6) :: clon,clat ! name of longitude, latitude 2174 INTEGER :: ji, jj, jg ! loop counters 2175 INTEGER :: ix, iy ! 
i-,j- index 2176 REAL(wp) :: zlon, zlat 2177 !!---------------------------------------------------------------------- 2178 DO jg = 1, SIZE(clgrd) 2179 cl1 = clgrd(jg) 2180 DO ji = 1, SIZE(plon) 2181 DO jj = 1, SIZE(plat) 2182 zlon = plon(ji) 2183 zlat = plat(jj) 2184 ! modifications for RAMA moorings 2185 IF( zlon == 67. .AND. zlat == 15. ) zlon = 65. 2186 IF( zlon == 90. .AND. zlat <= -4. ) zlon = 95. 2187 IF( zlon == 95. .AND. zlat == -4. ) zlat = -5. 2188 ! modifications for PIRATA moorings 2189 IF( zlon == -38. .AND. zlat == -19. ) zlon = -34. 2190 IF( zlon == -38. .AND. zlat == -14. ) zlon = -32. 2191 IF( zlon == -38. .AND. zlat == -8. ) zlon = -30. 2192 IF( zlon == -38. .AND. zlat == 0. ) zlon = -35. 2193 IF( zlon == -23. .AND. zlat == 20. ) zlat = 21. 2194 IF( zlon == -10. .AND. zlat == -14. ) zlat = -10. 2195 IF( zlon == -10. .AND. zlat == -8. ) zlat = -6. 2196 IF( zlon == -10. .AND. zlat == 4. ) THEN ; zlon = 0. ; zlat = 0. ; ENDIF 2197 CALL dom_ngb( zlon, zlat, ix, iy, cl1 ) 2198 IF( zlon >= 0. ) THEN 2199 IF( zlon == REAL(NINT(zlon), wp) ) THEN ; WRITE(clon, '(i3, a)') NINT( zlon), 'e' 2200 ELSE ; WRITE(clon, '(f5.1,a)') zlon , 'e' 2201 ENDIF 2202 ELSE 2203 IF( zlon == REAL(NINT(zlon), wp) ) THEN ; WRITE(clon, '(i3, a)') NINT(-zlon), 'w' 2204 ELSE ; WRITE(clon, '(f5.1,a)') -zlon , 'w' 2205 ENDIF 2206 ENDIF 2207 IF( zlat >= 0. ) THEN 2208 IF( zlat == REAL(NINT(zlat), wp) ) THEN ; WRITE(clat, '(i2, a)') NINT( zlat), 'n' 2209 ELSE ; WRITE(clat, '(f4.1,a)') zlat , 'n' 2210 ENDIF 2211 ELSE 2212 IF( zlat == REAL(NINT(zlat), wp) ) THEN ; WRITE(clat, '(i2, a)') NINT(-zlat), 's' 2213 ELSE ; WRITE(clat, '(f4.1,a)') -zlat , 's' 2214 ENDIF 2215 ENDIF 2216 clname = TRIM(ADJUSTL(clat))//TRIM(ADJUSTL(clon)) 2217 CALL iom_set_zoom_domain_attr(TRIM(clname)//cl1, ibegin= ix-1, jbegin= iy-1, ni=1, nj=1) 2218 2219 CALL iom_get_file_attr (TRIM(clname)//cl1, name_suffix = clsuff ) 2220 CALL iom_set_file_attr (TRIM(clname)//cl1, name_suffix = TRIM(clsuff)//'_'//TRIM(clname)) 2221 CALL iom_update_file_name(TRIM(clname)//cl1) 2222 END DO 2223 END DO 2224 END DO 2225 2226 END SUBROUTINE set_mooring 2227 2228 2229 SUBROUTINE iom_update_file_name( cdid ) 2230 !!---------------------------------------------------------------------- 2231 !! *** ROUTINE iom_update_file_name *** 2232 !! 2233 !! ** Purpose : 2234 !! 2235 !!---------------------------------------------------------------------- 2236 CHARACTER(LEN=*) , INTENT(in) :: cdid 2237 ! 2238 CHARACTER(LEN=256) :: clname 2239 CHARACTER(LEN=20) :: clfreq 2240 CHARACTER(LEN=20) :: cldate 2241 INTEGER :: idx 2242 INTEGER :: jn 2243 INTEGER :: itrlen 2244 INTEGER :: iyear, imonth, iday, isec 2245 REAL(wp) :: zsec 2246 LOGICAL :: llexist 2247 TYPE(xios_duration) :: output_freq 2248 !!---------------------------------------------------------------------- 2249 ! 2250 DO jn = 1, 2 2251 ! 2252 output_freq = xios_duration(0,0,0,0,0,0) 2253 IF( jn == 1 ) CALL iom_get_file_attr( cdid, name = clname, output_freq = output_freq ) 2254 IF( jn == 2 ) CALL iom_get_file_attr( cdid, name_suffix = clname ) 2255 ! 2256 IF ( TRIM(clname) /= '' ) THEN 2257 ! 2258 idx = INDEX(clname,'@expname@') + INDEX(clname,'@EXPNAME@') 2259 DO WHILE ( idx /= 0 ) 2260 clname = clname(1:idx-1)//TRIM(cexper)//clname(idx+9:LEN_TRIM(clname)) 2261 idx = INDEX(clname,'@expname@') + INDEX(clname,'@EXPNAME@') 2262 END DO 2263 ! 
2264 idx = INDEX(clname,'@freq@') + INDEX(clname,'@FREQ@') 2265 DO WHILE ( idx /= 0 ) 2266 IF ( output_freq%timestep /= 0) THEN 2267 WRITE(clfreq,'(I18,A2)')INT(output_freq%timestep),'ts' 2268 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2269 ELSE IF ( output_freq%second /= 0 ) THEN 2270 WRITE(clfreq,'(I19,A1)')INT(output_freq%second),'s' 2271 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2272 ELSE IF ( output_freq%minute /= 0 ) THEN 2273 WRITE(clfreq,'(I18,A2)')INT(output_freq%minute),'mi' 2274 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2275 ELSE IF ( output_freq%hour /= 0 ) THEN 2276 WRITE(clfreq,'(I19,A1)')INT(output_freq%hour),'h' 2277 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2278 ELSE IF ( output_freq%day /= 0 ) THEN 2279 WRITE(clfreq,'(I19,A1)')INT(output_freq%day),'d' 2280 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2281 ELSE IF ( output_freq%month /= 0 ) THEN 2282 WRITE(clfreq,'(I19,A1)')INT(output_freq%month),'m' 2283 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2284 ELSE IF ( output_freq%year /= 0 ) THEN 2285 WRITE(clfreq,'(I19,A1)')INT(output_freq%year),'y' 2286 itrlen = LEN_TRIM(ADJUSTL(clfreq)) 2287 ELSE 2288 CALL ctl_stop('error in the name of file id '//TRIM(cdid), & 2289 & ' attribute output_freq is undefined -> cannot replace @freq@ in '//TRIM(clname) ) 2290 ENDIF 2291 clname = clname(1:idx-1)//TRIM(ADJUSTL(clfreq))//clname(idx+6:LEN_TRIM(clname)) 2292 idx = INDEX(clname,'@freq@') + INDEX(clname,'@FREQ@') 2293 END DO 2294 ! 2295 idx = INDEX(clname,'@startdate@') + INDEX(clname,'@STARTDATE@') 2296 DO WHILE ( idx /= 0 ) 2297 cldate = iom_sdate( fjulday - rdt / rday ) 2298 clname = clname(1:idx-1)//TRIM(cldate)//clname(idx+11:LEN_TRIM(clname)) 2299 idx = INDEX(clname,'@startdate@') + INDEX(clname,'@STARTDATE@') 2300 END DO 2301 ! 2302 idx = INDEX(clname,'@startdatefull@') + INDEX(clname,'@STARTDATEFULL@') 2303 DO WHILE ( idx /= 0 ) 2304 cldate = iom_sdate( fjulday - rdt / rday, ldfull = .TRUE. ) 2305 clname = clname(1:idx-1)//TRIM(cldate)//clname(idx+15:LEN_TRIM(clname)) 2306 idx = INDEX(clname,'@startdatefull@') + INDEX(clname,'@STARTDATEFULL@') 2307 END DO 2308 ! 2309 idx = INDEX(clname,'@enddate@') + INDEX(clname,'@ENDDATE@') 2310 DO WHILE ( idx /= 0 ) 2311 cldate = iom_sdate( fjulday + rdt / rday * REAL( nitend - nit000, wp ), ld24 = .TRUE. ) 2312 clname = clname(1:idx-1)//TRIM(cldate)//clname(idx+9:LEN_TRIM(clname)) 2313 idx = INDEX(clname,'@enddate@') + INDEX(clname,'@ENDDATE@') 2314 END DO 2315 ! 2316 idx = INDEX(clname,'@enddatefull@') + INDEX(clname,'@ENDDATEFULL@') 2317 DO WHILE ( idx /= 0 ) 2318 cldate = iom_sdate( fjulday + rdt / rday * REAL( nitend - nit000, wp ), ld24 = .TRUE., ldfull = .TRUE. ) 2319 clname = clname(1:idx-1)//TRIM(cldate)//clname(idx+13:LEN_TRIM(clname)) 2320 idx = INDEX(clname,'@enddatefull@') + INDEX(clname,'@ENDDATEFULL@') 2321 END DO 2322 ! 2323 IF( jn == 1 .AND. TRIM(Agrif_CFixed()) /= '0' ) clname = TRIM(Agrif_CFixed())//"_"//TRIM(clname) 2324 IF( jn == 1 ) CALL iom_set_file_attr( cdid, name = clname ) 2325 IF( jn == 2 ) CALL iom_set_file_attr( cdid, name_suffix = clname ) 2326 ! 2327 ENDIF 2328 ! 2329 END DO 2330 ! 2331 END SUBROUTINE iom_update_file_name 2332 2333 2334 FUNCTION iom_sdate( pjday, ld24, ldfull ) 2335 !!---------------------------------------------------------------------- 2336 !! *** ROUTINE iom_sdate *** 2337 !! 2338 !! ** Purpose : send back the date corresponding to the given julian day 2339 !!---------------------------------------------------------------------- 2340 REAL(wp), INTENT(in ) :: pjday ! julian day 2341 LOGICAL , INTENT(in ), OPTIONAL :: ld24 ! 
true to force 24:00 instead of 00:00 2342 LOGICAL , INTENT(in ), OPTIONAL :: ldfull ! true to get the compleate date: yyyymmdd_hh:mm:ss 2343 ! 2344 CHARACTER(LEN=20) :: iom_sdate 2345 CHARACTER(LEN=50) :: clfmt ! format used to write the date 2346 INTEGER :: iyear, imonth, iday, ihour, iminute, isec 2347 REAL(wp) :: zsec 2348 LOGICAL :: ll24, llfull 2349 !!---------------------------------------------------------------------- 2350 ! 2351 IF( PRESENT(ld24) ) THEN ; ll24 = ld24 2352 ELSE ; ll24 = .FALSE. 2353 ENDIF 2354 ! 2355 IF( PRESENT(ldfull) ) THEN ; llfull = ldfull 2356 ELSE ; llfull = .FALSE. 2357 ENDIF 2358 ! 2359 CALL ju2ymds( pjday, iyear, imonth, iday, zsec ) 2360 isec = NINT(zsec) 2361 ! 2362 IF ( ll24 .AND. isec == 0 ) THEN ! 00:00 of the next day -> move to 24:00 of the current day 2363 CALL ju2ymds( pjday - 1., iyear, imonth, iday, zsec ) 2364 isec = 86400 2365 ENDIF 2366 ! 2367 IF( iyear < 10000 ) THEN ; clfmt = "i4.4,2i2.2" ! format used to write the date 2368 ELSE ; WRITE(clfmt, "('i',i1,',2i2.2')") INT(LOG10(REAL(iyear,wp))) + 1 2369 ENDIF 2370 ! 2371 !$AGRIF_DO_NOT_TREAT 2372 ! needed in the conv 2373 IF( llfull ) THEN 2374 clfmt = TRIM(clfmt)//",'_',i2.2,':',i2.2,':',i2.2" 2375 ihour = isec / 3600 2376 isec = MOD(isec, 3600) 2377 iminute = isec / 60 2378 isec = MOD(isec, 60) 2379 WRITE(iom_sdate, '('//TRIM(clfmt)//')') iyear, imonth, iday, ihour, iminute, isec ! date of the end of run 2380 ELSE 2381 WRITE(iom_sdate, '('//TRIM(clfmt)//')') iyear, imonth, iday ! date of the end of run 2382 ENDIF 2383 !$AGRIF_END_DO_NOT_TREAT 2384 ! 2385 END FUNCTION iom_sdate 2386 2387 #else 2388 !!---------------------------------------------------------------------- 2389 !! NOT 'key_iomput' a few dummy routines 2390 !!---------------------------------------------------------------------- 932 2391 933 2392 SUBROUTINE iom_setkt( kt, cdname ) … … 942 2401 END SUBROUTINE iom_context_finalize 943 2402 2403 #endif 944 2404 945 2405 LOGICAL FUNCTION iom_use( cdname ) 2406 !!---------------------------------------------------------------------- 2407 !!---------------------------------------------------------------------- 946 2408 CHARACTER(LEN=*), INTENT(in) :: cdname 2409 !!---------------------------------------------------------------------- 2410 #if defined key_iomput 2411 iom_use = xios_field_is_active( cdname ) 2412 #else 947 2413 iom_use = .FALSE. 2414 #endif 948 2415 END FUNCTION iom_use 949 2416 -
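The @keyword@ substitution performed by iom_update_file_name above can be read in isolation: the standalone sketch below reproduces the same string-replacement loops outside NEMO. The template name, the experiment name 'MYEXP' and the frequency string '1d' are invented placeholders, not values taken from this changeset.

PROGRAM keyword_subst_sketch
   ! Minimal reproduction of the @expname@ / @freq@ replacement pattern
   ! used in iom_update_file_name (all values below are made up).
   IMPLICIT NONE
   CHARACTER(LEN=256) :: clname
   CHARACTER(LEN=16)  :: clexper = 'MYEXP'   ! stands in for cexper
   CHARACTER(LEN=16)  :: clfreq  = '1d'      ! stands in for the decoded output_freq
   INTEGER            :: idx
   !
   clname = '@expname@_@freq@_grid_T'
   !
   idx = INDEX( clname, '@expname@' )        ! '@expname@' is 9 characters long
   DO WHILE( idx /= 0 )
      clname = clname(1:idx-1)//TRIM(clexper)//clname(idx+9:LEN_TRIM(clname))
      idx = INDEX( clname, '@expname@' )
   END DO
   !
   idx = INDEX( clname, '@freq@' )           ! '@freq@' is 6 characters long
   DO WHILE( idx /= 0 )
      clname = clname(1:idx-1)//TRIM(ADJUSTL(clfreq))//clname(idx+6:LEN_TRIM(clname))
      idx = INDEX( clname, '@freq@' )
   END DO
   !
   WRITE(*,*) TRIM(clname)                   ! prints MYEXP_1d_grid_T
END PROGRAM keyword_subst_sketch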
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/iom_def.F90
r10725 r10727 1 1 MODULE iom_def 2 !!===================================================================== 2 !!====================================================================== 3 3 !! *** MODULE iom_def *** 4 4 !! IOM variables definitions 5 !!==================================================================== 6 !! History : 9.0 ! 06 09 (S. Masson) Original code 7 !! " ! 07 07 (D. Storkey) Add uldname 8 !!-------------------------------------------------------------------- 9 !!--------------------------------------------------------------------------------- 10 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 11 !! $Id: iom_def.F90 6140 2015-12-21 11:35:23Z timgraham $ 12 !! Software governed by the CeCILL licence (./LICENSE) 13 !!--------------------------------------------------------------------------------- 14 5 !!====================================================================== 6 !! History : 9.0 ! 2006 09 (S. Masson) Original code 7 !! - ! 2007 07 (D. Storkey) Add uldname 8 !! 4.0 ! 2017-11 (M. Andrejczuk) Extend IOM interface to write any 3D fields 9 !!---------------------------------------------------------------------- 15 10 USE par_kind 16 11 … … 18 13 PRIVATE 19 14 20 INTEGER, PARAMETER, PUBLIC :: jpdom_data = 1 !: ( 1 :jpi dta, 1 :jpjdta)15 INTEGER, PARAMETER, PUBLIC :: jpdom_data = 1 !: ( 1 :jpiglo, 1 :jpjglo) !!gm to be suppressed 21 16 INTEGER, PARAMETER, PUBLIC :: jpdom_global = 2 !: ( 1 :jpiglo, 1 :jpjglo) 22 17 INTEGER, PARAMETER, PUBLIC :: jpdom_local = 3 !: One of the 3 following cases … … 29 24 INTEGER, PARAMETER, PUBLIC :: jpdom_autodta = 10 !: 30 25 31 INTEGER, PARAMETER, PUBLIC :: jpnf90 = 101 !: Use nf90 library32 33 INTEGER, PARAMETER, PUBLIC :: jprstlib = jpnf90 !: restarts io library34 35 26 INTEGER, PARAMETER, PUBLIC :: jp_r8 = 200 !: write REAL(8) 36 27 INTEGER, PARAMETER, PUBLIC :: jp_r4 = 201 !: write REAL(4) … … 39 30 INTEGER, PARAMETER, PUBLIC :: jp_i1 = 204 !: write INTEGER(1) 40 31 41 INTEGER, PARAMETER, PUBLIC :: jpmax_files = 100 42 INTEGER, PARAMETER, PUBLIC :: jpmax_vars = 600!: maximum number of variables in one file32 INTEGER, PARAMETER, PUBLIC :: jpmax_files = 100 !: maximum number of simultaneously opened file 33 INTEGER, PARAMETER, PUBLIC :: jpmax_vars = 1200 !: maximum number of variables in one file 43 34 INTEGER, PARAMETER, PUBLIC :: jpmax_dims = 4 !: maximum number of dimensions for one variable 44 35 INTEGER, PARAMETER, PUBLIC :: jpmax_digits = 5 !: maximum number of digits for the cpu number in the file name 45 36 37 46 38 !$AGRIF_DO_NOT_TREAT 47 39 INTEGER, PUBLIC :: iom_open_init = 0 !: used to initialize iom_file(:)%nfid to 0 40 !XIOS write restart 41 LOGICAL, PUBLIC :: lwxios !: write single file restart using XIOS 42 INTEGER, PUBLIC :: nxioso !: type of restart file when writing using XIOS 1 - single, 2 - multiple 43 !XIOS read restart 44 LOGICAL, PUBLIC :: lrxios !: read single file restart using XIOS 45 LOGICAL, PUBLIC :: lxios_sini = .FALSE. ! is restart in a single file 46 LOGICAL, PUBLIC :: lxios_set = .FALSE. 
47 48 48 49 49 50 TYPE, PUBLIC :: file_descriptor 50 51 CHARACTER(LEN=240) :: name !: name of the file 51 52 INTEGER :: nfid !: identifier of the file (0 if closed) 52 INTEGER :: iolib !: library used to read the file (jpnf90 or new formats,53 53 !: jpioipsl option has been removed) 54 54 INTEGER :: nvars !: number of identified varibles in the file … … 64 64 REAL(kind=wp), DIMENSION(jpmax_vars) :: scf !: scale_factor of the variables 65 65 REAL(kind=wp), DIMENSION(jpmax_vars) :: ofs !: add_offset of the variables 66 INTEGER :: nlev ! number of vertical levels 66 67 END TYPE file_descriptor 67 68 TYPE(file_descriptor), DIMENSION(jpmax_files), PUBLIC :: iom_file !: array containing the info for all opened files 69 INTEGER, PARAMETER, PUBLIC :: max_rst_fields = 95 !: maximum number of restart variables defined in iom_set_rst_vars 70 TYPE, PUBLIC :: RST_FIELD 71 CHARACTER(len=30) :: vname = "NO_NAME" ! names of variables in restart file 72 CHARACTER(len=30) :: grid = "NO_GRID" 73 LOGICAL :: active =.FALSE. ! for restart write only: true - write field, false do not write field 74 END TYPE RST_FIELD 68 75 !$AGRIF_END_DO_NOT_TREAT 69 70 !!===================================================================== 76 ! 77 TYPE(RST_FIELD), PUBLIC, SAVE :: rst_wfields(max_rst_fields), rst_rfields(max_rst_fields) 78 ! 79 !!---------------------------------------------------------------------- 80 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 81 !! $Id: iom_def.F90 10425 2018-12-19 21:54:16Z smasson $ 82 !! Software governed by the CeCILL license (see ./LICENSE) 83 !!====================================================================== 71 84 END MODULE iom_def -
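The RST_FIELD type and the rst_wfields / rst_rfields tables added above are only declared in iom_def.F90; the routines that fill and query them live elsewhere. As a reading aid, the compilable sketch below shows how such a name-keyed table is typically flagged; the type name, table size and variable names are invented for the example and this is not the iom.F90 implementation.

MODULE rst_table_sketch
   IMPLICIT NONE
   TYPE :: sketch_field                       ! same shape as RST_FIELD
      CHARACTER(len=30) :: vname  = "NO_NAME"
      CHARACTER(len=30) :: grid   = "NO_GRID"
      LOGICAL           :: active = .FALSE.
   END TYPE sketch_field
   TYPE(sketch_field), SAVE :: sfields(3)
CONTAINS
   SUBROUTINE set_var_active( cdname )
      ! mark the entry whose name matches cdname as "to be written"
      CHARACTER(len=*), INTENT(in) :: cdname
      INTEGER :: jf
      DO jf = 1, SIZE(sfields)
         IF( TRIM(sfields(jf)%vname) == TRIM(cdname) )   sfields(jf)%active = .TRUE.
      END DO
   END SUBROUTINE set_var_active
END MODULE rst_table_sketch

PROGRAM test_rst_table
   USE rst_table_sketch
   IMPLICIT NONE
   sfields(1)%vname = 'sshn'                  ! example variable names only
   sfields(2)%vname = 'tn'
   sfields(3)%vname = 'un'
   CALL set_var_active( 'tn' )
   WRITE(*,*) sfields(:)%active               ! F T F
END PROGRAM test_rst_table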
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/iom_nf90.F90
r10725 r10727 1 1 MODULE iom_nf90 2 !!===================================================================== 2 !!====================================================================== 3 3 !! *** MODULE iom_nf90 *** 4 4 !! Input/Output manager : Library to read input files with NF90 (only fliocom module) 5 !!==================================================================== 5 !!====================================================================== 6 6 !! History : 9.0 ! 05 12 (J. Belier) Original code 7 7 !! 9.0 ! 06 02 (S. Masson) Adaptation to NEMO 8 8 !! " ! 07 07 (D. Storkey) Changes to iom_nf90_gettime 9 !!-------------------------------------------------------------------- 10 !!gm caution add !DIR nec: improved performance to be checked as well as no result changes 11 12 !!-------------------------------------------------------------------- 9 !! 3.6 ! 2015-15 (J. Harle) Added procedure to read REAL attributes 10 !! 4.0 ! 2017-11 (M. Andrejczuk) Extend IOM interface to write any 3D fields 11 !!---------------------------------------------------------------------- 12 13 !!---------------------------------------------------------------------- 13 14 !! iom_open : open a file read only 14 15 !! iom_close : close a file or all files opened by iom 15 16 !! iom_get : read a field (interfaced to several routines) 16 !! iom_gettime : read the time axis kvid in the file17 17 !! iom_varid : get the id of a variable in a file 18 18 !! iom_rstput : write a field in a restart file (interfaced to several routines) 19 !!-------------------------------------------------------------------- 19 !!---------------------------------------------------------------------- 20 20 USE dom_oce ! ocean space and time domain 21 21 USE lbclnk ! lateal boundary condition / mpp exchanges … … 28 28 PRIVATE 29 29 30 PUBLIC iom_nf90_open , iom_nf90_close, iom_nf90_varid, iom_nf90_get, iom_nf90_gettime, iom_nf90_rstput31 PUBLIC iom_nf90_ getatt30 PUBLIC iom_nf90_open , iom_nf90_close, iom_nf90_varid, iom_nf90_get, iom_nf90_rstput 31 PUBLIC iom_nf90_chkatt, iom_nf90_getatt, iom_nf90_putatt 32 32 33 33 INTERFACE iom_nf90_get 34 34 MODULE PROCEDURE iom_nf90_g0d, iom_nf90_g123d 35 END INTERFACE36 INTERFACE iom_nf90_getatt37 MODULE PROCEDURE iom_nf90_intatt38 35 END INTERFACE 39 36 INTERFACE iom_nf90_rstput … … 43 40 !!---------------------------------------------------------------------- 44 41 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 45 !! $Id: iom_nf90.F90 6140 2015-12-21 11:35:23Z timgraham $ 46 !! Software governed by the CeCILL licence (./LICENSE) 47 !!---------------------------------------------------------------------- 48 42 !! $Id: iom_nf90.F90 10522 2019-01-16 08:35:15Z smasson $ 43 !! Software governed by the CeCILL license (see ./LICENSE) 44 !!---------------------------------------------------------------------- 49 45 CONTAINS 50 46 51 SUBROUTINE iom_nf90_open( cdname, kiomid, ldwrt, ldok, kdompar )47 SUBROUTINE iom_nf90_open( cdname, kiomid, ldwrt, ldok, kdompar, kdlev ) 52 48 !!--------------------------------------------------------------------- 53 49 !! *** SUBROUTINE iom_open *** … … 60 56 LOGICAL , INTENT(in ) :: ldok ! check the existence 61 57 INTEGER, DIMENSION(2,5), INTENT(in ), OPTIONAL :: kdompar ! domain parameters: 58 INTEGER , INTENT(in ), OPTIONAL :: kdlev ! size of the third dimension 62 59 63 60 CHARACTER(LEN=256) :: clinfo ! info character … … 72 69 INTEGER :: ihdf5 ! local variable for retrieval of value for NF90_HDF5 73 70 LOGICAL :: llclobber ! 
local definition of ln_clobber 74 !--------------------------------------------------------------------- 75 71 INTEGER :: ilevels ! vertical levels 72 !--------------------------------------------------------------------- 73 ! 76 74 clinfo = ' iom_nf90_open ~~~ ' 77 istop = nstop ! store the actual value of nstop 75 istop = nstop ! store the actual value of nstop 76 ! 77 ! !number of vertical levels 78 IF( PRESENT(kdlev) ) THEN ; ilevels = kdlev ! use input value (useful for sea-ice) 79 ELSE ; ilevels = jpk ! by default jpk 80 ENDIF 81 ! 78 82 IF( nn_chunksz > 0 ) THEN ; ichunk = nn_chunksz 79 83 ELSE ; ichunk = NF90_SIZEHINT_DEFAULT … … 81 85 ! 82 86 llclobber = ldwrt .AND. ln_clobber 83 IF( ldok .AND. .NOT. llclobber ) THEN ! Open existing file...84 ! ! =============87 IF( ldok .AND. .NOT. llclobber ) THEN !== Open existing file ==! 88 ! !=========================! 85 89 IF( ldwrt ) THEN ! ... in write mode 86 90 IF(lwp) WRITE(numout,*) TRIM(clinfo)//' open existing file: '//TRIM(cdname)//' in WRITE mode' … … 95 99 CALL iom_nf90_check(NF90_OPEN( TRIM(cdname), NF90_NOWRITE, if90id, chunksize = ichunk ), clinfo) 96 100 ENDIF 97 ELSE ! the file does not exist(or we overwrite it)98 ! ! =============101 ELSE !== the file doesn't exist ==! (or we overwrite it) 102 ! !============================! 99 103 iln = INDEX( cdname, '.nc' ) 100 IF( ldwrt ) THEN !the file should be open in write mode so we create it...104 IF( ldwrt ) THEN !* the file should be open in write mode so we create it... 101 105 IF( jpnij > 1 ) THEN 102 106 WRITE(cltmp,'(a,a,i4.4,a)') cdname(1:iln-1), '_', narea-1, '.nc' … … 118 122 CALL iom_nf90_check(NF90_CREATE( TRIM(cdname), imode, if90id, chunksize = ichunk ), clinfo) 119 123 ENDIF 120 CALL iom_nf90_check(NF90_SET_FILL( if90id, NF90_NOFILL, idmy), clinfo)124 CALL iom_nf90_check(NF90_SET_FILL( if90id, NF90_NOFILL, idmy ), clinfo) 121 125 ! define dimensions 122 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'x', kdompar(1,1) , idmy ), clinfo) 123 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'y', kdompar(2,1) , idmy ), clinfo) 124 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'z', jpk , idmy ), clinfo) 125 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 't', NF90_UNLIMITED, idmy ), clinfo) 126 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'x', kdompar(1,1), idmy ), clinfo) 127 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'y', kdompar(2,1), idmy ), clinfo) 128 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'nav_lev', jpk, idmy ), clinfo) 129 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'time_counter', NF90_UNLIMITED, idmy ), clinfo) 130 IF( PRESENT(kdlev) ) & 131 CALL iom_nf90_check(NF90_DEF_DIM( if90id, 'numcat', kdlev, idmy ), clinfo) 126 132 ! global attributes 127 133 CALL iom_nf90_check(NF90_PUT_ATT( if90id, NF90_GLOBAL, 'DOMAIN_number_total' , jpnij ), clinfo) … … 135 141 CALL iom_nf90_check(NF90_PUT_ATT( if90id, NF90_GLOBAL, 'DOMAIN_halo_size_end' , kdompar(:,5) ), clinfo) 136 142 CALL iom_nf90_check(NF90_PUT_ATT( if90id, NF90_GLOBAL, 'DOMAIN_type' , 'BOX' ), clinfo) 137 ELSE !the file should be open for read mode so it must exist...143 ELSE !* the file should be open for read mode so it must exist... 138 144 CALL ctl_stop( TRIM(clinfo), ' should be impossible case...' ) 139 145 ENDIF 140 146 ENDIF 147 ! 141 148 ! start to fill file informations 142 149 ! ============= … … 149 156 iom_file(kiomid)%name = TRIM(cdname) 150 157 iom_file(kiomid)%nfid = if90id 151 iom_file(kiomid)%iolib = jpnf90152 158 iom_file(kiomid)%nvars = 0 153 159 iom_file(kiomid)%irec = -1 ! 
useless for NetCDF files, used to know if the file is in define mode 160 iom_file(kiomid)%nlev = ilevels 154 161 CALL iom_nf90_check(NF90_Inquire(if90id, unlimitedDimId = iom_file(kiomid)%iduld), clinfo) 155 IF 156 CALL iom_nf90_check(NF90_Inquire_Dimension(if90id, iom_file(kiomid)%iduld,&157 & name = iom_file(kiomid)%uldname,&158 &len = iom_file(kiomid)%lenuld ), clinfo )162 IF( iom_file(kiomid)%iduld .GE. 0 ) THEN 163 CALL iom_nf90_check(NF90_Inquire_Dimension(if90id, iom_file(kiomid)%iduld, & 164 & name = iom_file(kiomid)%uldname, & 165 & len = iom_file(kiomid)%lenuld ), clinfo ) 159 166 ENDIF 160 167 IF(lwp) WRITE(numout,*) ' ---> '//TRIM(cdname)//' OK' … … 175 182 CHARACTER(LEN=100) :: clinfo ! info character 176 183 !--------------------------------------------------------------------- 177 !178 184 clinfo = ' iom_nf90_close , file: '//TRIM(iom_file(kiomid)%name) 179 185 CALL iom_nf90_check(NF90_CLOSE(iom_file(kiomid)%nfid), clinfo) 180 !181 186 END SUBROUTINE iom_nf90_close 182 187 … … 238 243 ! return the simension size 239 244 IF( PRESENT(kdimsz) ) THEN 240 IF( i_nvd == SIZE(kdimsz) ) THEN241 kdimsz( :) = iom_file(kiomid)%dimsz(1:i_nvd,kiv)245 IF( i_nvd <= SIZE(kdimsz) ) THEN 246 kdimsz(1:i_nvd) = iom_file(kiomid)%dimsz(1:i_nvd,kiv) 242 247 ELSE 243 248 WRITE(ctmp1,*) i_nvd, SIZE(kdimsz) … … 252 257 END FUNCTION iom_nf90_varid 253 258 259 !!---------------------------------------------------------------------- 260 !! INTERFACE iom_nf90_get 261 !!---------------------------------------------------------------------- 254 262 255 263 SUBROUTINE iom_nf90_g0d( kiomid, kvid, pvar, kstart ) … … 268 276 clinfo = 'iom_nf90_g0d , file: '//TRIM(iom_file(kiomid)%name)//', var: '//TRIM(iom_file(kiomid)%cn_var(kvid)) 269 277 CALL iom_nf90_check(NF90_GET_VAR(iom_file(kiomid)%nfid, iom_file(kiomid)%nvid(kvid), pvar, start = kstart), clinfo ) 270 !271 278 END SUBROUTINE iom_nf90_g0d 272 279 … … 313 320 314 321 315 SUBROUTINE iom_nf90_intatt( kiomid, cdatt, pvar ) 316 !!----------------------------------------------------------------------- 317 !! *** ROUTINE iom_nf90_intatt *** 318 !! 319 !! ** Purpose : read an integer attribute with NF90 322 SUBROUTINE iom_nf90_chkatt( kiomid, cdatt, llok, ksize, cdvar ) 323 !!----------------------------------------------------------------------- 324 !! *** ROUTINE iom_nf90_chkatt *** 325 !! 326 !! ** Purpose : check existence of attribute with NF90 327 !! (either a global attribute (default) or a variable 328 !! attribute if optional variable name is supplied (cdvar)) 320 329 !!----------------------------------------------------------------------- 321 330 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 322 331 CHARACTER(len=*), INTENT(in ) :: cdatt ! attribute name 323 INTEGER , INTENT( out) :: pvar ! read field 332 LOGICAL , INTENT( out) :: llok ! error code 333 INTEGER , INTENT( out), OPTIONAL & 334 & :: ksize ! attribute size 335 CHARACTER(len=*), INTENT(in ), OPTIONAL & 336 & :: cdvar ! name of the variable 324 337 ! 325 338 INTEGER :: if90id ! temporary integer 339 INTEGER :: isize ! temporary integer 340 INTEGER :: ivarid ! NetCDF variable Id 341 !--------------------------------------------------------------------- 342 ! 343 if90id = iom_file(kiomid)%nfid 344 IF( PRESENT(cdvar) ) THEN 345 ! check the variable exists in the file 346 llok = NF90_INQ_VARID( if90id, TRIM(cdvar), ivarid ) == nf90_noerr 347 IF( llok ) & 348 ! 
check the variable has the attribute required 349 llok = NF90_Inquire_attribute(if90id, ivarid, cdatt, len=isize ) == nf90_noerr 350 ELSE 351 llok = NF90_Inquire_attribute(if90id, NF90_GLOBAL, cdatt, len=isize ) == nf90_noerr 352 ENDIF 353 ! 354 IF( PRESENT(ksize) ) ksize = isize 355 ! 356 IF( .not. llok) & 357 CALL ctl_warn('iom_nf90_chkatt: no attribute '//cdatt//' found') 358 ! 359 END SUBROUTINE iom_nf90_chkatt 360 361 362 !!---------------------------------------------------------------------- 363 !! INTERFACE iom_nf90_getatt 364 !!---------------------------------------------------------------------- 365 366 SUBROUTINE iom_nf90_getatt( kiomid, cdatt, katt0d, katt1d, patt0d, patt1d, cdatt0d, cdvar) 367 !!----------------------------------------------------------------------- 368 !! *** ROUTINE iom_nf90_getatt *** 369 !! 370 !! ** Purpose : read an attribute with NF90 371 !! (either a global attribute (default) or a variable 372 !! attribute if optional variable name is supplied (cdvar)) 373 !!----------------------------------------------------------------------- 374 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 375 CHARACTER(len=*) , INTENT(in ) :: cdatt ! attribute name 376 INTEGER , INTENT( out), OPTIONAL :: katt0d ! read scalar integer 377 INTEGER, DIMENSION(:) , INTENT( out), OPTIONAL :: katt1d ! read 1d array integer 378 REAL(wp) , INTENT( out), OPTIONAL :: patt0d ! read scalar real 379 REAL(wp), DIMENSION(:), INTENT( out), OPTIONAL :: patt1d ! read 1d array real 380 CHARACTER(len=*) , INTENT( out), OPTIONAL :: cdatt0d ! read character 381 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! name of the variable 382 ! 383 INTEGER :: if90id ! temporary integer 384 INTEGER :: ivarid ! NetCDF variable Id 326 385 LOGICAL :: llok ! temporary logical 327 386 CHARACTER(LEN=100) :: clinfo ! info character 328 387 !--------------------------------------------------------------------- 329 ! 388 ! 330 389 if90id = iom_file(kiomid)%nfid 331 llok = NF90_Inquire_attribute(if90id, NF90_GLOBAL, cdatt) == nf90_noerr 390 IF( PRESENT(cdvar) ) THEN 391 ! check the variable exists in the file 392 llok = NF90_INQ_VARID( if90id, TRIM(cdvar), ivarid ) == nf90_noerr 393 IF( llok ) THEN 394 ! check the variable has the attribute required 395 llok = NF90_Inquire_attribute(if90id, ivarid, cdatt) == nf90_noerr 396 ELSE 397 CALL ctl_warn('iom_nf90_getatt: no variable '//TRIM(cdvar)//' found') 398 ENDIF 399 ELSE 400 llok = NF90_Inquire_attribute(if90id, NF90_GLOBAL, cdatt) == nf90_noerr 401 ivarid = NF90_GLOBAL 402 ENDIF 403 ! 332 404 IF( llok) THEN 333 405 clinfo = 'iom_nf90_getatt, file: '//TRIM(iom_file(kiomid)%name)//', att: '//TRIM(cdatt) 334 CALL iom_nf90_check(NF90_GET_ATT(if90id, NF90_GLOBAL, cdatt, values=pvar), clinfo) 406 IF(PRESENT( katt0d)) CALL iom_nf90_check(NF90_GET_ATT(if90id, ivarid, cdatt, values = katt0d), clinfo) 407 IF(PRESENT( katt1d)) CALL iom_nf90_check(NF90_GET_ATT(if90id, ivarid, cdatt, values = katt1d), clinfo) 408 IF(PRESENT( patt0d)) CALL iom_nf90_check(NF90_GET_ATT(if90id, ivarid, cdatt, values = patt0d), clinfo) 409 IF(PRESENT( patt1d)) CALL iom_nf90_check(NF90_GET_ATT(if90id, ivarid, cdatt, values = patt1d), clinfo) 410 IF(PRESENT(cdatt0d)) CALL iom_nf90_check(NF90_GET_ATT(if90id, ivarid, cdatt, values = cdatt0d), clinfo) 335 411 ELSE 336 CALL ctl_warn('iom_nf90_getatt: no attribute '//cdatt//' found') 337 pvar = -999 338 ENDIF 339 ! 
340 END SUBROUTINE iom_nf90_intatt 341 342 343 SUBROUTINE iom_nf90_gettime( kiomid, kvid, ptime, cdunits, cdcalendar ) 344 !!-------------------------------------------------------------------- 345 !! *** SUBROUTINE iom_gettime *** 346 !! 347 !! ** Purpose : read the time axis kvid in the file with NF90 348 !!-------------------------------------------------------------------- 349 INTEGER , INTENT(in ) :: kiomid ! file Identifier 350 INTEGER , INTENT(in ) :: kvid ! variable id 351 REAL(wp), DIMENSION(:) , INTENT( out) :: ptime ! the time axis 352 CHARACTER(len=*), OPTIONAL, INTENT( out) :: cdunits ! units attribute 353 CHARACTER(len=*), OPTIONAL, INTENT( out) :: cdcalendar ! calendar attribute 354 ! 355 CHARACTER(LEN=100) :: clinfo ! info character 356 !--------------------------------------------------------------------- 357 clinfo = 'iom_nf90_gettime, file: '//TRIM(iom_file(kiomid)%name)//', var: '//TRIM(iom_file(kiomid)%cn_var(kvid)) 358 CALL iom_nf90_check(NF90_GET_VAR(iom_file(kiomid)%nfid, iom_file(kiomid)%nvid(kvid), ptime(:), & 359 & start=(/ 1 /), count=(/ iom_file(kiomid)%dimsz(1, kvid) /)), clinfo) 360 IF ( PRESENT(cdunits) ) THEN 361 CALL iom_nf90_check(NF90_GET_ATT(iom_file(kiomid)%nfid, iom_file(kiomid)%nvid(kvid), "units", & 362 & values=cdunits), clinfo) 363 ENDIF 364 IF ( PRESENT(cdcalendar) ) THEN 365 CALL iom_nf90_check(NF90_GET_ATT(iom_file(kiomid)%nfid, iom_file(kiomid)%nvid(kvid), "calendar", & 366 & values=cdcalendar), clinfo) 367 ENDIF 368 ! 369 END SUBROUTINE iom_nf90_gettime 412 CALL ctl_warn('iom_nf90_getatt: no attribute '//TRIM(cdatt)//' found') 413 IF(PRESENT( katt0d)) katt0d = -999 414 IF(PRESENT( katt1d)) katt1d(:) = -999 415 IF(PRESENT( patt0d)) patt0d = -999._wp 416 IF(PRESENT( patt1d)) patt1d(:) = -999._wp 417 IF(PRESENT(cdatt0d)) cdatt0d = '!' 418 ENDIF 419 ! 420 END SUBROUTINE iom_nf90_getatt 421 422 423 SUBROUTINE iom_nf90_putatt( kiomid, cdatt, katt0d, katt1d, patt0d, patt1d, cdatt0d, cdvar) 424 !!----------------------------------------------------------------------- 425 !! *** ROUTINE iom_nf90_putatt *** 426 !! 427 !! ** Purpose : write an attribute with NF90 428 !! (either a global attribute (default) or a variable 429 !! attribute if optional variable name is supplied (cdvar)) 430 !!----------------------------------------------------------------------- 431 INTEGER , INTENT(in ) :: kiomid ! Identifier of the file 432 CHARACTER(len=*) , INTENT(in ) :: cdatt ! attribute name 433 INTEGER , INTENT(in ), OPTIONAL :: katt0d ! read scalar integer 434 INTEGER, DIMENSION(:) , INTENT(in ), OPTIONAL :: katt1d ! read 1d array integer 435 REAL(wp) , INTENT(in ), OPTIONAL :: patt0d ! read scalar real 436 REAL(wp), DIMENSION(:), INTENT(in ), OPTIONAL :: patt1d ! read 1d array real 437 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdatt0d ! read character 438 CHARACTER(len=*) , INTENT(in ), OPTIONAL :: cdvar ! name of the variable 439 ! 440 INTEGER :: if90id ! temporary integer 441 INTEGER :: ivarid ! NetCDF variable Id 442 INTEGER :: isize ! Attribute size 443 INTEGER :: itype ! Attribute type 444 LOGICAL :: llok ! temporary logical 445 LOGICAL :: llatt ! temporary logical 446 LOGICAL :: lldata ! temporary logical 447 CHARACTER(LEN=100) :: clinfo ! info character 448 !--------------------------------------------------------------------- 449 ! 450 if90id = iom_file(kiomid)%nfid 451 IF( PRESENT(cdvar) ) THEN 452 llok = NF90_INQ_VARID( if90id, TRIM(cdvar), ivarid ) == nf90_noerr ! is the variable in the file? 453 IF( .NOT. 
llok ) THEN 454 CALL ctl_warn('iom_nf90_putatt: no variable '//TRIM(cdvar)//' found' & 455 & , ' no attribute '//cdatt//' written' ) 456 RETURN 457 ENDIF 458 ELSE 459 ivarid = NF90_GLOBAL 460 ENDIF 461 llatt = NF90_Inquire_attribute(if90id, ivarid, cdatt, len = isize, xtype = itype ) == nf90_noerr 462 ! 463 ! trick: irec used to know if the file is in define mode or not 464 lldata = iom_file(kiomid)%irec /= -1 ! default: go back in define mode if in data mode 465 IF( lldata .AND. llatt ) THEN ! attribute already there. Do we really need to go back in define mode? 466 ! do we have the appropriate type? 467 IF(PRESENT( katt0d) .OR. PRESENT( katt1d)) llok = itype == NF90_INT 468 IF(PRESENT( patt0d) .OR. PRESENT( patt1d)) llok = itype == NF90_DOUBLE 469 IF(PRESENT(cdatt0d) ) llok = itype == NF90_CHAR 470 ! and do we have the appropriate size? 471 IF(PRESENT( katt0d)) llok = llok .AND. isize == 1 472 IF(PRESENT( katt1d)) llok = llok .AND. isize == SIZE(katt1d) 473 IF(PRESENT( patt0d)) llok = llok .AND. isize == 1 474 IF(PRESENT( patt1d)) llok = llok .AND. isize == SIZE(patt1d) 475 IF(PRESENT(cdatt0d)) llok = llok .AND. isize == LEN_TRIM(cdatt0d) 476 ! 477 lldata = .NOT. llok 478 ENDIF 479 ! 480 clinfo = 'iom_nf90_putatt, file: '//TRIM(iom_file(kiomid)%name)//', att: '//TRIM(cdatt) 481 IF(lldata) CALL iom_nf90_check(NF90_REDEF( if90id ), clinfo) ! leave data mode to define mode 482 ! 483 IF(PRESENT( katt0d)) CALL iom_nf90_check(NF90_PUT_ATT(if90id, ivarid, cdatt, values = katt0d) , clinfo) 484 IF(PRESENT( katt1d)) CALL iom_nf90_check(NF90_PUT_ATT(if90id, ivarid, cdatt, values = katt1d) , clinfo) 485 IF(PRESENT( patt0d)) CALL iom_nf90_check(NF90_PUT_ATT(if90id, ivarid, cdatt, values = patt0d) , clinfo) 486 IF(PRESENT( patt1d)) CALL iom_nf90_check(NF90_PUT_ATT(if90id, ivarid, cdatt, values = patt1d) , clinfo) 487 IF(PRESENT(cdatt0d)) CALL iom_nf90_check(NF90_PUT_ATT(if90id, ivarid, cdatt, values = trim(cdatt0d)), clinfo) 488 ! 489 IF(lldata) CALL iom_nf90_check(NF90_ENDDEF( if90id ), clinfo) ! leave define mode to data mode 490 ! 491 END SUBROUTINE iom_nf90_putatt 370 492 371 493 372 494 SUBROUTINE iom_nf90_rp0123d( kt, kwrite, kiomid, cdvar , kvid , ktype, & 373 & pv_r0d, pv_r1d, pv_r2d, pv_r3d )495 & pv_r0d, pv_r1d, pv_r2d, pv_r3d ) 374 496 !!-------------------------------------------------------------------- 375 497 !! *** SUBROUTINE iom_nf90_rstput *** … … 395 517 INTEGER, DIMENSION(4) :: idimid ! dimensions id 396 518 CHARACTER(LEN=256) :: clinfo ! info character 397 CHARACTER(LEN= 12), DIMENSION( 4) :: cltmp ! temporary character519 CHARACTER(LEN= 12), DIMENSION(5) :: cltmp ! temporary character 398 520 INTEGER :: if90id ! nf90 file identifier 399 521 INTEGER :: idmy ! dummy variable 400 522 INTEGER :: itype ! variable type 401 523 INTEGER, DIMENSION(4) :: ichunksz ! NetCDF4 chunk sizes. Will be computed using 402 ! nn_nchunks_[i,j,k,t] namelist parameters 403 INTEGER :: ichunkalg, ishuffle,& 404 ideflate, ideflate_level 405 ! NetCDF4 internally fixed parameters 524 ! ! nn_nchunks_[i,j,k,t] namelist parameters 525 INTEGER :: ichunkalg, ishuffle, ideflate, ideflate_level 526 ! ! NetCDF4 internally fixed parameters 406 527 LOGICAL :: lchunk ! logical switch to activate chunking and compression 407 ! when appropriate (currently chunking is applied to 4d fields only) 528 ! ! when appropriate (currently chunking is applied to 4d fields only) 529 INTEGER :: idlv ! local variable 530 INTEGER :: idim3 ! 
id of the third dimension 408 531 !--------------------------------------------------------------------- 409 532 ! … … 419 542 ENDIF 420 543 ! define the dimension variables if it is not already done 421 cltmp = (/ 'nav_lon ', 'nav_lat ', 'nav_lev ', 'time_counter' /) 544 ! Warning: we must use the same character length in an array constructor (at least for gcc compiler) 545 cltmp = (/ 'nav_lon ', 'nav_lat ', 'nav_lev ', 'time_counter', 'numcat ' /) 422 546 CALL iom_nf90_check(NF90_DEF_VAR( if90id, TRIM(cltmp(1)), NF90_FLOAT , (/ 1, 2 /), iom_file(kiomid)%nvid(1) ), clinfo) 423 547 CALL iom_nf90_check(NF90_DEF_VAR( if90id, TRIM(cltmp(2)), NF90_FLOAT , (/ 1, 2 /), iom_file(kiomid)%nvid(2) ), clinfo) … … 427 551 iom_file(kiomid)%nvars = 4 428 552 iom_file(kiomid)%luld(1:4) = (/ .FALSE., .FALSE., .FALSE., .TRUE. /) 429 iom_file(kiomid)%cn_var(1:4) = cltmp 430 iom_file(kiomid)%ndims(1:4) = (/ 2, 2, 1, 1 /) 553 iom_file(kiomid)%cn_var(1:4) = cltmp(1:4) 554 iom_file(kiomid)%ndims(1:4) = (/ 2, 2, 1, 1 /) 555 IF( NF90_INQ_DIMID( if90id, 'numcat', idmy ) == nf90_noerr ) THEN ! add a 5th variable corresponding to the 5th dimension 556 CALL iom_nf90_check(NF90_DEF_VAR( if90id, TRIM(cltmp(5)), NF90_FLOAT , (/ 5 /), iom_file(kiomid)%nvid(5) ), clinfo) 557 iom_file(kiomid)%nvars = 5 558 iom_file(kiomid)%luld(5) = .FALSE. 559 iom_file(kiomid)%cn_var(5) = cltmp(5) 560 iom_file(kiomid)%ndims(5) = 1 561 ENDIF 431 562 ! trick: defined to 0 to say that dimension variables are defined but not yet written 432 563 iom_file(kiomid)%dimsz(1, 1) = 0 … … 450 581 ! variable definition 451 582 IF( PRESENT(pv_r0d) ) THEN ; idims = 0 452 ELSEIF( PRESENT(pv_r1d) ) THEN ; idims = 2 ; idimid(1:idims) = (/ 3,4/) 583 ELSEIF( PRESENT(pv_r1d) ) THEN 584 IF( SIZE(pv_r1d,1) == jpk ) THEN ; idim3 = 3 585 ELSE ; idim3 = 5 586 ENDIF 587 idims = 2 ; idimid(1:idims) = (/idim3,4/) 453 588 ELSEIF( PRESENT(pv_r2d) ) THEN ; idims = 3 ; idimid(1:idims) = (/1,2 ,4/) 454 ELSEIF( PRESENT(pv_r3d) ) THEN ; idims = 4 ; idimid(1:idims) = (/1,2,3,4/) 589 ELSEIF( PRESENT(pv_r3d) ) THEN 590 IF( SIZE(pv_r3d,3) == jpk ) THEN ; idim3 = 3 591 ELSE ; idim3 = 5 592 ENDIF 593 idims = 4 ; idimid(1:idims) = (/1,2,idim3,4/) 455 594 ENDIF 456 595 IF( PRESENT(ktype) ) THEN ! variable external type 457 596 SELECT CASE (ktype) 458 CASE (jp_r8) ; itype = NF90_DOUBLE459 CASE (jp_r4) ; itype = NF90_FLOAT460 CASE (jp_i4) ; itype = NF90_INT461 CASE (jp_i2) ; itype = NF90_SHORT462 CASE (jp_i1) ; itype = NF90_BYTE597 CASE (jp_r8) ; itype = NF90_DOUBLE 598 CASE (jp_r4) ; itype = NF90_FLOAT 599 CASE (jp_i4) ; itype = NF90_INT 600 CASE (jp_i2) ; itype = NF90_SHORT 601 CASE (jp_i1) ; itype = NF90_BYTE 463 602 CASE DEFAULT ; CALL ctl_stop( TRIM(clinfo)//' unknown variable type' ) 464 603 END SELECT … … 468 607 IF( PRESENT(pv_r0d) ) THEN 469 608 CALL iom_nf90_check(NF90_DEF_VAR( if90id, TRIM(cdvar), itype, & 470 & iom_file(kiomid)%nvid(idvar) ), clinfo)609 & iom_file(kiomid)%nvid(idvar) ), clinfo ) 471 610 ELSE 472 611 CALL iom_nf90_check(NF90_DEF_VAR( if90id, TRIM(cdvar), itype, idimid(1:idims), & 473 & iom_file(kiomid)%nvid(idvar) ), clinfo)612 & iom_file(kiomid)%nvid(idvar) ), clinfo ) 474 613 ENDIF 475 614 lchunk = .false. 476 IF( snc4set%luse .AND. idims .eq.4 )lchunk = .true.615 IF( snc4set%luse .AND. idims == 4 ) lchunk = .true. 477 616 ! update informations structure related the new variable we want to add... 478 617 iom_file(kiomid)%nvars = idvar … … 495 634 ichunksz(3) = MIN( ichunksz(3),MAX( (ichunksz(3)-1)/snc4set%nk + 1 , 1 ) ) ! 
Suggested default nc4set%nk=6 496 635 ichunksz(4) = 1 ! Do not allow chunks to span the 497 636 ! ! unlimited dimension 498 637 CALL iom_nf90_check(SET_NF90_DEF_VAR_CHUNKING(if90id, idvar, ichunkalg, ichunksz), clinfo) 499 638 CALL iom_nf90_check(SET_NF90_DEF_VAR_DEFLATE(if90id, idvar, ishuffle, ideflate, ideflate_level), clinfo) … … 504 643 idvar = kvid 505 644 ENDIF 506 645 ! 507 646 ! time step kwrite : write the variable 508 647 IF( kt == kwrite ) THEN … … 528 667 ! trick: is defined to 0 => dimension variable are defined but not yet written 529 668 IF( iom_file(kiomid)%dimsz(1, 1) == 0 ) THEN 530 CALL iom_nf90_check(NF90_INQ_VARID( if90id, 'nav_lon' , idmy ), clinfo) 531 CALL iom_nf90_check(NF90_PUT_VAR( if90id, idmy, glamt(ix1:ix2, iy1:iy2) ), clinfo) 532 CALL iom_nf90_check(NF90_INQ_VARID( if90id, 'nav_lat' , idmy ), clinfo) 533 CALL iom_nf90_check(NF90_PUT_VAR( if90id, idmy, gphit(ix1:ix2, iy1:iy2) ), clinfo) 534 CALL iom_nf90_check(NF90_INQ_VARID( if90id, 'nav_lev' , idmy ), clinfo) 535 CALL iom_nf90_check(NF90_PUT_VAR( if90id, idmy, gdept_1d ), clinfo) 669 CALL iom_nf90_check( NF90_INQ_VARID( if90id, 'nav_lon' , idmy ) , clinfo ) 670 CALL iom_nf90_check( NF90_PUT_VAR ( if90id, idmy, glamt(ix1:ix2, iy1:iy2) ), clinfo ) 671 CALL iom_nf90_check( NF90_INQ_VARID( if90id, 'nav_lat' , idmy ) , clinfo ) 672 CALL iom_nf90_check( NF90_PUT_VAR ( if90id, idmy, gphit(ix1:ix2, iy1:iy2) ), clinfo ) 673 CALL iom_nf90_check( NF90_INQ_VARID( if90id, 'nav_lev' , idmy ), clinfo ) 674 CALL iom_nf90_check( NF90_PUT_VAR ( if90id, idmy, gdept_1d ), clinfo ) 675 IF( NF90_INQ_VARID( if90id, 'numcat', idmy ) == nf90_noerr ) THEN 676 CALL iom_nf90_check( NF90_PUT_VAR ( if90id, idmy, (/ (idlv, idlv = 1,iom_file(kiomid)%nlev) /)), clinfo ) 677 ENDIF 536 678 ! +++ WRONG VALUE: to be improved but not really useful... 537 CALL iom_nf90_check( NF90_INQ_VARID( if90id, 'time_counter', idmy ), clinfo)538 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idmy, kt ), clinfo)679 CALL iom_nf90_check( NF90_INQ_VARID( if90id, 'time_counter', idmy ), clinfo ) 680 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idmy, kt ), clinfo ) 539 681 ! update the values of the variables dimensions size 540 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 1, len = iom_file(kiomid)%dimsz(1,1) ), clinfo)541 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 2, len = iom_file(kiomid)%dimsz(2,1) ), clinfo)682 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 1, len = iom_file(kiomid)%dimsz(1,1) ), clinfo ) 683 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 2, len = iom_file(kiomid)%dimsz(2,1) ), clinfo ) 542 684 iom_file(kiomid)%dimsz(1:2, 2) = iom_file(kiomid)%dimsz(1:2, 1) 543 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 3, len = iom_file(kiomid)%dimsz(1,3) ), clinfo)685 CALL iom_nf90_check( NF90_INQUIRE_DIMENSION( if90id, 3, len = iom_file(kiomid)%dimsz(1,3) ), clinfo ) 544 686 iom_file(kiomid)%dimsz(1 , 4) = 1 ! unlimited dimension 545 687 IF(lwp) WRITE(numout,*) TRIM(clinfo)//' write dimension variables done' … … 550 692 ! 
============= 551 693 IF( PRESENT(pv_r0d) ) THEN 552 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r0d ), clinfo)694 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r0d ), clinfo ) 553 695 ELSEIF( PRESENT(pv_r1d) ) THEN 554 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r1d( :) ), clinfo)696 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r1d(:) ), clinfo ) 555 697 ELSEIF( PRESENT(pv_r2d) ) THEN 556 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r2d(ix1:ix2, iy1:iy2 ) ), clinfo)698 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r2d(ix1:ix2,iy1:iy2) ), clinfo ) 557 699 ELSEIF( PRESENT(pv_r3d) ) THEN 558 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r3d(ix1:ix2, iy1:iy2, :) ), clinfo)700 CALL iom_nf90_check( NF90_PUT_VAR( if90id, idvar, pv_r3d(ix1:ix2,iy1:iy2,:) ), clinfo ) 559 701 ENDIF 560 702 ! add 1 to the size of the temporal dimension (not really useful...) -
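iom_nf90_putatt above has to leave data mode (NF90_REDEF) before an attribute can be created or resized, and return to data mode (NF90_ENDDEF) afterwards. The minimal program below shows that round trip with the plain netCDF-Fortran API; the file name 'example.nc' and the attribute written are placeholders, and error handling is reduced to the open call.

PROGRAM putatt_sketch
   USE netcdf
   IMPLICIT NONE
   INTEGER :: incid, istatus
   !
   istatus = NF90_OPEN( 'example.nc', NF90_WRITE, incid )               ! opened files start in data mode
   IF( istatus /= nf90_noerr )   STOP 'cannot open example.nc'
   !
   istatus = NF90_REDEF  ( incid )                                      ! data mode   -> define mode
   istatus = NF90_PUT_ATT( incid, NF90_GLOBAL, 'history', 'edited' )    ! (re)write a global attribute
   istatus = NF90_ENDDEF ( incid )                                      ! define mode -> data mode
   istatus = NF90_CLOSE  ( incid )
END PROGRAM putatt_sketch

Skipping the NF90_REDEF/NF90_ENDDEF pair when the attribute already exists with the right type and size, as the new routine does, avoids re-entering define mode on every call.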
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lbclnk.F90
r10725 r10727 2 2 !!====================================================================== 3 3 !! *** MODULE lbclnk *** 4 !! Ocean: lateral boundary conditions4 !! NEMO : lateral boundary conditions 5 5 !!===================================================================== 6 6 !! History : OPA ! 1997-06 (G. Madec) Original code 7 7 !! NEMO 1.0 ! 2002-09 (G. Madec) F90: Free form and module 8 8 !! 3.2 ! 2009-03 (R. Benshila) External north fold treatment 9 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) optimization of BDY comm. via lbc_bdy_lnk and lbc_obc_lnk9 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) optimization of BDY comm. via lbc_bdy_lnk and lbc_obc_lnk 10 10 !! 3.4 ! 2012-12 (R. Bourdalle-Badie, G. Reffray) add a C1D case 11 11 !! 3.6 ! 2015-06 (O. Tintó and M. Castrillo) add lbc_lnk_multi 12 !!---------------------------------------------------------------------- 13 12 !! 4.0 ! 2017-03 (G. Madec) automatique allocation of array size (use with any 3rd dim size) 13 !! - ! 2017-04 (G. Madec) remove duplicated routines (lbc_lnk_2d_9, lbc_lnk_2d_multiple, lbc_lnk_3d_gather) 14 !! - ! 2017-05 (G. Madec) create generic.h90 files to generate all lbc and north fold routines 15 !!---------------------------------------------------------------------- 16 #if defined key_mpp_mpi 14 17 !!---------------------------------------------------------------------- 15 18 !! 'key_mpp_mpi' MPI massively parallel processing library 16 19 !!---------------------------------------------------------------------- 17 !! lbc_lnk : generic interface for mpp_lnk_3d and mpp_lnk_2d routines defined in lib_mpp 18 !! lbc_sum : generic interface for mpp_lnk_sum_3d and mpp_lnk_sum_2d routines defined in lib_mpp 19 !! lbc_lnk_e : generic interface for mpp_lnk_2d_e routine defined in lib_mpp 20 !! lbc_bdy_lnk : generic interface for mpp_lnk_bdy_2d and mpp_lnk_bdy_3d routines defined in lib_mpp 21 !!---------------------------------------------------------------------- 20 !! define the generic interfaces of lib_mpp routines 21 !!---------------------------------------------------------------------- 22 !! lbc_lnk : generic interface for mpp_lnk_3d and mpp_lnk_2d routines defined in lib_mpp 23 !! lbc_bdy_lnk : generic interface for mpp_lnk_bdy_2d and mpp_lnk_bdy_3d routines defined in lib_mpp 24 !!---------------------------------------------------------------------- 25 USE par_oce ! ocean dynamics and tracers 22 26 USE lib_mpp ! distributed memory computing library 23 27 USE lbcnfd ! north fold 28 29 INTERFACE lbc_lnk 30 MODULE PROCEDURE mpp_lnk_2d , mpp_lnk_3d , mpp_lnk_4d 31 END INTERFACE 32 INTERFACE lbc_lnk_ptr 33 MODULE PROCEDURE mpp_lnk_2d_ptr , mpp_lnk_3d_ptr , mpp_lnk_4d_ptr 34 END INTERFACE 24 35 INTERFACE lbc_lnk_multi 25 MODULE PROCEDURE mpp_lnk_2d_9, mpp_lnk_2d_multiple 26 END INTERFACE 27 ! 28 INTERFACE lbc_lnk 29 MODULE PROCEDURE mpp_lnk_3d_gather, mpp_lnk_3d, mpp_lnk_2d 30 END INTERFACE 31 ! 32 INTERFACE lbc_sum 33 MODULE PROCEDURE mpp_lnk_sum_3d, mpp_lnk_sum_2d 36 MODULE PROCEDURE lbc_lnk_2d_multi, lbc_lnk_3d_multi, lbc_lnk_4d_multi 34 37 END INTERFACE 35 38 ! 36 39 INTERFACE lbc_bdy_lnk 37 MODULE PROCEDURE mpp_lnk_bdy_2d, mpp_lnk_bdy_3d 38 END INTERFACE 39 ! 40 INTERFACE lbc_lnk_e 41 MODULE PROCEDURE mpp_lnk_2d_e 40 MODULE PROCEDURE mpp_lnk_bdy_2d, mpp_lnk_bdy_3d, mpp_lnk_bdy_4d 42 41 END INTERFACE 43 42 ! … … 46 45 END INTERFACE 47 46 48 PUBLIC lbc_lnk ! ocean lateral boundary conditions 49 PUBLIC lbc_lnk_multi ! modified ocean lateral boundary conditions 50 PUBLIC lbc_sum 51 PUBLIC lbc_lnk_e ! 47 PUBLIC lbc_lnk ! 
ocean/ice lateral boundary conditions 48 PUBLIC lbc_lnk_multi ! modified ocean/ice lateral boundary conditions 52 49 PUBLIC lbc_bdy_lnk ! ocean lateral BDY boundary conditions 53 PUBLIC lbc_lnk_icb ! 50 PUBLIC lbc_lnk_icb ! iceberg lateral boundary conditions 54 51 55 52 !!---------------------------------------------------------------------- 56 53 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 57 !! $Id: lbclnk.F90 6493 2016-04-22 13:52:52Z mathiot $ 58 !! Software governed by the CeCILL licence (./LICENSE) 59 !!---------------------------------------------------------------------- 54 !! $Id: lbclnk.F90 10425 2018-12-19 21:54:16Z smasson $ 55 !! Software governed by the CeCILL license (see ./LICENSE) 56 !!---------------------------------------------------------------------- 57 CONTAINS 58 59 #else 60 !!---------------------------------------------------------------------- 61 !! Default option shared memory computing 62 !!---------------------------------------------------------------------- 63 !! routines setting the appropriate values 64 !! on first and last row and column of the global domain 65 !!---------------------------------------------------------------------- 66 !! lbc_lnk_sum_3d: compute sum over the halos on a 3D variable on ocean mesh 67 !! lbc_lnk_sum_3d: compute sum over the halos on a 2D variable on ocean mesh 68 !! lbc_lnk : generic interface for lbc_lnk_3d and lbc_lnk_2d 69 !! lbc_lnk_3d : set the lateral boundary condition on a 3D variable on ocean mesh 70 !! lbc_lnk_2d : set the lateral boundary condition on a 2D variable on ocean mesh 71 !! lbc_bdy_lnk : set the lateral BDY boundary condition 72 !!---------------------------------------------------------------------- 73 USE oce ! ocean dynamics and tracers 74 USE dom_oce ! ocean space and time domain 75 USE in_out_manager ! I/O manager 76 USE lbcnfd ! north fold 77 78 IMPLICIT NONE 79 PRIVATE 80 81 INTERFACE lbc_lnk 82 MODULE PROCEDURE lbc_lnk_2d , lbc_lnk_3d , lbc_lnk_4d 83 END INTERFACE 84 INTERFACE lbc_lnk_ptr 85 MODULE PROCEDURE lbc_lnk_2d_ptr , lbc_lnk_3d_ptr , lbc_lnk_4d_ptr 86 END INTERFACE 87 INTERFACE lbc_lnk_multi 88 MODULE PROCEDURE lbc_lnk_2d_multi, lbc_lnk_3d_multi, lbc_lnk_4d_multi 89 END INTERFACE 90 ! 91 INTERFACE lbc_bdy_lnk 92 MODULE PROCEDURE lbc_bdy_lnk_2d, lbc_bdy_lnk_3d, lbc_bdy_lnk_4d 93 END INTERFACE 94 ! 95 INTERFACE lbc_lnk_icb 96 MODULE PROCEDURE lbc_lnk_2d_icb 97 END INTERFACE 98 99 PUBLIC lbc_lnk ! ocean/ice lateral boundary conditions 100 PUBLIC lbc_lnk_multi ! modified ocean/ice lateral boundary conditions 101 PUBLIC lbc_bdy_lnk ! ocean lateral BDY boundary conditions 102 PUBLIC lbc_lnk_icb ! iceberg lateral boundary conditions 103 104 !!---------------------------------------------------------------------- 105 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 106 !! $Id: lbclnk.F90 10425 2018-12-19 21:54:16Z smasson $ 107 !! Software governed by the CeCILL license (see ./LICENSE) 108 !!---------------------------------------------------------------------- 109 CONTAINS 110 111 !!====================================================================== 112 !! Default option 3D shared memory computing 113 !!====================================================================== 114 !! routines setting land point, or east-west cyclic, 115 !! or north-south cyclic, or north fold values 116 !! on first and last row and column of the global domain 117 !!---------------------------------------------------------------------- 118 119 !!---------------------------------------------------------------------- 120 !! 
*** routine lbc_lnk_(2,3,4)d *** 121 !! 122 !! * Argument : dummy argument use in lbc_lnk_... routines 123 !! ptab : array or pointer of arrays on which the boundary condition is applied 124 !! cd_nat : nature of array grid-points 125 !! psgn : sign used across the north fold boundary 126 !! kfld : optional, number of pt3d arrays 127 !! cd_mpp : optional, fill the overlap area only 128 !! pval : optional, background value (used at closed boundaries) 129 !!---------------------------------------------------------------------- 130 ! 131 ! !== 2D array and array of 2D pointer ==! 132 ! 133 # define DIM_2d 134 # define ROUTINE_LNK lbc_lnk_2d 135 # include "lbc_lnk_generic.h90" 136 # undef ROUTINE_LNK 137 # define MULTI 138 # define ROUTINE_LNK lbc_lnk_2d_ptr 139 # include "lbc_lnk_generic.h90" 140 # undef ROUTINE_LNK 141 # undef MULTI 142 # undef DIM_2d 143 ! 144 ! !== 3D array and array of 3D pointer ==! 145 ! 146 # define DIM_3d 147 # define ROUTINE_LNK lbc_lnk_3d 148 # include "lbc_lnk_generic.h90" 149 # undef ROUTINE_LNK 150 # define MULTI 151 # define ROUTINE_LNK lbc_lnk_3d_ptr 152 # include "lbc_lnk_generic.h90" 153 # undef ROUTINE_LNK 154 # undef MULTI 155 # undef DIM_3d 156 ! 157 ! !== 4D array and array of 4D pointer ==! 158 ! 159 # define DIM_4d 160 # define ROUTINE_LNK lbc_lnk_4d 161 # include "lbc_lnk_generic.h90" 162 # undef ROUTINE_LNK 163 # define MULTI 164 # define ROUTINE_LNK lbc_lnk_4d_ptr 165 # include "lbc_lnk_generic.h90" 166 # undef ROUTINE_LNK 167 # undef MULTI 168 # undef DIM_4d 169 170 !!====================================================================== 171 !! identical routines in both C1D and shared memory computing 172 !!====================================================================== 173 174 !!---------------------------------------------------------------------- 175 !! *** routine lbc_bdy_lnk_(2,3,4)d *** 176 !! 177 !! wrapper rountine to 'lbc_lnk_3d'. This wrapper is used 178 !! to maintain the same interface with regards to the mpp case 179 !!---------------------------------------------------------------------- 180 181 SUBROUTINE lbc_bdy_lnk_4d( cdname, pt4d, cd_type, psgn, ib_bdy ) 182 !!---------------------------------------------------------------------- 183 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 184 REAL(wp), DIMENSION(:,:,:,:), INTENT(inout) :: pt4d ! 3D array on which the lbc is applied 185 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 186 REAL(wp) , INTENT(in ) :: psgn ! sign used across north fold 187 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 188 !!---------------------------------------------------------------------- 189 CALL lbc_lnk_4d( cdname, pt4d, cd_type, psgn) 190 END SUBROUTINE lbc_bdy_lnk_4d 191 192 SUBROUTINE lbc_bdy_lnk_3d( cdname, pt3d, cd_type, psgn, ib_bdy ) 193 !!---------------------------------------------------------------------- 194 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 195 REAL(wp), DIMENSION(:,:,:), INTENT(inout) :: pt3d ! 3D array on which the lbc is applied 196 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 197 REAL(wp) , INTENT(in ) :: psgn ! sign used across north fold 198 INTEGER , INTENT(in ) :: ib_bdy ! 
BDY boundary set 199 !!---------------------------------------------------------------------- 200 CALL lbc_lnk_3d( cdname, pt3d, cd_type, psgn) 201 END SUBROUTINE lbc_bdy_lnk_3d 202 203 204 SUBROUTINE lbc_bdy_lnk_2d( cdname, pt2d, cd_type, psgn, ib_bdy ) 205 !!---------------------------------------------------------------------- 206 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 207 REAL(wp), DIMENSION(:,:), INTENT(inout) :: pt2d ! 3D array on which the lbc is applied 208 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 209 REAL(wp) , INTENT(in ) :: psgn ! sign used across north fold 210 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 211 !!---------------------------------------------------------------------- 212 CALL lbc_lnk_2d( cdname, pt2d, cd_type, psgn) 213 END SUBROUTINE lbc_bdy_lnk_2d 214 215 216 !!gm This routine should be removed with an optional halos size added in argument of generic routines 217 218 SUBROUTINE lbc_lnk_2d_icb( cdname, pt2d, cd_type, psgn, ki, kj ) 219 !!---------------------------------------------------------------------- 220 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 221 REAL(wp), DIMENSION(:,:), INTENT(inout) :: pt2d ! 2D array on which the lbc is applied 222 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 223 REAL(wp) , INTENT(in ) :: psgn ! sign used across north fold 224 INTEGER , INTENT(in ) :: ki, kj ! sizes of extra halo (not needed in non-mpp) 225 !!---------------------------------------------------------------------- 226 CALL lbc_lnk_2d( cdname, pt2d, cd_type, psgn ) 227 END SUBROUTINE lbc_lnk_2d_icb 228 !!gm end 229 230 #endif 231 232 !!====================================================================== 233 !! identical routines in both distributed and shared memory computing 234 !!====================================================================== 235 236 !!---------------------------------------------------------------------- 237 !! *** load_ptr_(2,3,4)d *** 238 !! 239 !! * Dummy Argument : 240 !! in ==> ptab ! array to be loaded (2D, 3D or 4D) 241 !! cd_nat ! nature of pt2d array grid-points 242 !! psgn ! sign used across the north fold boundary 243 !! inout <=> ptab_ptr ! array of 2D, 3D or 4D pointers 244 !! cdna_ptr ! nature of ptab array grid-points 245 !! psgn_ptr ! sign used across the north fold boundary 246 !! kfld ! number of elements that has been attributed 247 !!---------------------------------------------------------------------- 248 249 !!---------------------------------------------------------------------- 250 !! *** lbc_lnk_(2,3,4)d_multi *** 251 !! *** load_ptr_(2,3,4)d *** 252 !! 253 !! * Argument : dummy argument use in lbc_lnk_multi_... routines 254 !! 
255 !!---------------------------------------------------------------------- 256 257 # define DIM_2d 258 # define ROUTINE_MULTI lbc_lnk_2d_multi 259 # define ROUTINE_LOAD load_ptr_2d 260 # include "lbc_lnk_multi_generic.h90" 261 # undef ROUTINE_MULTI 262 # undef ROUTINE_LOAD 263 # undef DIM_2d 264 265 266 # define DIM_3d 267 # define ROUTINE_MULTI lbc_lnk_3d_multi 268 # define ROUTINE_LOAD load_ptr_3d 269 # include "lbc_lnk_multi_generic.h90" 270 # undef ROUTINE_MULTI 271 # undef ROUTINE_LOAD 272 # undef DIM_3d 273 274 275 # define DIM_4d 276 # define ROUTINE_MULTI lbc_lnk_4d_multi 277 # define ROUTINE_LOAD load_ptr_4d 278 # include "lbc_lnk_multi_generic.h90" 279 # undef ROUTINE_MULTI 280 # undef ROUTINE_LOAD 281 # undef DIM_4d 60 282 61 283 !!====================================================================== -
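Note on the generic-include pattern used above: the hand-written 2D/3D routines of the previous revision are replaced by a single template, lbc_lnk_generic.h90, that is #include'd several times under different DIM_* / MULTI / ROUTINE_LNK macro settings, once per rank and per pointer variant. The template itself is not part of this changeset, so the fragment below is only a schematic sketch of how such a pair of files typically fits together; the names my_lnk_generic.h90 and my_lnk_2d (and the reduced argument list) are illustrative, not NEMO code.

    ! --- my_lnk_generic.h90 (hypothetical template, one copy of the logic) ---
       SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn )
          CHARACTER(len=*), INTENT(in   ) ::   cdname     ! name of the calling routine
    #if defined DIM_2d
          REAL(wp), DIMENSION(:,:)  , INTENT(inout) ::   ptab
    #endif
    #if defined DIM_3d
          REAL(wp), DIMENSION(:,:,:), INTENT(inout) ::   ptab
    #endif
          CHARACTER(len=1), INTENT(in   ) ::   cd_nat     ! nature of ptab grid points
          REAL(wp)        , INTENT(in   ) ::   psgn       ! sign change across the north fold
          !                                   ! rank-agnostic halo-filling code written once here
       END SUBROUTINE ROUTINE_LNK

    ! --- including module: each define/include/undef block expands to one routine ---
    #  define DIM_2d
    #  define ROUTINE_LNK   my_lnk_2d
    #  include "my_lnk_generic.h90"
    #  undef  ROUTINE_LNK
    #  undef  DIM_2d

Each further block (DIM_3d, DIM_4d, with or without MULTI) generates another specific routine from the same source, which is why the new lbclnk.F90 contains little more than interface blocks and preprocessor directives.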
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lbcnfd.F90
r10725 r10727 5 5 !!====================================================================== 6 6 !! History : 3.2 ! 2009-03 (R. Benshila) Original code 7 !! 3.5 ! 2013-07 (I. Epicoco, S. Mocavero - CMCC) MPP optimization 7 !! 3.5 ! 2013-07 (I. Epicoco, S. Mocavero - CMCC) MPP optimization 8 !! 4.0 ! 2017-04 (G. Madec) automatique allocation of array argument (use any 3rd dimension) 8 9 !!---------------------------------------------------------------------- 9 10 … … 12 13 !! lbc_nfd_3d : lateral boundary condition: North fold treatment for a 3D arrays (lbc_nfd) 13 14 !! lbc_nfd_2d : lateral boundary condition: North fold treatment for a 2D arrays (lbc_nfd) 14 !! mpp_lbc_nfd_3d : North fold treatment for a 3D arrays optimized for MPP 15 !! mpp_lbc_nfd_2d : North fold treatment for a 2D arrays optimized for MPP 15 !! lbc_nfd_nogather : generic interface for lbc_nfd_nogather_3d and 16 !! lbc_nfd_nogather_2d routines (designed for use 17 !! with ln_nnogather to avoid global width arrays 18 !! mpi all gather operations) 16 19 !!---------------------------------------------------------------------- 17 20 USE dom_oce ! ocean space and time domain … … 22 25 23 26 INTERFACE lbc_nfd 24 MODULE PROCEDURE lbc_nfd_3d, lbc_nfd_2d 27 MODULE PROCEDURE lbc_nfd_2d , lbc_nfd_3d , lbc_nfd_4d 28 MODULE PROCEDURE lbc_nfd_2d_ptr, lbc_nfd_3d_ptr, lbc_nfd_4d_ptr 29 MODULE PROCEDURE lbc_nfd_2d_ext 25 30 END INTERFACE 26 31 ! 27 INTERFACE mpp_lbc_nfd 28 MODULE PROCEDURE mpp_lbc_nfd_3d, mpp_lbc_nfd_2d 32 INTERFACE lbc_nfd_nogather 33 ! ! Currently only 4d array version is needed 34 MODULE PROCEDURE lbc_nfd_nogather_2d , lbc_nfd_nogather_3d 35 MODULE PROCEDURE lbc_nfd_nogather_4d 36 MODULE PROCEDURE lbc_nfd_nogather_2d_ptr, lbc_nfd_nogather_3d_ptr 37 ! MODULE PROCEDURE lbc_nfd_nogather_4d_ptr 29 38 END INTERFACE 30 39 31 PUBLIC lbc_nfd ! north fold conditions 32 PUBLIC mpp_lbc_nfd ! north fold conditions (parallel case) 40 TYPE, PUBLIC :: PTR_2D !: array of 2D pointers (also used in lib_mpp) 41 REAL(wp), DIMENSION (:,:) , POINTER :: pt2d 42 END TYPE PTR_2D 43 TYPE, PUBLIC :: PTR_3D !: array of 3D pointers (also used in lib_mpp) 44 REAL(wp), DIMENSION (:,:,:) , POINTER :: pt3d 45 END TYPE PTR_3D 46 TYPE, PUBLIC :: PTR_4D !: array of 4D pointers (also used in lib_mpp) 47 REAL(wp), DIMENSION (:,:,:,:), POINTER :: pt4d 48 END TYPE PTR_4D 49 50 PUBLIC lbc_nfd ! north fold conditions 51 PUBLIC lbc_nfd_nogather ! north fold conditions (no allgather case) 33 52 34 53 INTEGER, PUBLIC, PARAMETER :: jpmaxngh = 3 !: … … 38 57 !!---------------------------------------------------------------------- 39 58 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 40 !! $Id: lbcnfd.F90 6140 2015-12-21 11:35:23Z timgraham$41 !! Software governed by the CeCILL licen ce (./LICENSE)59 !! $Id: lbcnfd.F90 10425 2018-12-19 21:54:16Z smasson $ 60 !! Software governed by the CeCILL license (see ./LICENSE) 42 61 !!---------------------------------------------------------------------- 43 62 CONTAINS 44 63 45 SUBROUTINE lbc_nfd_3d( pt3d, cd_type, psgn ) 46 !!---------------------------------------------------------------------- 47 !! *** routine lbc_nfd_3d *** 48 !! 49 !! ** Purpose : 3D lateral boundary condition : North fold treatment 50 !! without processor exchanges. 51 !! 52 !! ** Method : 53 !! 54 !! ** Action : pt3d with updated values along the north fold 55 !!---------------------------------------------------------------------- 56 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 57 ! ! 
= T , U , V , F , W points 58 REAL(wp) , INTENT(in ) :: psgn ! control of the sign change 59 ! ! = -1. , the sign is changed if north fold boundary 60 ! ! = 1. , the sign is kept if north fold boundary 61 REAL(wp), DIMENSION(:,:,:), INTENT(inout) :: pt3d ! 3D array on which the boundary condition is applied 62 ! 63 INTEGER :: ji, jk 64 INTEGER :: ijt, iju, ijpj, ijpjm1 65 !!---------------------------------------------------------------------- 64 !!---------------------------------------------------------------------- 65 !! *** routine lbc_nfd_(2,3,4)d *** 66 !!---------------------------------------------------------------------- 67 !! 68 !! ** Purpose : lateral boundary condition 69 !! North fold treatment without processor exchanges. 70 !! 71 !! ** Method : 72 !! 73 !! ** Action : ptab with updated values along the north fold 74 !!---------------------------------------------------------------------- 75 ! 76 ! !== 2D array and array of 2D pointer ==! 77 ! 78 # define DIM_2d 79 # define ROUTINE_NFD lbc_nfd_2d 80 # include "lbc_nfd_generic.h90" 81 # undef ROUTINE_NFD 82 # define MULTI 83 # define ROUTINE_NFD lbc_nfd_2d_ptr 84 # include "lbc_nfd_generic.h90" 85 # undef ROUTINE_NFD 86 # undef MULTI 87 # undef DIM_2d 88 ! 89 ! !== 2D array with extra haloes ==! 90 ! 91 # define DIM_2d 92 # define ROUTINE_NFD lbc_nfd_2d_ext 93 # include "lbc_nfd_ext_generic.h90" 94 # undef ROUTINE_NFD 95 # undef DIM_2d 96 ! 97 ! !== 3D array and array of 3D pointer ==! 98 ! 99 # define DIM_3d 100 # define ROUTINE_NFD lbc_nfd_3d 101 # include "lbc_nfd_generic.h90" 102 # undef ROUTINE_NFD 103 # define MULTI 104 # define ROUTINE_NFD lbc_nfd_3d_ptr 105 # include "lbc_nfd_generic.h90" 106 # undef ROUTINE_NFD 107 # undef MULTI 108 # undef DIM_3d 109 ! 110 ! !== 4D array and array of 4D pointer ==! 111 ! 112 # define DIM_4d 113 # define ROUTINE_NFD lbc_nfd_4d 114 # include "lbc_nfd_generic.h90" 115 # undef ROUTINE_NFD 116 # define MULTI 117 # define ROUTINE_NFD lbc_nfd_4d_ptr 118 # include "lbc_nfd_generic.h90" 119 # undef ROUTINE_NFD 120 # undef MULTI 121 # undef DIM_4d 122 ! 123 ! lbc_nfd_nogather routines 124 ! 125 ! !== 2D array and array of 2D pointer ==! 126 ! 127 # define DIM_2d 128 # define ROUTINE_NFD lbc_nfd_nogather_2d 129 # include "lbc_nfd_nogather_generic.h90" 130 # undef ROUTINE_NFD 131 # define MULTI 132 # define ROUTINE_NFD lbc_nfd_nogather_2d_ptr 133 # include "lbc_nfd_nogather_generic.h90" 134 # undef ROUTINE_NFD 135 # undef MULTI 136 # undef DIM_2d 137 ! 138 ! !== 3D array and array of 3D pointer ==! 139 ! 140 # define DIM_3d 141 # define ROUTINE_NFD lbc_nfd_nogather_3d 142 # include "lbc_nfd_nogather_generic.h90" 143 # undef ROUTINE_NFD 144 # define MULTI 145 # define ROUTINE_NFD lbc_nfd_nogather_3d_ptr 146 # include "lbc_nfd_nogather_generic.h90" 147 # undef ROUTINE_NFD 148 # undef MULTI 149 # undef DIM_3d 150 ! 151 ! !== 4D array and array of 4D pointer ==! 152 ! 153 # define DIM_4d 154 # define ROUTINE_NFD lbc_nfd_nogather_4d 155 # include "lbc_nfd_nogather_generic.h90" 156 # undef ROUTINE_NFD 157 !# define MULTI 158 !# define ROUTINE_NFD lbc_nfd_nogather_4d_ptr 159 !# include "lbc_nfd_nogather_generic.h90" 160 !# undef ROUTINE_NFD 161 !# undef MULTI 162 # undef DIM_4d 66 163 67 SELECT CASE ( jpni ) 68 CASE ( 1 ) ; ijpj = nlcj ! 1 proc only along the i-direction 69 CASE DEFAULT ; ijpj = 4 ! 
several proc along the i-direction 70 END SELECT 71 ijpjm1 = ijpj-1 164 !!---------------------------------------------------------------------- 72 165 73 DO jk = 1, jpk74 !75 SELECT CASE ( npolj )76 !77 CASE ( 3 , 4 ) ! * North fold T-point pivot78 !79 SELECT CASE ( cd_type )80 CASE ( 'T' , 'W' ) ! T-, W-point81 DO ji = 2, jpiglo82 ijt = jpiglo-ji+283 pt3d(ji,ijpj,jk) = psgn * pt3d(ijt,ijpj-2,jk)84 END DO85 pt3d(1,ijpj,jk) = psgn * pt3d(3,ijpj-2,jk)86 DO ji = jpiglo/2+1, jpiglo87 ijt = jpiglo-ji+288 pt3d(ji,ijpjm1,jk) = psgn * pt3d(ijt,ijpjm1,jk)89 END DO90 CASE ( 'U' ) ! U-point91 DO ji = 1, jpiglo-192 iju = jpiglo-ji+193 pt3d(ji,ijpj,jk) = psgn * pt3d(iju,ijpj-2,jk)94 END DO95 pt3d( 1 ,ijpj,jk) = psgn * pt3d( 2 ,ijpj-2,jk)96 pt3d(jpiglo,ijpj,jk) = psgn * pt3d(jpiglo-1,ijpj-2,jk)97 DO ji = jpiglo/2, jpiglo-198 iju = jpiglo-ji+199 pt3d(ji,ijpjm1,jk) = psgn * pt3d(iju,ijpjm1,jk)100 END DO101 CASE ( 'V' ) ! V-point102 DO ji = 2, jpiglo103 ijt = jpiglo-ji+2104 pt3d(ji,ijpj-1,jk) = psgn * pt3d(ijt,ijpj-2,jk)105 pt3d(ji,ijpj ,jk) = psgn * pt3d(ijt,ijpj-3,jk)106 END DO107 pt3d(1,ijpj,jk) = psgn * pt3d(3,ijpj-3,jk)108 CASE ( 'F' ) ! F-point109 DO ji = 1, jpiglo-1110 iju = jpiglo-ji+1111 pt3d(ji,ijpj-1,jk) = psgn * pt3d(iju,ijpj-2,jk)112 pt3d(ji,ijpj ,jk) = psgn * pt3d(iju,ijpj-3,jk)113 END DO114 pt3d( 1 ,ijpj,jk) = psgn * pt3d( 2 ,ijpj-3,jk)115 pt3d(jpiglo,ijpj,jk) = psgn * pt3d(jpiglo-1,ijpj-3,jk)116 END SELECT117 !118 CASE ( 5 , 6 ) ! * North fold F-point pivot119 !120 SELECT CASE ( cd_type )121 CASE ( 'T' , 'W' ) ! T-, W-point122 DO ji = 1, jpiglo123 ijt = jpiglo-ji+1124 pt3d(ji,ijpj,jk) = psgn * pt3d(ijt,ijpj-1,jk)125 END DO126 CASE ( 'U' ) ! U-point127 DO ji = 1, jpiglo-1128 iju = jpiglo-ji129 pt3d(ji,ijpj,jk) = psgn * pt3d(iju,ijpj-1,jk)130 END DO131 pt3d(jpiglo,ijpj,jk) = psgn * pt3d(1,ijpj-1,jk)132 CASE ( 'V' ) ! V-point133 DO ji = 1, jpiglo134 ijt = jpiglo-ji+1135 pt3d(ji,ijpj,jk) = psgn * pt3d(ijt,ijpj-2,jk)136 END DO137 DO ji = jpiglo/2+1, jpiglo138 ijt = jpiglo-ji+1139 pt3d(ji,ijpjm1,jk) = psgn * pt3d(ijt,ijpjm1,jk)140 END DO141 CASE ( 'F' ) ! F-point142 DO ji = 1, jpiglo-1143 iju = jpiglo-ji144 pt3d(ji,ijpj ,jk) = psgn * pt3d(iju,ijpj-2,jk)145 END DO146 pt3d(jpiglo,ijpj,jk) = psgn * pt3d(1,ijpj-2,jk)147 DO ji = jpiglo/2+1, jpiglo-1148 iju = jpiglo-ji149 pt3d(ji,ijpjm1,jk) = psgn * pt3d(iju,ijpjm1,jk)150 END DO151 END SELECT152 !153 CASE DEFAULT ! * closed : the code probably never go through154 !155 SELECT CASE ( cd_type)156 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points157 pt3d(:, 1 ,jk) = 0.e0158 pt3d(:,ijpj,jk) = 0.e0159 CASE ( 'F' ) ! F-point160 pt3d(:,ijpj,jk) = 0.e0161 END SELECT162 !163 END SELECT ! npolj164 !165 END DO166 !167 END SUBROUTINE lbc_nfd_3d168 169 170 SUBROUTINE lbc_nfd_2d( pt2d, cd_type, psgn, pr2dj )171 !!----------------------------------------------------------------------172 !! *** routine lbc_nfd_2d ***173 !!174 !! ** Purpose : 2D lateral boundary condition : North fold treatment175 !! without processor exchanges.176 !!177 !! ** Method :178 !!179 !! ** Action : pt2d with updated values along the north fold180 !!----------------------------------------------------------------------181 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points182 ! ! = T , U , V , F , W points183 REAL(wp) , INTENT(in ) :: psgn ! control of the sign change184 ! ! = -1. , the sign is changed if north fold boundary185 ! ! = 1. , the sign is kept if north fold boundary186 REAL(wp), DIMENSION(:,:), INTENT(inout) :: pt2d ! 
2D array on which the boundary condition is applied187 INTEGER , OPTIONAL , INTENT(in ) :: pr2dj ! number of additional halos188 !189 INTEGER :: ji, jl, ipr2dj190 INTEGER :: ijt, iju, ijpj, ijpjm1191 !!----------------------------------------------------------------------192 193 SELECT CASE ( jpni )194 CASE ( 1 ) ; ijpj = nlcj ! 1 proc only along the i-direction195 CASE DEFAULT ; ijpj = 4 ! several proc along the i-direction196 END SELECT197 !198 IF( PRESENT(pr2dj) ) THEN ! use of additional halos199 ipr2dj = pr2dj200 IF( jpni > 1 ) ijpj = ijpj + ipr2dj201 ELSE202 ipr2dj = 0203 ENDIF204 !205 ijpjm1 = ijpj-1206 207 208 SELECT CASE ( npolj )209 !210 CASE ( 3, 4 ) ! * North fold T-point pivot211 !212 SELECT CASE ( cd_type )213 !214 CASE ( 'T' , 'W' ) ! T- , W-points215 DO jl = 0, ipr2dj216 DO ji = 2, jpiglo217 ijt=jpiglo-ji+2218 pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-2-jl)219 END DO220 END DO221 pt2d(1,ijpj) = psgn * pt2d(3,ijpj-2)222 DO ji = jpiglo/2+1, jpiglo223 ijt=jpiglo-ji+2224 pt2d(ji,ijpj-1) = psgn * pt2d(ijt,ijpj-1)225 END DO226 CASE ( 'U' ) ! U-point227 DO jl = 0, ipr2dj228 DO ji = 1, jpiglo-1229 iju = jpiglo-ji+1230 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-2-jl)231 END DO232 END DO233 pt2d( 1 ,ijpj ) = psgn * pt2d( 2 ,ijpj-2)234 pt2d(jpiglo,ijpj ) = psgn * pt2d(jpiglo-1,ijpj-2)235 pt2d(1 ,ijpj-1) = psgn * pt2d(jpiglo ,ijpj-1)236 DO ji = jpiglo/2, jpiglo-1237 iju = jpiglo-ji+1238 pt2d(ji,ijpjm1) = psgn * pt2d(iju,ijpjm1)239 END DO240 CASE ( 'V' ) ! V-point241 DO jl = -1, ipr2dj242 DO ji = 2, jpiglo243 ijt = jpiglo-ji+2244 pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-3-jl)245 END DO246 END DO247 pt2d( 1 ,ijpj) = psgn * pt2d( 3 ,ijpj-3)248 CASE ( 'F' ) ! F-point249 DO jl = -1, ipr2dj250 DO ji = 1, jpiglo-1251 iju = jpiglo-ji+1252 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-3-jl)253 END DO254 END DO255 pt2d( 1 ,ijpj) = psgn * pt2d( 2 ,ijpj-3)256 pt2d(jpiglo,ijpj) = psgn * pt2d(jpiglo-1,ijpj-3)257 pt2d(jpiglo,ijpj-1) = psgn * pt2d(jpiglo-1,ijpj-2)258 pt2d( 1 ,ijpj-1) = psgn * pt2d( 2 ,ijpj-2)259 CASE ( 'I' ) ! ice U-V point (I-point)260 DO jl = 0, ipr2dj261 pt2d(2,ijpj+jl) = psgn * pt2d(3,ijpj-1+jl)262 DO ji = 3, jpiglo263 iju = jpiglo - ji + 3264 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)265 END DO266 END DO267 CASE ( 'J' ) ! first ice U-V point268 DO jl =0, ipr2dj269 pt2d(2,ijpj+jl) = psgn * pt2d(3,ijpj-1+jl)270 DO ji = 3, jpiglo271 iju = jpiglo - ji + 3272 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)273 END DO274 END DO275 CASE ( 'K' ) ! second ice U-V point276 DO jl =0, ipr2dj277 pt2d(2,ijpj+jl) = psgn * pt2d(3,ijpj-1+jl)278 DO ji = 3, jpiglo279 iju = jpiglo - ji + 3280 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)281 END DO282 END DO283 END SELECT284 !285 CASE ( 5, 6 ) ! * North fold F-point pivot286 !287 SELECT CASE ( cd_type )288 CASE ( 'T' , 'W' ) ! T-, W-point289 DO jl = 0, ipr2dj290 DO ji = 1, jpiglo291 ijt = jpiglo-ji+1292 pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-1-jl)293 END DO294 END DO295 CASE ( 'U' ) ! U-point296 DO jl = 0, ipr2dj297 DO ji = 1, jpiglo-1298 iju = jpiglo-ji299 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-1-jl)300 END DO301 END DO302 pt2d(jpiglo,ijpj) = psgn * pt2d(1,ijpj-1)303 CASE ( 'V' ) ! V-point304 DO jl = 0, ipr2dj305 DO ji = 1, jpiglo306 ijt = jpiglo-ji+1307 pt2d(ji,ijpj+jl) = psgn * pt2d(ijt,ijpj-2-jl)308 END DO309 END DO310 DO ji = jpiglo/2+1, jpiglo311 ijt = jpiglo-ji+1312 pt2d(ji,ijpjm1) = psgn * pt2d(ijt,ijpjm1)313 END DO314 CASE ( 'F' ) ! 
F-point315 DO jl = 0, ipr2dj316 DO ji = 1, jpiglo-1317 iju = jpiglo-ji318 pt2d(ji,ijpj+jl) = psgn * pt2d(iju,ijpj-2-jl)319 END DO320 END DO321 pt2d(jpiglo,ijpj) = psgn * pt2d(1,ijpj-2)322 DO ji = jpiglo/2+1, jpiglo-1323 iju = jpiglo-ji324 pt2d(ji,ijpjm1) = psgn * pt2d(iju,ijpjm1)325 END DO326 CASE ( 'I' ) ! ice U-V point (I-point)327 pt2d( 2 ,ijpj:ijpj+ipr2dj) = 0.e0328 DO jl = 0, ipr2dj329 DO ji = 2 , jpiglo-1330 ijt = jpiglo - ji + 2331 pt2d(ji,ijpj+jl)= 0.5 * ( pt2d(ji,ijpj-1-jl) + psgn * pt2d(ijt,ijpj-1-jl) )332 END DO333 END DO334 CASE ( 'J' ) ! first ice U-V point335 pt2d( 2 ,ijpj:ijpj+ipr2dj) = 0.e0336 DO jl = 0, ipr2dj337 DO ji = 2 , jpiglo-1338 ijt = jpiglo - ji + 2339 pt2d(ji,ijpj+jl)= pt2d(ji,ijpj-1-jl)340 END DO341 END DO342 CASE ( 'K' ) ! second ice U-V point343 pt2d( 2 ,ijpj:ijpj+ipr2dj) = 0.e0344 DO jl = 0, ipr2dj345 DO ji = 2 , jpiglo-1346 ijt = jpiglo - ji + 2347 pt2d(ji,ijpj+jl)= pt2d(ijt,ijpj-1-jl)348 END DO349 END DO350 END SELECT351 !352 CASE DEFAULT ! * closed : the code probably never go through353 !354 SELECT CASE ( cd_type)355 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points356 pt2d(:, 1:1-ipr2dj ) = 0.e0357 pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0358 CASE ( 'F' ) ! F-point359 pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0360 CASE ( 'I' ) ! ice U-V point361 pt2d(:, 1:1-ipr2dj ) = 0.e0362 pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0363 CASE ( 'J' ) ! first ice U-V point364 pt2d(:, 1:1-ipr2dj ) = 0.e0365 pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0366 CASE ( 'K' ) ! second ice U-V point367 pt2d(:, 1:1-ipr2dj ) = 0.e0368 pt2d(:,ijpj:ijpj+ipr2dj) = 0.e0369 END SELECT370 !371 END SELECT372 !373 END SUBROUTINE lbc_nfd_2d374 375 376 SUBROUTINE mpp_lbc_nfd_3d( pt3dl, pt3dr, cd_type, psgn )377 !!----------------------------------------------------------------------378 !! *** routine mpp_lbc_nfd_3d ***379 !!380 !! ** Purpose : 3D lateral boundary condition : North fold treatment381 !! without processor exchanges.382 !!383 !! ** Method :384 !!385 !! ** Action : pt3d with updated values along the north fold386 !!----------------------------------------------------------------------387 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points388 ! ! = T , U , V , F , W points389 REAL(wp) , INTENT(in ) :: psgn ! control of the sign change390 ! ! = -1. , the sign is changed if north fold boundary391 ! ! = 1. , the sign is kept if north fold boundary392 REAL(wp), DIMENSION(:,:,:), INTENT(inout) :: pt3dl ! 3D array on which the boundary condition is applied393 REAL(wp), DIMENSION(:,:,:), INTENT(in ) :: pt3dr ! 3D array on which the boundary condition is applied394 !395 INTEGER :: ji, jk396 INTEGER :: ijt, iju, ijpj, ijpjm1, ijta, ijua, jia, startloop, endloop397 !!----------------------------------------------------------------------398 !399 SELECT CASE ( jpni )400 CASE ( 1 ) ; ijpj = nlcj ! 1 proc only along the i-direction401 CASE DEFAULT ; ijpj = 4 ! several proc along the i-direction402 END SELECT403 ijpjm1 = ijpj-1404 405 !406 SELECT CASE ( npolj )407 !408 CASE ( 3 , 4 ) ! * North fold T-point pivot409 !410 SELECT CASE ( cd_type )411 CASE ( 'T' , 'W' ) ! T-, W-point412 IF (nimpp .ne. 1) THEN413 startloop = 1414 ELSE415 startloop = 2416 ENDIF417 418 DO jk = 1, jpk419 DO ji = startloop, nlci420 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4421 pt3dl(ji,ijpj,jk) = psgn * pt3dr(ijt,ijpj-2,jk)422 END DO423 IF(nimpp .eq. 1) THEN424 pt3dl(1,ijpj,jk) = psgn * pt3dl(3,ijpj-2,jk)425 ENDIF426 END DO427 428 IF(nimpp .ge. (jpiglo/2+1)) THEN429 startloop = 1430 ELSEIF(((nimpp+nlci-1) .ge. 
(jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN431 startloop = jpiglo/2+1 - nimpp + 1432 ELSE433 startloop = nlci + 1434 ENDIF435 IF(startloop .le. nlci) THEN436 DO jk = 1, jpk437 DO ji = startloop, nlci438 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4439 jia = ji + nimpp - 1440 ijta = jpiglo - jia + 2441 IF((ijta .ge. (startloop + nimpp - 1)) .and. (ijta .lt. jia)) THEN442 pt3dl(ji,ijpjm1,jk) = psgn * pt3dl(ijta-nimpp+1,ijpjm1,jk)443 ELSE444 pt3dl(ji,ijpjm1,jk) = psgn * pt3dr(ijt,ijpjm1,jk)445 ENDIF446 END DO447 END DO448 ENDIF449 450 451 CASE ( 'U' ) ! U-point452 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN453 endloop = nlci454 ELSE455 endloop = nlci - 1456 ENDIF457 DO jk = 1, jpk458 DO ji = 1, endloop459 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3460 pt3dl(ji,ijpj,jk) = psgn * pt3dr(iju,ijpj-2,jk)461 END DO462 IF(nimpp .eq. 1) THEN463 pt3dl( 1 ,ijpj,jk) = psgn * pt3dl( 2 ,ijpj-2,jk)464 ENDIF465 IF((nimpp + nlci - 1) .eq. jpiglo) THEN466 pt3dl(nlci,ijpj,jk) = psgn * pt3dl(nlci-1,ijpj-2,jk)467 ENDIF468 END DO469 470 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN471 endloop = nlci472 ELSE473 endloop = nlci - 1474 ENDIF475 IF(nimpp .ge. (jpiglo/2)) THEN476 startloop = 1477 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2)) .AND. (nimpp .lt. (jpiglo/2))) THEN478 startloop = jpiglo/2 - nimpp + 1479 ELSE480 startloop = endloop + 1481 ENDIF482 IF (startloop .le. endloop) THEN483 DO jk = 1, jpk484 DO ji = startloop, endloop485 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3486 jia = ji + nimpp - 1487 ijua = jpiglo - jia + 1488 IF((ijua .ge. (startloop + nimpp - 1)) .and. (ijua .lt. jia)) THEN489 pt3dl(ji,ijpjm1,jk) = psgn * pt3dl(ijua-nimpp+1,ijpjm1,jk)490 ELSE491 pt3dl(ji,ijpjm1,jk) = psgn * pt3dr(iju,ijpjm1,jk)492 ENDIF493 END DO494 END DO495 ENDIF496 497 CASE ( 'V' ) ! V-point498 IF (nimpp .ne. 1) THEN499 startloop = 1500 ELSE501 startloop = 2502 ENDIF503 DO jk = 1, jpk504 DO ji = startloop, nlci505 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4506 pt3dl(ji,ijpj-1,jk) = psgn * pt3dr(ijt,ijpj-2,jk)507 pt3dl(ji,ijpj ,jk) = psgn * pt3dr(ijt,ijpj-3,jk)508 END DO509 IF(nimpp .eq. 1) THEN510 pt3dl(1,ijpj,jk) = psgn * pt3dl(3,ijpj-3,jk)511 ENDIF512 END DO513 CASE ( 'F' ) ! F-point514 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN515 endloop = nlci516 ELSE517 endloop = nlci - 1518 ENDIF519 DO jk = 1, jpk520 DO ji = 1, endloop521 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3522 pt3dl(ji,ijpj-1,jk) = psgn * pt3dr(iju,ijpj-2,jk)523 pt3dl(ji,ijpj ,jk) = psgn * pt3dr(iju,ijpj-3,jk)524 END DO525 IF(nimpp .eq. 1) THEN526 pt3dl( 1 ,ijpj,jk) = psgn * pt3dl( 2 ,ijpj-3,jk)527 ENDIF528 IF((nimpp + nlci - 1) .eq. jpiglo) THEN529 pt3dl(nlci,ijpj,jk) = psgn * pt3dl(nlci-1,ijpj-3,jk)530 ENDIF531 END DO532 END SELECT533 !534 535 CASE ( 5 , 6 ) ! * North fold F-point pivot536 !537 SELECT CASE ( cd_type )538 CASE ( 'T' , 'W' ) ! T-, W-point539 DO jk = 1, jpk540 DO ji = 1, nlci541 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3542 pt3dl(ji,ijpj,jk) = psgn * pt3dr(ijt,ijpj-1,jk)543 END DO544 END DO545 546 CASE ( 'U' ) ! U-point547 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN548 endloop = nlci549 ELSE550 endloop = nlci - 1551 ENDIF552 DO jk = 1, jpk553 DO ji = 1, endloop554 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2555 pt3dl(ji,ijpj,jk) = psgn * pt3dr(iju,ijpj-1,jk)556 END DO557 IF((nimpp + nlci - 1) .eq. jpiglo) THEN558 pt3dl(nlci,ijpj,jk) = psgn * pt3dr(1,ijpj-1,jk)559 ENDIF560 END DO561 562 CASE ( 'V' ) ! 
V-point563 DO jk = 1, jpk564 DO ji = 1, nlci565 ijt = jpiglo - ji- nimpp - nfiimpp(isendto(1),jpnj) + 3566 pt3dl(ji,ijpj,jk) = psgn * pt3dr(ijt,ijpj-2,jk)567 END DO568 END DO569 570 IF(nimpp .ge. (jpiglo/2+1)) THEN571 startloop = 1572 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN573 startloop = jpiglo/2+1 - nimpp + 1574 ELSE575 startloop = nlci + 1576 ENDIF577 IF(startloop .le. nlci) THEN578 DO jk = 1, jpk579 DO ji = startloop, nlci580 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3581 pt3dl(ji,ijpjm1,jk) = psgn * pt3dr(ijt,ijpjm1,jk)582 END DO583 END DO584 ENDIF585 586 CASE ( 'F' ) ! F-point587 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN588 endloop = nlci589 ELSE590 endloop = nlci - 1591 ENDIF592 DO jk = 1, jpk593 DO ji = 1, endloop594 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2595 pt3dl(ji,ijpj ,jk) = psgn * pt3dr(iju,ijpj-2,jk)596 END DO597 IF((nimpp + nlci - 1) .eq. jpiglo) THEN598 pt3dl(nlci,ijpj,jk) = psgn * pt3dr(1,ijpj-2,jk)599 ENDIF600 END DO601 602 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN603 endloop = nlci604 ELSE605 endloop = nlci - 1606 ENDIF607 IF(nimpp .ge. (jpiglo/2+1)) THEN608 startloop = 1609 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN610 startloop = jpiglo/2+1 - nimpp + 1611 ELSE612 startloop = endloop + 1613 ENDIF614 IF (startloop .le. endloop) THEN615 DO jk = 1, jpk616 DO ji = startloop, endloop617 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2618 pt3dl(ji,ijpjm1,jk) = psgn * pt3dr(iju,ijpjm1,jk)619 END DO620 END DO621 ENDIF622 623 END SELECT624 625 CASE DEFAULT ! * closed : the code probably never go through626 !627 SELECT CASE ( cd_type)628 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points629 pt3dl(:, 1 ,jk) = 0.e0630 pt3dl(:,ijpj,jk) = 0.e0631 CASE ( 'F' ) ! F-point632 pt3dl(:,ijpj,jk) = 0.e0633 END SELECT634 !635 END SELECT ! npolj636 !637 !638 END SUBROUTINE mpp_lbc_nfd_3d639 640 641 SUBROUTINE mpp_lbc_nfd_2d( pt2dl, pt2dr, cd_type, psgn )642 !!----------------------------------------------------------------------643 !! *** routine mpp_lbc_nfd_2d ***644 !!645 !! ** Purpose : 2D lateral boundary condition : North fold treatment646 !! without processor exchanges.647 !!648 !! ** Method :649 !!650 !! ** Action : pt2d with updated values along the north fold651 !!----------------------------------------------------------------------652 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points653 ! ! = T , U , V , F , W points654 REAL(wp) , INTENT(in ) :: psgn ! control of the sign change655 ! ! = -1. , the sign is changed if north fold boundary656 ! ! = 1. , the sign is kept if north fold boundary657 REAL(wp), DIMENSION(:,:), INTENT(inout) :: pt2dl ! 2D array on which the boundary condition is applied658 REAL(wp), DIMENSION(:,:), INTENT(in ) :: pt2dr ! 2D array on which the boundary condition is applied659 !660 INTEGER :: ji661 INTEGER :: ijt, iju, ijpj, ijpjm1, ijta, ijua, jia, startloop, endloop662 !!----------------------------------------------------------------------663 664 SELECT CASE ( jpni )665 CASE ( 1 ) ; ijpj = nlcj ! 1 proc only along the i-direction666 CASE DEFAULT ; ijpj = 4 ! several proc along the i-direction667 END SELECT668 !669 ijpjm1 = ijpj-1670 671 672 SELECT CASE ( npolj )673 !674 CASE ( 3, 4 ) ! * North fold T-point pivot675 !676 SELECT CASE ( cd_type )677 !678 CASE ( 'T' , 'W' ) ! T- , W-points679 IF (nimpp .ne. 
1) THEN680 startloop = 1681 ELSE682 startloop = 2683 ENDIF684 DO ji = startloop, nlci685 ijt=jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4686 pt2dl(ji,ijpj) = psgn * pt2dr(ijt,ijpjm1-1)687 END DO688 IF (nimpp .eq. 1) THEN689 pt2dl(1,ijpj) = psgn * pt2dl(3,ijpj-2)690 ENDIF691 692 IF(nimpp .ge. (jpiglo/2+1)) THEN693 startloop = 1694 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN695 startloop = jpiglo/2+1 - nimpp + 1696 ELSE697 startloop = nlci + 1698 ENDIF699 DO ji = startloop, nlci700 ijt=jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4701 jia = ji + nimpp - 1702 ijta = jpiglo - jia + 2703 IF((ijta .ge. (startloop + nimpp - 1)) .and. (ijta .lt. jia)) THEN704 pt2dl(ji,ijpjm1) = psgn * pt2dl(ijta-nimpp+1,ijpjm1)705 ELSE706 pt2dl(ji,ijpjm1) = psgn * pt2dr(ijt,ijpjm1)707 ENDIF708 END DO709 710 CASE ( 'U' ) ! U-point711 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN712 endloop = nlci713 ELSE714 endloop = nlci - 1715 ENDIF716 DO ji = 1, endloop717 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3718 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1-1)719 END DO720 721 IF (nimpp .eq. 1) THEN722 pt2dl( 1 ,ijpj ) = psgn * pt2dl( 2 ,ijpj-2)723 pt2dl(1 ,ijpj-1) = psgn * pt2dr(jpiglo - nfiimpp(isendto(1), jpnj) + 1, ijpj-1)724 ENDIF725 IF((nimpp + nlci - 1) .eq. jpiglo) THEN726 pt2dl(nlci,ijpj ) = psgn * pt2dl(nlci-1,ijpj-2)727 ENDIF728 729 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN730 endloop = nlci731 ELSE732 endloop = nlci - 1733 ENDIF734 IF(nimpp .ge. (jpiglo/2)) THEN735 startloop = 1736 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2)) .AND. (nimpp .lt. (jpiglo/2))) THEN737 startloop = jpiglo/2 - nimpp + 1738 ELSE739 startloop = endloop + 1740 ENDIF741 DO ji = startloop, endloop742 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3743 jia = ji + nimpp - 1744 ijua = jpiglo - jia + 1745 IF((ijua .ge. (startloop + nimpp - 1)) .and. (ijua .lt. jia)) THEN746 pt2dl(ji,ijpjm1) = psgn * pt2dl(ijua-nimpp+1,ijpjm1)747 ELSE748 pt2dl(ji,ijpjm1) = psgn * pt2dr(iju,ijpjm1)749 ENDIF750 END DO751 752 CASE ( 'V' ) ! V-point753 IF (nimpp .ne. 1) THEN754 startloop = 1755 ELSE756 startloop = 2757 ENDIF758 DO ji = startloop, nlci759 ijt=jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4760 pt2dl(ji,ijpjm1) = psgn * pt2dr(ijt,ijpjm1-1)761 pt2dl(ji,ijpj) = psgn * pt2dr(ijt,ijpjm1-2)762 END DO763 IF (nimpp .eq. 1) THEN764 pt2dl( 1 ,ijpj) = psgn * pt2dl( 3 ,ijpj-3)765 ENDIF766 767 CASE ( 'F' ) ! F-point768 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN769 endloop = nlci770 ELSE771 endloop = nlci - 1772 ENDIF773 DO ji = 1, endloop774 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3775 pt2dl(ji,ijpjm1) = psgn * pt2dr(iju,ijpjm1-1)776 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1-2)777 END DO778 IF (nimpp .eq. 1) THEN779 pt2dl( 1 ,ijpj) = psgn * pt2dl( 2 ,ijpj-3)780 pt2dl( 1 ,ijpj-1) = psgn * pt2dl( 2 ,ijpj-2)781 ENDIF782 IF((nimpp + nlci - 1) .eq. jpiglo) THEN783 pt2dl(nlci,ijpj) = psgn * pt2dl(nlci-1,ijpj-3)784 pt2dl(nlci,ijpj-1) = psgn * pt2dl(nlci-1,ijpj-2)785 ENDIF786 787 CASE ( 'I' ) ! ice U-V point (I-point)788 IF (nimpp .ne. 1) THEN789 startloop = 1790 ELSE791 startloop = 3792 pt2dl(2,ijpj) = psgn * pt2dr(3,ijpjm1)793 ENDIF794 DO ji = startloop, nlci795 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 5796 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1)797 END DO798 799 CASE ( 'J' ) ! first ice U-V point800 IF (nimpp .ne. 
1) THEN801 startloop = 1802 ELSE803 startloop = 3804 pt2dl(2,ijpj) = psgn * pt2dr(3,ijpjm1)805 ENDIF806 DO ji = startloop, nlci807 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 5808 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1)809 END DO810 811 CASE ( 'K' ) ! second ice U-V point812 IF (nimpp .ne. 1) THEN813 startloop = 1814 ELSE815 startloop = 3816 pt2dl(2,ijpj) = psgn * pt2dr(3,ijpjm1)817 ENDIF818 DO ji = startloop, nlci819 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 5820 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1)821 END DO822 823 END SELECT824 !825 CASE ( 5, 6 ) ! * North fold F-point pivot826 !827 SELECT CASE ( cd_type )828 CASE ( 'T' , 'W' ) ! T-, W-point829 DO ji = 1, nlci830 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3831 pt2dl(ji,ijpj) = psgn * pt2dr(ijt,ijpjm1)832 END DO833 834 CASE ( 'U' ) ! U-point835 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN836 endloop = nlci837 ELSE838 endloop = nlci - 1839 ENDIF840 DO ji = 1, endloop841 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2842 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1)843 END DO844 IF((nimpp + nlci - 1) .eq. jpiglo) THEN845 pt2dl(nlci,ijpj) = psgn * pt2dr(1,ijpj-1)846 ENDIF847 848 CASE ( 'V' ) ! V-point849 DO ji = 1, nlci850 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3851 pt2dl(ji,ijpj) = psgn * pt2dr(ijt,ijpjm1-1)852 END DO853 IF(nimpp .ge. (jpiglo/2+1)) THEN854 startloop = 1855 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN856 startloop = jpiglo/2+1 - nimpp + 1857 ELSE858 startloop = nlci + 1859 ENDIF860 DO ji = startloop, nlci861 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 3862 pt2dl(ji,ijpjm1) = psgn * pt2dr(ijt,ijpjm1)863 END DO864 865 CASE ( 'F' ) ! F-point866 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN867 endloop = nlci868 ELSE869 endloop = nlci - 1870 ENDIF871 DO ji = 1, endloop872 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2873 pt2dl(ji,ijpj) = psgn * pt2dr(iju,ijpjm1-1)874 END DO875 IF((nimpp + nlci - 1) .eq. jpiglo) THEN876 pt2dl(nlci,ijpj) = psgn * pt2dr(1,ijpj-2)877 ENDIF878 879 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN880 endloop = nlci881 ELSE882 endloop = nlci - 1883 ENDIF884 IF(nimpp .ge. (jpiglo/2+1)) THEN885 startloop = 1886 ELSEIF(((nimpp+nlci-1) .ge. (jpiglo/2+1)) .AND. (nimpp .lt. (jpiglo/2+1))) THEN887 startloop = jpiglo/2+1 - nimpp + 1888 ELSE889 startloop = endloop + 1890 ENDIF891 892 DO ji = startloop, endloop893 iju = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 2894 pt2dl(ji,ijpjm1) = psgn * pt2dr(iju,ijpjm1)895 END DO896 897 CASE ( 'I' ) ! ice U-V point (I-point)898 IF (nimpp .ne. 1) THEN899 startloop = 1900 ELSE901 startloop = 2902 ENDIF903 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN904 endloop = nlci905 ELSE906 endloop = nlci - 1907 ENDIF908 DO ji = startloop , endloop909 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4910 pt2dl(ji,ijpj)= 0.5 * (pt2dr(ji,ijpjm1) + psgn * pt2dr(ijt,ijpjm1))911 END DO912 913 CASE ( 'J' ) ! first ice U-V point914 IF (nimpp .ne. 1) THEN915 startloop = 1916 ELSE917 startloop = 2918 ENDIF919 IF ((nimpp + nlci - 1) .ne. jpiglo) THEN920 endloop = nlci921 ELSE922 endloop = nlci - 1923 ENDIF924 DO ji = startloop , endloop925 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4926 pt2dl(ji,ijpj) = pt2dr(ji,ijpjm1)927 END DO928 929 CASE ( 'K' ) ! second ice U-V point930 IF (nimpp .ne. 1) THEN931 startloop = 1932 ELSE933 startloop = 2934 ENDIF935 IF ((nimpp + nlci - 1) .ne. 
jpiglo) THEN936 endloop = nlci937 ELSE938 endloop = nlci - 1939 ENDIF940 DO ji = startloop, endloop941 ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4942 pt2dl(ji,ijpj) = pt2dr(ijt,ijpjm1)943 END DO944 945 END SELECT946 !947 CASE DEFAULT ! * closed : the code probably never go through948 !949 SELECT CASE ( cd_type)950 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points951 pt2dl(:, 1 ) = 0.e0952 pt2dl(:,ijpj) = 0.e0953 CASE ( 'F' ) ! F-point954 pt2dl(:,ijpj) = 0.e0955 CASE ( 'I' ) ! ice U-V point956 pt2dl(:, 1 ) = 0.e0957 pt2dl(:,ijpj) = 0.e0958 CASE ( 'J' ) ! first ice U-V point959 pt2dl(:, 1 ) = 0.e0960 pt2dl(:,ijpj) = 0.e0961 CASE ( 'K' ) ! second ice U-V point962 pt2dl(:, 1 ) = 0.e0963 pt2dl(:,ijpj) = 0.e0964 END SELECT965 !966 END SELECT967 !968 END SUBROUTINE mpp_lbc_nfd_2d969 166 970 167 !!====================================================================== -
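Note on the PTR_2D/3D/4D types introduced above: they exist so that several fields of the same rank can be bundled into one array of pointers and handed to the *_ptr / *_multi variants in a single north-fold or halo exchange (the comments note the same types are used in lib_mpp). The sketch below only illustrates the bundling step; the field names zssh and zemp and the commented call are assumptions for illustration, not code from this changeset.

       REAL(wp), DIMENSION(jpi,jpj), TARGET ::   zssh, zemp   ! two hypothetical 2D fields
       TYPE(PTR_2D) , DIMENSION(2)          ::   zpts         ! bundle of pointers to them
       !
       zpts(1)%pt2d => zssh        ! no copy made: each element just points at an existing array
       zpts(2)%pt2d => zemp
       !
       ! a *_ptr routine can then loop over the bundle and treat both fields in one pass,
       ! schematically (exact argument list assumed, not taken from this changeset):
       !    CALL lbc_nfd( zpts, (/ 'T', 'T' /), (/ 1._wp, 1._wp /), 2 )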
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lib_fortran.F90
r10725 r10727 7 7 !! 3.4 ! 2013-06 (C. Rousset) add glob_min, glob_max 8 8 !! + 3d dim. of input is fexible (jpk, jpl...) 9 !! 4.0 ! 2016-06 (T. Lovato) double precision global sum by default 9 10 !!---------------------------------------------------------------------- 10 11 … … 20 21 USE in_out_manager ! I/O manager 21 22 USE lib_mpp ! distributed memory computing 23 USE lbclnk ! ocean lateral boundary conditions 22 24 23 25 IMPLICIT NONE … … 25 27 26 28 PUBLIC glob_sum ! used in many places (masked with tmask_i) 27 PUBLIC glob_sum_full ! used in many places (masked with tmask_h, ie omly over the halos) 29 PUBLIC glob_sum_full ! used in many places (masked with tmask_h, ie only over the halos) 30 PUBLIC local_sum ! used in trcrad, local operation before glob_sum_delay 31 PUBLIC sum3x3 ! used in trcrad, do a sum over 3x3 boxes 28 32 PUBLIC DDPDD ! also used in closea module 29 33 PUBLIC glob_min, glob_max 30 34 #if defined key_nosignedzero 31 35 PUBLIC SIGN 32 36 #endif 33 37 34 38 INTERFACE glob_sum 35 MODULE PROCEDURE glob_sum_1d, glob_sum_2d, glob_sum_3d, & 36 & glob_sum_2d_a, glob_sum_3d_a 39 MODULE PROCEDURE glob_sum_1d, glob_sum_2d, glob_sum_3d 37 40 END INTERFACE 38 41 INTERFACE glob_sum_full 39 42 MODULE PROCEDURE glob_sum_full_2d, glob_sum_full_3d 40 43 END INTERFACE 44 INTERFACE local_sum 45 MODULE PROCEDURE local_sum_2d, local_sum_3d 46 END INTERFACE 47 INTERFACE sum3x3 48 MODULE PROCEDURE sum3x3_2d, sum3x3_3d 49 END INTERFACE 41 50 INTERFACE glob_min 42 MODULE PROCEDURE glob_min_2d, glob_min_3d ,glob_min_2d_a, glob_min_3d_a51 MODULE PROCEDURE glob_min_2d, glob_min_3d 43 52 END INTERFACE 44 53 INTERFACE glob_max 45 MODULE PROCEDURE glob_max_2d, glob_max_3d ,glob_max_2d_a, glob_max_3d_a46 END INTERFACE 47 48 54 MODULE PROCEDURE glob_max_2d, glob_max_3d 55 END INTERFACE 56 57 #if defined key_nosignedzero 49 58 INTERFACE SIGN 50 59 MODULE PROCEDURE SIGN_SCALAR, SIGN_ARRAY_1D, SIGN_ARRAY_2D, SIGN_ARRAY_3D, & … … 52 61 & SIGN_ARRAY_1D_B, SIGN_ARRAY_2D_B, SIGN_ARRAY_3D_B 53 62 END INTERFACE 54 63 #endif 55 64 56 65 !!---------------------------------------------------------------------- 57 66 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 58 !! $Id: lib_fortran.F90 6140 2015-12-21 11:35:23Z timgraham$59 !! Software governed by the CeCILL licen ce (./LICENSE)67 !! $Id: lib_fortran.F90 10425 2018-12-19 21:54:16Z smasson $ 68 !! Software governed by the CeCILL license (see ./LICENSE) 60 69 !!---------------------------------------------------------------------- 61 70 CONTAINS 62 71 63 64 ! --- SUM --- 65 66 FUNCTION glob_sum_1d( ptab, kdim ) 67 !!----------------------------------------------------------------------- 68 !! *** FUNCTION glob_sum_1D *** 69 !! 70 !! ** Purpose : perform a masked sum on the inner global domain of a 1D array 71 !!----------------------------------------------------------------------- 72 INTEGER :: kdim 73 REAL(wp), INTENT(in), DIMENSION(kdim) :: ptab ! input 1D array 74 REAL(wp) :: glob_sum_1d ! global sum 75 !!----------------------------------------------------------------------- 76 ! 77 glob_sum_1d = SUM( ptab(:) ) 78 IF( lk_mpp ) CALL mpp_sum( glob_sum_1d ) 79 ! 80 END FUNCTION glob_sum_1d 81 82 FUNCTION glob_sum_2d( ptab ) 83 !!----------------------------------------------------------------------- 84 !! *** FUNCTION glob_sum_2D *** 85 !! 86 !! ** Purpose : perform a masked sum on the inner global domain of a 2D array 87 !!----------------------------------------------------------------------- 88 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab ! 
input 2D array 89 REAL(wp) :: glob_sum_2d ! global masked sum 90 !!----------------------------------------------------------------------- 91 ! 92 glob_sum_2d = SUM( ptab(:,:)*tmask_i(:,:) ) 93 IF( lk_mpp ) CALL mpp_sum( glob_sum_2d ) 94 ! 95 END FUNCTION glob_sum_2d 96 97 98 FUNCTION glob_sum_3d( ptab ) 99 !!----------------------------------------------------------------------- 100 !! *** FUNCTION glob_sum_3D *** 101 !! 102 !! ** Purpose : perform a masked sum on the inner global domain of a 3D array 103 !!----------------------------------------------------------------------- 104 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 105 REAL(wp) :: glob_sum_3d ! global masked sum 106 !! 107 INTEGER :: jk 108 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 109 !!----------------------------------------------------------------------- 110 ! 111 ijpk = SIZE(ptab,3) 112 ! 113 glob_sum_3d = 0.e0 114 DO jk = 1, ijpk 115 glob_sum_3d = glob_sum_3d + SUM( ptab(:,:,jk)*tmask_i(:,:) ) 72 # define GLOBSUM_CODE 73 74 # define DIM_1d 75 # define FUNCTION_GLOBSUM glob_sum_1d 76 # include "lib_fortran_generic.h90" 77 # undef FUNCTION_GLOBSUM 78 # undef DIM_1d 79 80 # define DIM_2d 81 # define OPERATION_GLOBSUM 82 # define FUNCTION_GLOBSUM glob_sum_2d 83 # include "lib_fortran_generic.h90" 84 # undef FUNCTION_GLOBSUM 85 # undef OPERATION_GLOBSUM 86 # define OPERATION_FULL_GLOBSUM 87 # define FUNCTION_GLOBSUM glob_sum_full_2d 88 # include "lib_fortran_generic.h90" 89 # undef FUNCTION_GLOBSUM 90 # undef OPERATION_FULL_GLOBSUM 91 # undef DIM_2d 92 93 # define DIM_3d 94 # define OPERATION_GLOBSUM 95 # define FUNCTION_GLOBSUM glob_sum_3d 96 # include "lib_fortran_generic.h90" 97 # undef FUNCTION_GLOBSUM 98 # undef OPERATION_GLOBSUM 99 # define OPERATION_FULL_GLOBSUM 100 # define FUNCTION_GLOBSUM glob_sum_full_3d 101 # include "lib_fortran_generic.h90" 102 # undef FUNCTION_GLOBSUM 103 # undef OPERATION_FULL_GLOBSUM 104 # undef DIM_3d 105 106 # undef GLOBSUM_CODE 107 108 109 # define GLOBMINMAX_CODE 110 111 # define DIM_2d 112 # define OPERATION_GLOBMIN 113 # define FUNCTION_GLOBMINMAX glob_min_2d 114 # include "lib_fortran_generic.h90" 115 # undef FUNCTION_GLOBMINMAX 116 # undef OPERATION_GLOBMIN 117 # define OPERATION_GLOBMAX 118 # define FUNCTION_GLOBMINMAX glob_max_2d 119 # include "lib_fortran_generic.h90" 120 # undef FUNCTION_GLOBMINMAX 121 # undef OPERATION_GLOBMAX 122 # undef DIM_2d 123 124 # define DIM_3d 125 # define OPERATION_GLOBMIN 126 # define FUNCTION_GLOBMINMAX glob_min_3d 127 # include "lib_fortran_generic.h90" 128 # undef FUNCTION_GLOBMINMAX 129 # undef OPERATION_GLOBMIN 130 # define OPERATION_GLOBMAX 131 # define FUNCTION_GLOBMINMAX glob_max_3d 132 # include "lib_fortran_generic.h90" 133 # undef FUNCTION_GLOBMINMAX 134 # undef OPERATION_GLOBMAX 135 # undef DIM_3d 136 # undef GLOBMINMAX_CODE 137 138 ! ! FUNCTION local_sum ! 139 140 FUNCTION local_sum_2d( ptab ) 141 !!---------------------------------------------------------------------- 142 REAL(wp), INTENT(in ) :: ptab(:,:) ! array on which operation is applied 143 COMPLEX(wp) :: local_sum_2d 144 ! 145 !!----------------------------------------------------------------------- 146 ! 147 COMPLEX(wp):: ctmp 148 REAL(wp) :: ztmp 149 INTEGER :: ji, jj ! dummy loop indices 150 INTEGER :: ipi, ipj ! dimensions 151 !!----------------------------------------------------------------------- 152 ! 153 ipi = SIZE(ptab,1) ! 1st dimension 154 ipj = SIZE(ptab,2) ! 2nd dimension 155 ! 156 ctmp = CMPLX( 0.e0, 0.e0, wp ) ! 
warning ctmp is cumulated 157 158 DO jj = 1, ipj 159 DO ji = 1, ipi 160 ztmp = ptab(ji,jj) * tmask_i(ji,jj) 161 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 162 END DO 116 163 END DO 117 IF( lk_mpp ) CALL mpp_sum( glob_sum_3d ) 118 ! 119 END FUNCTION glob_sum_3d 120 121 122 FUNCTION glob_sum_2d_a( ptab1, ptab2 ) 123 !!----------------------------------------------------------------------- 124 !! *** FUNCTION glob_sum_2D _a *** 125 !! 126 !! ** Purpose : perform a masked sum on the inner global domain of two 2D array 127 !!----------------------------------------------------------------------- 128 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 ! input 2D array 129 REAL(wp) , DIMENSION(2) :: glob_sum_2d_a ! global masked sum 130 !!----------------------------------------------------------------------- 131 ! 132 glob_sum_2d_a(1) = SUM( ptab1(:,:)*tmask_i(:,:) ) 133 glob_sum_2d_a(2) = SUM( ptab2(:,:)*tmask_i(:,:) ) 134 IF( lk_mpp ) CALL mpp_sum( glob_sum_2d_a, 2 ) 135 ! 136 END FUNCTION glob_sum_2d_a 137 138 139 FUNCTION glob_sum_3d_a( ptab1, ptab2 ) 140 !!----------------------------------------------------------------------- 141 !! *** FUNCTION glob_sum_3D_a *** 142 !! 143 !! ** Purpose : perform a masked sum on the inner global domain of two 3D array 144 !!----------------------------------------------------------------------- 145 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 ! input 3D array 146 REAL(wp) , DIMENSION(2) :: glob_sum_3d_a ! global masked sum 147 !! 148 INTEGER :: jk 149 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 150 !!----------------------------------------------------------------------- 151 ! 152 ijpk = SIZE(ptab1,3) 153 ! 154 glob_sum_3d_a(:) = 0.e0 155 DO jk = 1, ijpk 156 glob_sum_3d_a(1) = glob_sum_3d_a(1) + SUM( ptab1(:,:,jk)*tmask_i(:,:) ) 157 glob_sum_3d_a(2) = glob_sum_3d_a(2) + SUM( ptab2(:,:,jk)*tmask_i(:,:) ) 164 ! 165 local_sum_2d = ctmp 166 167 END FUNCTION local_sum_2d 168 169 FUNCTION local_sum_3d( ptab ) 170 !!---------------------------------------------------------------------- 171 REAL(wp), INTENT(in ) :: ptab(:,:,:) ! array on which operation is applied 172 COMPLEX(wp) :: local_sum_3d 173 ! 174 !!----------------------------------------------------------------------- 175 ! 176 COMPLEX(wp):: ctmp 177 REAL(wp) :: ztmp 178 INTEGER :: ji, jj, jk ! dummy loop indices 179 INTEGER :: ipi, ipj, ipk ! dimensions 180 !!----------------------------------------------------------------------- 181 ! 182 ipi = SIZE(ptab,1) ! 1st dimension 183 ipj = SIZE(ptab,2) ! 2nd dimension 184 ipk = SIZE(ptab,3) ! 3rd dimension 185 ! 186 ctmp = CMPLX( 0.e0, 0.e0, wp ) ! warning ctmp is cumulated 187 188 DO jk = 1, ipk 189 DO jj = 1, ipj 190 DO ji = 1, ipi 191 ztmp = ptab(ji,jj,jk) * tmask_i(ji,jj) 192 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 193 END DO 194 END DO 158 195 END DO 159 IF( lk_mpp ) CALL mpp_sum( glob_sum_3d_a, 2 ) 160 ! 161 END FUNCTION glob_sum_3d_a 162 163 FUNCTION glob_sum_full_2d( ptab ) 164 !!---------------------------------------------------------------------- 165 !! *** FUNCTION glob_sum_full_2d *** 166 !! 167 !! ** Purpose : perform a sum in calling DDPDD routine (nomask) 168 !!---------------------------------------------------------------------- 169 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab 170 REAL(wp) :: glob_sum_full_2d ! global sum 171 !! 172 !!----------------------------------------------------------------------- 173 ! 
174 glob_sum_full_2d = SUM( ptab(:,:) * tmask_h(:,:) ) 175 IF( lk_mpp ) CALL mpp_sum( glob_sum_full_2d ) 176 ! 177 END FUNCTION glob_sum_full_2d 178 179 FUNCTION glob_sum_full_3d( ptab ) 180 !!---------------------------------------------------------------------- 181 !! *** FUNCTION glob_sum_full_3d *** 182 !! 183 !! ** Purpose : perform a sum on a 3D array in calling DDPDD routine (nomask) 184 !!---------------------------------------------------------------------- 185 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab 186 REAL(wp) :: glob_sum_full_3d ! global sum 187 !! 188 INTEGER :: ji, jj, jk ! dummy loop indices 189 INTEGER :: ijpk ! local variables: size of ptab 190 !!----------------------------------------------------------------------- 191 ! 192 ijpk = SIZE(ptab,3) 193 ! 194 glob_sum_full_3d = 0.e0 195 DO jk = 1, ijpk 196 glob_sum_full_3d = glob_sum_full_3d + SUM( ptab(:,:,jk) * tmask_h(:,:) ) 196 ! 197 local_sum_3d = ctmp 198 199 END FUNCTION local_sum_3d 200 201 ! ! FUNCTION sum3x3 ! 202 203 SUBROUTINE sum3x3_2d( p2d ) 204 !!----------------------------------------------------------------------- 205 !! *** routine sum3x3_2d *** 206 !! 207 !! ** Purpose : sum over 3x3 boxes 208 !!---------------------------------------------------------------------- 209 REAL(wp), DIMENSION (:,:), INTENT(inout) :: p2d 210 ! 211 INTEGER :: ji, ji2, jj, jj2 ! dummy loop indices 212 !!---------------------------------------------------------------------- 213 ! 214 IF( SIZE(p2d,1) /= jpi ) CALL ctl_stop( 'STOP', 'wrong call of sum3x3_2d, the first dimension is not equal to jpi' ) 215 IF( SIZE(p2d,2) /= jpj ) CALL ctl_stop( 'STOP', 'wrong call of sum3x3_2d, the second dimension is not equal to jpj' ) 216 ! 217 DO jj = 1, jpj 218 DO ji = 1, jpi 219 IF( MOD(mig(ji), 3) == 1 .AND. MOD(mjg(jj), 3) == 1 ) THEN ! bottom left corber of a 3x3 box 220 ji2 = MIN(mig(ji)+2, jpiglo) - nimpp + 1 ! right position of the box 221 jj2 = MIN(mjg(jj)+2, jpjglo) - njmpp + 1 ! upper position of the box 222 IF( ji2 <= jpi .AND. jj2 <= jpj ) THEN ! the box is fully included in the local mpi domain 223 p2d(ji:ji2,jj:jj2) = SUM(p2d(ji:ji2,jj:jj2)) 224 ENDIF 225 ENDIF 226 END DO 197 227 END DO 198 IF( lk_mpp ) CALL mpp_sum( glob_sum_full_3d ) 199 ! 200 END FUNCTION glob_sum_full_3d 201 202 203 204 ! --- MIN --- 205 FUNCTION glob_min_2d( ptab ) 206 !!----------------------------------------------------------------------- 207 !! *** FUNCTION glob_min_2D *** 208 !! 209 !! ** Purpose : perform a masked min on the inner global domain of a 2D array 210 !!----------------------------------------------------------------------- 211 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab ! input 2D array 212 REAL(wp) :: glob_min_2d ! global masked min 213 !!----------------------------------------------------------------------- 214 ! 215 glob_min_2d = MINVAL( ptab(:,:)*tmask_i(:,:) ) 216 IF( lk_mpp ) CALL mpp_min( glob_min_2d ) 217 ! 218 END FUNCTION glob_min_2d 219 220 FUNCTION glob_min_3d( ptab ) 221 !!----------------------------------------------------------------------- 222 !! *** FUNCTION glob_min_3D *** 223 !! 224 !! ** Purpose : perform a masked min on the inner global domain of a 3D array 225 !!----------------------------------------------------------------------- 226 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 227 REAL(wp) :: glob_min_3d ! global masked min 228 !! 229 INTEGER :: jk 230 INTEGER :: ijpk ! 
local variable: size of the 3d dimension of ptab 231 !!----------------------------------------------------------------------- 232 ! 233 ijpk = SIZE(ptab,3) 234 ! 235 glob_min_3d = MINVAL( ptab(:,:,1)*tmask_i(:,:) ) 236 DO jk = 2, ijpk 237 glob_min_3d = MIN( glob_min_3d, MINVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 228 CALL lbc_lnk( 'lib_fortran', p2d, 'T', 1. ) 229 IF( nbondi /= -1 ) THEN 230 IF( MOD(mig( 1), 3) == 1 ) p2d( 1,:) = p2d( 2,:) 231 IF( MOD(mig( 1), 3) == 2 ) p2d( 2,:) = p2d( 1,:) 232 ENDIF 233 IF( nbondi /= 1 ) THEN 234 IF( MOD(mig(jpi-2), 3) == 1 ) p2d( jpi,:) = p2d(jpi-1,:) 235 IF( MOD(mig(jpi-2), 3) == 0 ) p2d(jpi-1,:) = p2d( jpi,:) 236 ENDIF 237 IF( nbondj /= -1 ) THEN 238 IF( MOD(mjg( 1), 3) == 1 ) p2d(:, 1) = p2d(:, 2) 239 IF( MOD(mjg( 1), 3) == 2 ) p2d(:, 2) = p2d(:, 1) 240 ENDIF 241 IF( nbondj /= 1 ) THEN 242 IF( MOD(mjg(jpj-2), 3) == 1 ) p2d(:, jpj) = p2d(:,jpj-1) 243 IF( MOD(mjg(jpj-2), 3) == 0 ) p2d(:,jpj-1) = p2d(:, jpj) 244 ENDIF 245 CALL lbc_lnk( 'lib_fortran', p2d, 'T', 1. ) 246 247 END SUBROUTINE sum3x3_2d 248 249 SUBROUTINE sum3x3_3d( p3d ) 250 !!----------------------------------------------------------------------- 251 !! *** routine sum3x3_3d *** 252 !! 253 !! ** Purpose : sum over 3x3 boxes 254 !!---------------------------------------------------------------------- 255 REAL(wp), DIMENSION (:,:,:), INTENT(inout) :: p3d 256 ! 257 INTEGER :: ji, ji2, jj, jj2, jn ! dummy loop indices 258 INTEGER :: ipn ! Third dimension size 259 !!---------------------------------------------------------------------- 260 ! 261 IF( SIZE(p3d,1) /= jpi ) CALL ctl_stop( 'STOP', 'wrong call of sum3x3_3d, the first dimension is not equal to jpi' ) 262 IF( SIZE(p3d,2) /= jpj ) CALL ctl_stop( 'STOP', 'wrong call of sum3x3_3d, the second dimension is not equal to jpj' ) 263 ipn = SIZE(p3d,3) 264 ! 265 DO jn = 1, ipn 266 DO jj = 1, jpj 267 DO ji = 1, jpi 268 IF( MOD(mig(ji), 3) == 1 .AND. MOD(mjg(jj), 3) == 1 ) THEN ! bottom left corber of a 3x3 box 269 ji2 = MIN(mig(ji)+2, jpiglo) - nimpp + 1 ! right position of the box 270 jj2 = MIN(mjg(jj)+2, jpjglo) - njmpp + 1 ! upper position of the box 271 IF( ji2 <= jpi .AND. jj2 <= jpj ) THEN ! the box is fully included in the local mpi domain 272 p3d(ji:ji2,jj:jj2,jn) = SUM(p3d(ji:ji2,jj:jj2,jn)) 273 ENDIF 274 ENDIF 275 END DO 276 END DO 238 277 END DO 239 IF( lk_mpp ) CALL mpp_min( glob_min_3d ) 240 ! 241 END FUNCTION glob_min_3d 242 243 244 FUNCTION glob_min_2d_a( ptab1, ptab2 ) 245 !!----------------------------------------------------------------------- 246 !! *** FUNCTION glob_min_2D _a *** 247 !! 248 !! ** Purpose : perform a masked min on the inner global domain of two 2D array 249 !!----------------------------------------------------------------------- 250 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 ! input 2D array 251 REAL(wp) , DIMENSION(2) :: glob_min_2d_a ! global masked min 252 !!----------------------------------------------------------------------- 253 ! 254 glob_min_2d_a(1) = MINVAL( ptab1(:,:)*tmask_i(:,:) ) 255 glob_min_2d_a(2) = MINVAL( ptab2(:,:)*tmask_i(:,:) ) 256 IF( lk_mpp ) CALL mpp_min( glob_min_2d_a, 2 ) 257 ! 258 END FUNCTION glob_min_2d_a 259 260 261 FUNCTION glob_min_3d_a( ptab1, ptab2 ) 262 !!----------------------------------------------------------------------- 263 !! *** FUNCTION glob_min_3D_a *** 264 !! 265 !! 
** Purpose : perform a masked min on the inner global domain of two 3D array 266 !!----------------------------------------------------------------------- 267 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 ! input 3D array 268 REAL(wp) , DIMENSION(2) :: glob_min_3d_a ! global masked min 269 !! 270 INTEGER :: jk 271 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 272 !!----------------------------------------------------------------------- 273 ! 274 ijpk = SIZE(ptab1,3) 275 ! 276 glob_min_3d_a(1) = MINVAL( ptab1(:,:,1)*tmask_i(:,:) ) 277 glob_min_3d_a(2) = MINVAL( ptab2(:,:,1)*tmask_i(:,:) ) 278 DO jk = 2, ijpk 279 glob_min_3d_a(1) = MIN( glob_min_3d_a(1), MINVAL( ptab1(:,:,jk)*tmask_i(:,:) ) ) 280 glob_min_3d_a(2) = MIN( glob_min_3d_a(2), MINVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 281 END DO 282 IF( lk_mpp ) CALL mpp_min( glob_min_3d_a, 2 ) 283 ! 284 END FUNCTION glob_min_3d_a 285 286 ! --- MAX --- 287 FUNCTION glob_max_2d( ptab ) 288 !!----------------------------------------------------------------------- 289 !! *** FUNCTION glob_max_2D *** 290 !! 291 !! ** Purpose : perform a masked max on the inner global domain of a 2D array 292 !!----------------------------------------------------------------------- 293 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab ! input 2D array 294 REAL(wp) :: glob_max_2d ! global masked max 295 !!----------------------------------------------------------------------- 296 ! 297 glob_max_2d = MAXVAL( ptab(:,:)*tmask_i(:,:) ) 298 IF( lk_mpp ) CALL mpp_max( glob_max_2d ) 299 ! 300 END FUNCTION glob_max_2d 301 302 FUNCTION glob_max_3d( ptab ) 303 !!----------------------------------------------------------------------- 304 !! *** FUNCTION glob_max_3D *** 305 !! 306 !! ** Purpose : perform a masked max on the inner global domain of a 3D array 307 !!----------------------------------------------------------------------- 308 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab ! input 3D array 309 REAL(wp) :: glob_max_3d ! global masked max 310 !! 311 INTEGER :: jk 312 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 313 !!----------------------------------------------------------------------- 314 ! 315 ijpk = SIZE(ptab,3) 316 ! 317 glob_max_3d = MAXVAL( ptab(:,:,1)*tmask_i(:,:) ) 318 DO jk = 2, ijpk 319 glob_max_3d = MAX( glob_max_3d, MAXVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 320 END DO 321 IF( lk_mpp ) CALL mpp_max( glob_max_3d ) 322 ! 323 END FUNCTION glob_max_3d 324 325 326 FUNCTION glob_max_2d_a( ptab1, ptab2 ) 327 !!----------------------------------------------------------------------- 328 !! *** FUNCTION glob_max_2D _a *** 329 !! 330 !! ** Purpose : perform a masked max on the inner global domain of two 2D array 331 !!----------------------------------------------------------------------- 332 REAL(wp), INTENT(in), DIMENSION(:,:) :: ptab1, ptab2 ! input 2D array 333 REAL(wp) , DIMENSION(2) :: glob_max_2d_a ! global masked max 334 !!----------------------------------------------------------------------- 335 ! 336 glob_max_2d_a(1) = MAXVAL( ptab1(:,:)*tmask_i(:,:) ) 337 glob_max_2d_a(2) = MAXVAL( ptab2(:,:)*tmask_i(:,:) ) 338 IF( lk_mpp ) CALL mpp_max( glob_max_2d_a, 2 ) 339 ! 340 END FUNCTION glob_max_2d_a 341 342 343 FUNCTION glob_max_3d_a( ptab1, ptab2 ) 344 !!----------------------------------------------------------------------- 345 !! *** FUNCTION glob_max_3D_a *** 346 !! 347 !! 
** Purpose : perform a masked max on the inner global domain of two 3D array 348 !!----------------------------------------------------------------------- 349 REAL(wp), INTENT(in), DIMENSION(:,:,:) :: ptab1, ptab2 ! input 3D array 350 REAL(wp) , DIMENSION(2) :: glob_max_3d_a ! global masked max 351 !! 352 INTEGER :: jk 353 INTEGER :: ijpk ! local variable: size of the 3d dimension of ptab 354 !!----------------------------------------------------------------------- 355 ! 356 ijpk = SIZE(ptab1,3) 357 ! 358 glob_max_3d_a(1) = MAXVAL( ptab1(:,:,1)*tmask_i(:,:) ) 359 glob_max_3d_a(2) = MAXVAL( ptab2(:,:,1)*tmask_i(:,:) ) 360 DO jk = 2, ijpk 361 glob_max_3d_a(1) = MAX( glob_max_3d_a(1), MAXVAL( ptab1(:,:,jk)*tmask_i(:,:) ) ) 362 glob_max_3d_a(2) = MAX( glob_max_3d_a(2), MAXVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 363 END DO 364 IF( lk_mpp ) CALL mpp_max( glob_max_3d_a, 2 ) 365 ! 366 END FUNCTION glob_max_3d_a 278 CALL lbc_lnk( 'lib_fortran', p3d, 'T', 1. ) 279 IF( nbondi /= -1 ) THEN 280 IF( MOD(mig( 1), 3) == 1 ) p3d( 1,:,:) = p3d( 2,:,:) 281 IF( MOD(mig( 1), 3) == 2 ) p3d( 2,:,:) = p3d( 1,:,:) 282 ENDIF 283 IF( nbondi /= 1 ) THEN 284 IF( MOD(mig(jpi-2), 3) == 1 ) p3d( jpi,:,:) = p3d(jpi-1,:,:) 285 IF( MOD(mig(jpi-2), 3) == 0 ) p3d(jpi-1,:,:) = p3d( jpi,:,:) 286 ENDIF 287 IF( nbondj /= -1 ) THEN 288 IF( MOD(mjg( 1), 3) == 1 ) p3d(:, 1,:) = p3d(:, 2,:) 289 IF( MOD(mjg( 1), 3) == 2 ) p3d(:, 2,:) = p3d(:, 1,:) 290 ENDIF 291 IF( nbondj /= 1 ) THEN 292 IF( MOD(mjg(jpj-2), 3) == 1 ) p3d(:, jpj,:) = p3d(:,jpj-1,:) 293 IF( MOD(mjg(jpj-2), 3) == 0 ) p3d(:,jpj-1,:) = p3d(:, jpj,:) 294 ENDIF 295 CALL lbc_lnk( 'lib_fortran', p3d, 'T', 1. ) 296 297 END SUBROUTINE sum3x3_3d 367 298 368 299 … … 401 332 END SUBROUTINE DDPDD 402 333 334 #if defined key_nosignedzero 403 335 !!---------------------------------------------------------------------- 404 336 !! 'key_nosignedzero' F90 SIGN … … 552 484 ENDIF 553 485 END FUNCTION SIGN_ARRAY_3D_B 486 #endif 554 487 555 488 !!====================================================================== -
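Note on the local_sum routines above: the running sum is carried as a COMPLEX(wp) whose real part holds the sum and whose imaginary part holds the accumulated rounding error, and DDPDD folds each new contribution into that pair. The body of DDPDD is truncated in this changeset, so the routine below is only a sketch of the standard Knuth two-sum update that such a double-double accumulator typically performs; it is an assumption, not a copy of the NEMO routine.

       SUBROUTINE ddpdd_sketch( ydda, yddb )       ! hypothetical stand-in for DDPDD
          COMPLEX(wp), INTENT(in   ) ::   ydda     ! new contribution, stored as (value, 0)
          COMPLEX(wp), INTENT(inout) ::   yddb     ! running (sum, error) pair, updated in place
          REAL(wp) ::   zt1, zt2, zerr
          !
          zt1  = REAL(ydda) + REAL(yddb)                        ! provisional sum
          zerr = zt1 - REAL(ydda)                               ! part of yddb actually added
          zt2  = ( (REAL(yddb) - zerr) + (REAL(ydda) - (zt1 - zerr)) )   &
             &   + AIMAG(ydda) + AIMAG(yddb)                    ! bits lost by zt1 + stored errors
          !
          yddb = CMPLX( zt1 + zt2, zt2 - ( (zt1 + zt2) - zt1 ), wp )   ! renormalised (sum, error)
       END SUBROUTINE ddpdd_sketch

Carrying the error term this way makes the accumulated sum largely insensitive to the order in which local contributions are added, which is the usual motivation for routing global sums through this kind of compensated accumulation.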
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lib_mpp.F90
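Note on the lib_mpp.F90 diff below: the long hand-written exchange routines (mpp_lnk_2d, mpp_lnk_3d, mpp_lnk_2d_9, mpp_lnk_3d_gather, mpp_lnk_2d_e, ...) are removed and replaced by variants generated from shared preprocessor templates (mpp_lnk_generic.h90, mpp_nfd_generic.h90, mpp_bdy_generic.h90), selected through the DIM_2d / DIM_3d / DIM_4d and MULTI tokens. What follows is only a minimal sketch of that define/include pattern: the file name lnk_template.h90, the routine names my_lnk_2d / my_lnk_3d and the empty routine body are hypothetical stand-ins, not the actual NEMO templates, and wp is assumed to be the usual working-precision kind.

   !== hypothetical template file: lnk_template.h90 (simplified) ==
      SUBROUTINE ROUTINE_LNK( ptab, psgn )
   #if defined DIM_2d
         REAL(wp), DIMENSION(:,:)  , INTENT(inout) ::   ptab   ! 2D field to update
   #endif
   #if defined DIM_3d
         REAL(wp), DIMENSION(:,:,:), INTENT(inout) ::   ptab   ! 3D field to update
   #endif
         REAL(wp)                  , INTENT(in   ) ::   psgn   ! sign used across the north fold
         !                                  ! single, dimension-agnostic halo-exchange body
      END SUBROUTINE ROUTINE_LNK

   !== hypothetical host module: include the template once per variant ==
   #  define DIM_2d
   #  define ROUTINE_LNK   my_lnk_2d
   #  include "lnk_template.h90"
   #  undef  ROUTINE_LNK
   #  undef  DIM_2d
   !
   #  define DIM_3d
   #  define ROUTINE_LNK   my_lnk_3d
   #  include "lnk_template.h90"
   #  undef  ROUTINE_LNK
   #  undef  DIM_3d

After preprocessing, each define/include block yields one concrete subroutine (here my_lnk_2d and my_lnk_3d), so the halo-exchange logic is written only once. In the diff this is what lets the 4D and pointer-array ("_ptr") variants be added with a few extra define/include lines instead of yet another copy of the routine.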
r10725 r10727 8 8 !! 8.0 ! 1998 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 9 9 !! ! 1998 (J.M. Molines) Open boundary conditions 10 !! NEMO 1.0 ! 2003 (J. -M. Molines, G. Madec) F90, free form10 !! NEMO 1.0 ! 2003 (J.M. Molines, G. Madec) F90, free form 11 11 !! ! 2003 (J.M. Molines) add mpp_ini_north(_3d,_2d) 12 12 !! - ! 2004 (R. Bourdalle Badie) isend option in mpi … … 19 19 !! 3.2 ! 2009 (O. Marti) add mpp_ini_znl 20 20 !! 4.0 ! 2011 (G. Madec) move ctl_ routines from in_out_manager 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add 'mpp_lnk_bdy_3d', 'mpp_lnk_obc_3d', 22 !! 'mpp_lnk_bdy_2d' and 'mpp_lnk_obc_2d' routines and update 23 !! the mppobc routine to optimize the BDY and OBC communications 24 !! 3.5 ! 2013 ( C. Ethe, G. Madec ) message passing arrays as local variables 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add mpp_lnk_bdy_3d/2d routines to optimize the BDY comm. 22 !! 3.5 ! 2013 (C. Ethe, G. Madec) message passing arrays as local variables 25 23 !! 3.5 ! 2013 (S.Mocavero, I.Epicoco - CMCC) north fold optimizations 26 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added 'mpp_lnk_2d_multiple', 'mpp_lbc_north_2d_multiple', 'mpp_max_multiple' 24 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added '_multiple' case for 2D lbc and max 25 !! 4.0 ! 2017 (G. Madec) automatique allocation of array argument (use any 3rd dimension) 26 !! - ! 2017 (G. Madec) create generic.h90 files to generate all lbc and north fold routines 27 27 !!---------------------------------------------------------------------- 28 28 … … 34 34 !! get_unit : give the index of an unused logical unit 35 35 !!---------------------------------------------------------------------- 36 36 #if defined key_mpp_mpi 37 37 !!---------------------------------------------------------------------- 38 38 !! 'key_mpp_mpi' MPI massively parallel processing library … … 41 41 !! mynode : indentify the processor unit 42 42 !! mpp_lnk : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d) 43 !! mpp_lnk_3d_gather : Message passing manadgement for two 3D arrays44 !! mpp_lnk_e : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)45 43 !! mpp_lnk_icb : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb) 46 44 !! mpprecv : 47 !! mppsend : SUBROUTINE mpp_ini_znl45 !! mppsend : 48 46 !! mppscatter : 49 47 !! mppgather : … … 56 54 !! mppstop : 57 55 !! mpp_ini_north : initialisation of north fold 58 !! mpp_lbc_north : north fold processors gathering 59 !! mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo 60 !! mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs 56 !! mpp_lbc_north_icb : alternative to mpp_nfd for extra outer halo with icebergs 61 57 !!---------------------------------------------------------------------- 62 58 USE dom_oce ! ocean space and time domain 63 59 USE lbcnfd ! north fold treatment 64 60 USE in_out_manager ! I/O manager 65 USE wrk_nemo ! work arrays66 61 67 62 IMPLICIT NONE 68 63 PRIVATE 69 64 65 INTERFACE mpp_nfd 66 MODULE PROCEDURE mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 67 MODULE PROCEDURE mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 68 END INTERFACE 69 70 ! Interface associated to the mpp_lnk_... routines is defined in lbclnk 71 PUBLIC mpp_lnk_2d , mpp_lnk_3d , mpp_lnk_4d 72 PUBLIC mpp_lnk_2d_ptr, mpp_lnk_3d_ptr, mpp_lnk_4d_ptr 73 ! 
74 !!gm this should be useless 75 PUBLIC mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 76 PUBLIC mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 77 !!gm end 78 ! 70 79 PUBLIC ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam 71 80 PUBLIC mynode, mppstop, mppsync, mpp_comm_free 72 PUBLIC mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e 81 PUBLIC mpp_ini_north 82 PUBLIC mpp_lnk_2d_icb 83 PUBLIC mpp_lbc_north_icb 73 84 PUBLIC mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc 74 PUBLIC mpp_max_multiple 75 PUBLIC mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e 76 PUBLIC mpp_lnk_2d_9 , mpp_lnk_2d_multiple 77 PUBLIC mpp_lnk_sum_3d, mpp_lnk_sum_2d 85 PUBLIC mpp_delay_max, mpp_delay_sum, mpp_delay_rcv 78 86 PUBLIC mppscatter, mppgather 79 PUBLIC mpp_ini_ice, mpp_ini_znl 80 PUBLIC mppsize 87 PUBLIC mpp_ini_znl 81 88 PUBLIC mppsend, mpprecv ! needed by TAM and ICB routines 82 PUBLIC mpp_lnk_bdy_2d, mpp_lnk_bdy_3d 83 PUBLIC mpp_lbc_north_icb, mpp_lnk_2d_icb 84 PUBLIC mpprank 85 86 TYPE arrayptr 87 REAL , DIMENSION (:,:), POINTER :: pt2d 88 END TYPE arrayptr 89 PUBLIC arrayptr 89 PUBLIC mpp_lnk_bdy_2d, mpp_lnk_bdy_3d, mpp_lnk_bdy_4d 90 90 91 91 !! * Interfaces … … 101 101 INTERFACE mpp_sum 102 102 MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real, & 103 mppsum_realdd, mppsum_a_realdd 104 END INTERFACE 105 INTERFACE mpp_lbc_north 106 MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d 103 & mppsum_realdd, mppsum_a_realdd 107 104 END INTERFACE 108 105 INTERFACE mpp_minloc … … 113 110 END INTERFACE 114 111 115 INTERFACE mpp_max_multiple116 MODULE PROCEDURE mppmax_real_multiple117 END INTERFACE118 119 112 !! ========================= !! 120 113 !! MPI variable definition !! … … 128 121 INTEGER, PARAMETER :: nprocmax = 2**10 ! maximun dimension (required to be a power of 2) 129 122 130 INTEGER :: mppsize ! number of process131 INTEGER :: mpprank ! process number [ 0 - size-1 ]123 INTEGER, PUBLIC :: mppsize ! number of process 124 INTEGER, PUBLIC :: mpprank ! process number [ 0 - size-1 ] 132 125 !$AGRIF_DO_NOT_TREAT 133 INTEGER, PUBLIC :: mpi_comm_o pa! opa local communicator126 INTEGER, PUBLIC :: mpi_comm_oce ! opa local communicator 134 127 !$AGRIF_END_DO_NOT_TREAT 135 128 136 129 INTEGER :: MPI_SUMDD 137 138 ! variables used in case of sea-ice139 INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)140 INTEGER :: ngrp_iworld ! group ID for the world processors (for rheology)141 INTEGER :: ngrp_ice ! group ID for the ice processors (for rheology)142 INTEGER :: ndim_rank_ice ! number of 'ice' processors143 INTEGER :: n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm144 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_ice ! dimension ndim_rank_ice145 130 146 131 ! variables used for zonal integration 147 132 INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average 148 LOGICAL, PUBLIC :: l_znl_root ! True on the 'left'most processor on the same row149 INTEGER :: ngrp_znl !group ID for the znl processors150 INTEGER :: ndim_rank_znl !number of processors on the same zonal average133 LOGICAL, PUBLIC :: l_znl_root !: True on the 'left'most processor on the same row 134 INTEGER :: ngrp_znl ! group ID for the znl processors 135 INTEGER :: ndim_rank_znl ! number of processors on the same zonal average 151 136 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_znl ! dimension ndim_rank_znl, number of the procs into the same znl domain 152 137 153 138 ! 
North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM) 154 INTEGER, PUBLIC :: ngrp_world ! group ID for the world processors155 INTEGER, PUBLIC :: ngrp_opa ! group ID for the opa processors156 INTEGER, PUBLIC :: ngrp_north ! group ID for the northern processors (to be fold)157 INTEGER, PUBLIC :: ncomm_north ! communicator made by the processors belonging to ngrp_north158 INTEGER, PUBLIC :: ndim_rank_north ! number of 'sea' processor in the northern line (can be /= jpni !)159 INTEGER, PUBLIC :: njmppmax ! value of njmpp for the processors of the northern line160 INTEGER, PUBLIC :: north_root ! number (in the comm_opa) of proc 0 in the northern comm161 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC :: nrank_north !dimension ndim_rank_north139 INTEGER, PUBLIC :: ngrp_world !: group ID for the world processors 140 INTEGER, PUBLIC :: ngrp_opa !: group ID for the opa processors 141 INTEGER, PUBLIC :: ngrp_north !: group ID for the northern processors (to be fold) 142 INTEGER, PUBLIC :: ncomm_north !: communicator made by the processors belonging to ngrp_north 143 INTEGER, PUBLIC :: ndim_rank_north !: number of 'sea' processor in the northern line (can be /= jpni !) 144 INTEGER, PUBLIC :: njmppmax !: value of njmpp for the processors of the northern line 145 INTEGER, PUBLIC :: north_root !: number (in the comm_opa) of proc 0 in the northern comm 146 INTEGER, PUBLIC, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_north !: dimension ndim_rank_north 162 147 163 148 ! Type of send : standard, buffered, immediate 164 CHARACTER(len=1), PUBLIC :: cn_mpi_send ! type od mpi send/recieve (S=standard, B=bsend, I=isend) 165 LOGICAL, PUBLIC :: l_isend = .FALSE. ! isend use indicator (T if cn_mpi_send='I') 166 INTEGER, PUBLIC :: nn_buffer ! size of the buffer in case of mpi_bsend 167 168 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 169 170 LOGICAL, PUBLIC :: ln_nnogather ! namelist control of northfold comms 171 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. ! internal control of northfold comms 172 INTEGER, PUBLIC :: ityp 149 CHARACTER(len=1), PUBLIC :: cn_mpi_send !: type od mpi send/recieve (S=standard, B=bsend, I=isend) 150 LOGICAL , PUBLIC :: l_isend = .FALSE. !: isend use indicator (T if cn_mpi_send='I') 151 INTEGER , PUBLIC :: nn_buffer !: size of the buffer in case of mpi_bsend 152 153 ! Communications summary report 154 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_lbc !: names of lbc_lnk calling routines 155 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_glb !: names of global comm calling routines 156 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_dlg !: names of delayed global comm calling routines 157 INTEGER, PUBLIC :: ncom_stp = 0 !: copy of time step # istp 158 INTEGER, PUBLIC :: ncom_fsbc = 1 !: copy of sbc time step # nn_fsbc 159 INTEGER, PUBLIC :: ncom_dttrc = 1 !: copy of top time step # nn_dttrc 160 INTEGER, PUBLIC :: ncom_freq !: frequency of comm diagnostic 161 INTEGER, PUBLIC , DIMENSION(:,:), ALLOCATABLE :: ncomm_sequence !: size of communicated arrays (halos) 162 INTEGER, PARAMETER, PUBLIC :: ncom_rec_max = 3000 !: max number of communication record 163 INTEGER, PUBLIC :: n_sequence_lbc = 0 !: # of communicated arraysvia lbc 164 INTEGER, PUBLIC :: n_sequence_glb = 0 !: # of global communications 165 INTEGER, PUBLIC :: n_sequence_dlg = 0 !: # of delayed global communications 166 INTEGER, PUBLIC :: numcom = -1 !: logical unit for communicaton report 167 LOGICAL, PUBLIC :: l_full_nf_update = .TRUE. 
!: logical for a full (2lines) update of bc at North fold report 168 INTEGER, PARAMETER, PUBLIC :: nbdelay = 2 !: number of delayed operations 169 !: name (used as id) of allreduce-delayed operations 170 ! Warning: we must use the same character length in an array constructor (at least for gcc compiler) 171 CHARACTER(len=32), DIMENSION(nbdelay), PUBLIC :: c_delaylist = (/ 'cflice', 'fwb ' /) 172 !: component name where the allreduce-delayed operation is performed 173 CHARACTER(len=3), DIMENSION(nbdelay), PUBLIC :: c_delaycpnt = (/ 'ICE' , 'OCE' /) 174 TYPE, PUBLIC :: DELAYARR 175 REAL( wp), POINTER, DIMENSION(:) :: z1d => NULL() 176 COMPLEX(wp), POINTER, DIMENSION(:) :: y1d => NULL() 177 END TYPE DELAYARR 178 TYPE( DELAYARR ), DIMENSION(nbdelay), PUBLIC :: todelay 179 INTEGER, DIMENSION(nbdelay), PUBLIC :: ndelayid = -1 !: mpi request id of the delayed operations 180 181 ! timing summary report 182 REAL(wp), DIMENSION(2), PUBLIC :: waiting_time = 0._wp 183 REAL(wp) , PUBLIC :: compute_time = 0._wp, elapsed_time = 0._wp 184 185 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 186 187 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms 188 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. !: internal control of northfold comms 189 173 190 !!---------------------------------------------------------------------- 174 191 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 175 !! $Id: lib_mpp.F90 6490 2016-04-20 14:55:58Z mcastril$176 !! Software governed by the CeCILL licen ce (./LICENSE)192 !! $Id: lib_mpp.F90 10538 2019-01-17 10:41:10Z clem $ 193 !! Software governed by the CeCILL license (see ./LICENSE) 177 194 !!---------------------------------------------------------------------- 178 195 CONTAINS 179 196 180 181 FUNCTION mynode( ldtxt, ldname, kumnam_ref , kumnam_cfg , kumond , kstop, localComm ) 197 FUNCTION mynode( ldtxt, ldname, kumnam_ref, kumnam_cfg, kumond, kstop, localComm ) 182 198 !!---------------------------------------------------------------------- 183 199 !! *** routine mynode *** … … 196 212 LOGICAL :: mpi_was_called 197 213 ! 198 NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij,ln_nnogather214 NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, ln_nnogather 199 215 !!---------------------------------------------------------------------- 200 216 ! … … 204 220 WRITE(ldtxt(ii),*) '~~~~~~ ' ; ii = ii + 1 205 221 ! 206 207 222 REWIND( kumnam_ref ) ! Namelist nammpp in reference namelist: mpi variables 208 223 READ ( kumnam_ref, nammpp, IOSTAT = ios, ERR = 901) 209 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp )210 224 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp ) 225 ! 211 226 REWIND( kumnam_cfg ) ! Namelist nammpp in configuration namelist: mpi variables 212 227 READ ( kumnam_cfg, nammpp, IOSTAT = ios, ERR = 902 ) 213 902 IF( ios /= 0 )CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp )214 228 902 IF( ios > 0 ) CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp ) 229 ! 215 230 ! ! control print 216 231 WRITE(ldtxt(ii),*) ' Namelist nammpp' ; ii = ii + 1 217 232 WRITE(ldtxt(ii),*) ' mpi send type cn_mpi_send = ', cn_mpi_send ; ii = ii + 1 218 233 WRITE(ldtxt(ii),*) ' size exported buffer nn_buffer = ', nn_buffer,' bytes'; ii = ii + 1 219 220 221 222 223 224 225 226 227 228 IF(jpnij < 1)THEN 229 ! If jpnij is not specified in namelist then we calculate it - this 230 ! means there will be no land cutting out. 
231 jpnij = jpni * jpnj 232 END IF 233 234 IF( (jpni < 1) .OR. (jpnj < 1) )THEN 235 WRITE(ldtxt(ii),*) ' jpni, jpnj and jpnij will be calculated automatically' ; ii = ii + 1 234 ! 235 IF( jpni < 1 .OR. jpnj < 1 ) THEN 236 WRITE(ldtxt(ii),*) ' jpni and jpnj will be calculated automatically' ; ii = ii + 1 236 237 ELSE 237 238 WRITE(ldtxt(ii),*) ' processor grid extent in i jpni = ',jpni ; ii = ii + 1 238 239 WRITE(ldtxt(ii),*) ' processor grid extent in j jpnj = ',jpnj ; ii = ii + 1 239 WRITE(ldtxt(ii),*) ' number of local domains jpnij = ',jpnij ; ii = ii + 1 240 END IF 240 ENDIF 241 241 242 242 WRITE(ldtxt(ii),*) ' avoid use of mpi_allgather at the north fold ln_nnogather = ', ln_nnogather ; ii = ii + 1 … … 259 259 CASE ( 'B' ) ! Buffer mpi send (blocking) 260 260 WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1 261 IF( Agrif_Root() ) CALL mpi_init_o pa( ldtxt, ii, ierr )261 IF( Agrif_Root() ) CALL mpi_init_oce( ldtxt, ii, ierr ) 262 262 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 263 263 WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1 … … 268 268 kstop = kstop + 1 269 269 END SELECT 270 ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN 270 ! 271 ELSEIF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN 272 WRITE(ldtxt(ii),cform_err) ; ii = ii + 1 271 273 WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator ' ; ii = ii + 1 272 274 WRITE(ldtxt(ii),*) ' without calling MPI_Init before ! ' ; ii = ii + 1 … … 279 281 CASE ( 'B' ) ! Buffer mpi send (blocking) 280 282 WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1 281 IF( Agrif_Root() ) CALL mpi_init_o pa( ldtxt, ii, ierr )283 IF( Agrif_Root() ) CALL mpi_init_oce( ldtxt, ii, ierr ) 282 284 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 283 285 WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1 … … 294 296 IF( PRESENT(localComm) ) THEN 295 297 IF( Agrif_Root() ) THEN 296 mpi_comm_o pa= localComm298 mpi_comm_oce = localComm 297 299 ENDIF 298 300 ELSE 299 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_o pa, code)301 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, code) 300 302 IF( code /= MPI_SUCCESS ) THEN 301 303 DO ji = 1, SIZE(ldtxt) … … 308 310 ENDIF 309 311 310 311 312 313 314 315 316 317 318 CALL mpi_comm_rank( mpi_comm_o pa, mpprank, ierr )319 CALL mpi_comm_size( mpi_comm_o pa, mppsize, ierr )312 #if defined key_agrif 313 IF( Agrif_Root() ) THEN 314 CALL Agrif_MPI_Init(mpi_comm_oce) 315 ELSE 316 CALL Agrif_MPI_set_grid_comm(mpi_comm_oce) 317 ENDIF 318 #endif 319 320 CALL mpi_comm_rank( mpi_comm_oce, mpprank, ierr ) 321 CALL mpi_comm_size( mpi_comm_oce, mppsize, ierr ) 320 322 mynode = mpprank 321 323 … … 329 331 END FUNCTION mynode 330 332 331 332 SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval ) 333 !!---------------------------------------------------------------------- 334 !! *** routine mpp_lnk_3d *** 335 !! 336 !! ** Purpose : Message passing manadgement 337 !! 338 !! ** Method : Use mppsend and mpprecv function for passing mask 339 !! between processors following neighboring subdomains. 340 !! domain parameters 341 !! nlci : first dimension of the local subdomain 342 !! nlcj : second dimension of the local subdomain 343 !! nbondi : mark for "east-west local boundary" 344 !! nbondj : mark for "north-south local boundary" 345 !! noea : number for local neighboring processors 346 !! nowe : number for local neighboring processors 347 !! noso : number for local neighboring processors 348 !! 
nono : number for local neighboring processors 349 !! 350 !! ** Action : ptab with update value at its periphery 351 !! 352 !!---------------------------------------------------------------------- 353 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 354 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 355 ! ! = T , U , V , F , W points 356 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 357 ! ! = 1. , the sign is kept 358 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 359 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 360 ! 361 INTEGER :: ji, jj, jk, jl ! dummy loop indices 362 INTEGER :: imigr, iihom, ijhom ! temporary integers 363 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 364 REAL(wp) :: zland 365 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 366 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 367 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 368 !!---------------------------------------------------------------------- 369 370 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 371 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 372 373 ! 374 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 375 ELSE ; zland = 0._wp ! zero by default 376 ENDIF 377 378 ! 1. standard boundary treatment 379 ! ------------------------------ 380 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 381 ! 382 ! WARNING ptab is defined only between nld and nle 383 DO jk = 1, jpk 384 DO jj = nlcj+1, jpj ! added line(s) (inner only) 385 ptab(nldi :nlei , jj ,jk) = ptab(nldi:nlei, nlej,jk) 386 ptab(1 :nldi-1, jj ,jk) = ptab(nldi , nlej,jk) 387 ptab(nlei+1:nlci , jj ,jk) = ptab( nlei, nlej,jk) 388 END DO 389 DO ji = nlci+1, jpi ! added column(s) (full) 390 ptab(ji ,nldj :nlej ,jk) = ptab( nlei,nldj:nlej,jk) 391 ptab(ji ,1 :nldj-1,jk) = ptab( nlei,nldj ,jk) 392 ptab(ji ,nlej+1:jpj ,jk) = ptab( nlei, nlej,jk) 393 END DO 394 END DO 395 ! 396 ELSE ! standard close or cyclic treatment 397 ! 398 ! ! East-West boundaries 399 ! !* Cyclic east-west 400 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 401 ptab( 1 ,:,:) = ptab(jpim1,:,:) 402 ptab(jpi,:,:) = ptab( 2 ,:,:) 403 ELSE !* closed 404 IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point 405 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 406 ENDIF 407 ! ! North-South boundaries (always closed) 408 IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point 409 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 410 ! 411 ENDIF 412 413 ! 2. East and west directions exchange 414 ! ------------------------------------ 415 ! we play with the neigbours AND the row number because of the periodicity 416 ! 417 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 418 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 419 iihom = nlci-nreci 420 DO jl = 1, jpreci 421 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 422 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 423 END DO 424 END SELECT 425 ! 426 ! ! Migrations 427 imigr = jpreci * jpj * jpk 428 ! 
429 SELECT CASE ( nbondi ) 430 CASE ( -1 ) 431 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 432 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 433 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 434 CASE ( 0 ) 435 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 436 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 437 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 438 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 439 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 440 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 441 CASE ( 1 ) 442 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 443 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 444 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 445 END SELECT 446 ! 447 ! ! Write Dirichlet lateral conditions 448 iihom = nlci-jpreci 449 ! 450 SELECT CASE ( nbondi ) 451 CASE ( -1 ) 452 DO jl = 1, jpreci 453 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 454 END DO 455 CASE ( 0 ) 456 DO jl = 1, jpreci 457 ptab(jl ,:,:) = zt3we(:,jl,:,2) 458 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 459 END DO 460 CASE ( 1 ) 461 DO jl = 1, jpreci 462 ptab(jl ,:,:) = zt3we(:,jl,:,2) 463 END DO 464 END SELECT 465 466 ! 3. North and south directions 467 ! ----------------------------- 468 ! always closed : we play only with the neigbours 469 ! 470 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 471 ijhom = nlcj-nrecj 472 DO jl = 1, jprecj 473 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 474 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 475 END DO 476 ENDIF 477 ! 478 ! ! Migrations 479 imigr = jprecj * jpi * jpk 480 ! 481 SELECT CASE ( nbondj ) 482 CASE ( -1 ) 483 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 484 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 485 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 486 CASE ( 0 ) 487 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 488 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 489 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 490 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 491 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 492 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 493 CASE ( 1 ) 494 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 495 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 496 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 497 END SELECT 498 ! 499 ! ! Write Dirichlet lateral conditions 500 ijhom = nlcj-jprecj 501 ! 502 SELECT CASE ( nbondj ) 503 CASE ( -1 ) 504 DO jl = 1, jprecj 505 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 506 END DO 507 CASE ( 0 ) 508 DO jl = 1, jprecj 509 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 510 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 511 END DO 512 CASE ( 1 ) 513 DO jl = 1, jprecj 514 ptab(:,jl,:) = zt3sn(:,jl,:,2) 515 END DO 516 END SELECT 517 518 ! 4. north fold treatment 519 ! ----------------------- 520 ! 521 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 522 ! 523 SELECT CASE ( jpni ) 524 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 525 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 526 END SELECT 527 ! 528 ENDIF 529 ! 530 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 531 ! 532 END SUBROUTINE mpp_lnk_3d 533 534 535 SUBROUTINE mpp_lnk_2d_multiple( pt2d_array , type_array , psgn_array , num_fields , cd_mpp, pval ) 536 !!---------------------------------------------------------------------- 537 !! *** routine mpp_lnk_2d_multiple *** 538 !! 539 !! ** Purpose : Message passing management for multiple 2d arrays 540 !! 541 !! 
** Method : Use mppsend and mpprecv function for passing mask 542 !! between processors following neighboring subdomains. 543 !! domain parameters 544 !! nlci : first dimension of the local subdomain 545 !! nlcj : second dimension of the local subdomain 546 !! nbondi : mark for "east-west local boundary" 547 !! nbondj : mark for "north-south local boundary" 548 !! noea : number for local neighboring processors 549 !! nowe : number for local neighboring processors 550 !! noso : number for local neighboring processors 551 !! nono : number for local neighboring processors 552 !!---------------------------------------------------------------------- 553 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: type_array ! define the nature of ptab array grid-points 554 ! ! = T , U , V , F , W and I points 555 REAL(wp) , DIMENSION(:), INTENT(in ) :: psgn_array ! =-1 the sign change across the north fold boundary 556 ! ! = 1. , the sign is kept 557 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 558 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 559 !! 560 INTEGER :: ji, jj, jl ! dummy loop indices 561 INTEGER :: ii !!MULTI SEND DUMMY LOOP INDICES 562 INTEGER :: imigr, iihom, ijhom ! temporary integers 563 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 564 INTEGER :: num_fields 565 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array 566 REAL(wp) :: zland 567 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 568 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 569 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 570 571 !!---------------------------------------------------------------------- 572 ! 573 ALLOCATE( zt2ns(jpi,jprecj,2*num_fields), zt2sn(jpi,jprecj,2*num_fields), & 574 & zt2ew(jpj,jpreci,2*num_fields), zt2we(jpj,jpreci,2*num_fields) ) 575 ! 576 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 577 ELSE ; zland = 0._wp ! zero by default 578 ENDIF 579 580 ! 1. standard boundary treatment 581 ! ------------------------------ 582 ! 583 !First Array 584 DO ii = 1 , num_fields 585 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 586 ! 587 ! WARNING pt2d is defined only between nld and nle 588 DO jj = nlcj+1, jpj ! added line(s) (inner only) 589 pt2d_array(ii)%pt2d(nldi :nlei , jj) = pt2d_array(ii)%pt2d(nldi:nlei, nlej) 590 pt2d_array(ii)%pt2d(1 :nldi-1, jj) = pt2d_array(ii)%pt2d(nldi , nlej) 591 pt2d_array(ii)%pt2d(nlei+1:nlci , jj) = pt2d_array(ii)%pt2d( nlei, nlej) 592 END DO 593 DO ji = nlci+1, jpi ! added column(s) (full) 594 pt2d_array(ii)%pt2d(ji, nldj :nlej ) = pt2d_array(ii)%pt2d(nlei, nldj:nlej) 595 pt2d_array(ii)%pt2d(ji, 1 :nldj-1) = pt2d_array(ii)%pt2d(nlei, nldj ) 596 pt2d_array(ii)%pt2d(ji, nlej+1:jpj ) = pt2d_array(ii)%pt2d(nlei, nlej) 597 END DO 598 ! 599 ELSE ! standard close or cyclic treatment 600 ! 601 ! ! East-West boundaries 602 IF( nbondi == 2 .AND. & ! Cyclic east-west 603 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 604 pt2d_array(ii)%pt2d( 1 , : ) = pt2d_array(ii)%pt2d( jpim1, : ) ! west 605 pt2d_array(ii)%pt2d( jpi , : ) = pt2d_array(ii)%pt2d( 2 , : ) ! east 606 ELSE ! closed 607 IF( .NOT. type_array(ii) == 'F' ) pt2d_array(ii)%pt2d( 1 : jpreci,:) = zland ! south except F-point 608 pt2d_array(ii)%pt2d(nlci-jpreci+1 : jpi ,:) = zland ! north 609 ENDIF 610 ! ! North-South boundaries (always closed) 611 IF( .NOT. 
type_array(ii) == 'F' ) pt2d_array(ii)%pt2d(:, 1:jprecj ) = zland ! south except F-point 612 pt2d_array(ii)%pt2d(:, nlcj-jprecj+1:jpj ) = zland ! north 613 ! 614 ENDIF 615 END DO 616 617 ! 2. East and west directions exchange 618 ! ------------------------------------ 619 ! we play with the neigbours AND the row number because of the periodicity 620 ! 621 DO ii = 1 , num_fields 622 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 623 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 624 iihom = nlci-nreci 625 DO jl = 1, jpreci 626 zt2ew( : , jl , ii ) = pt2d_array(ii)%pt2d( jpreci+jl , : ) 627 zt2we( : , jl , ii ) = pt2d_array(ii)%pt2d( iihom +jl , : ) 628 END DO 629 END SELECT 630 END DO 631 ! 632 ! ! Migrations 633 imigr = jpreci * jpj 634 ! 635 SELECT CASE ( nbondi ) 636 CASE ( -1 ) 637 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req1 ) 638 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 639 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 640 CASE ( 0 ) 641 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 642 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req2 ) 643 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 644 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 645 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 646 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 647 CASE ( 1 ) 648 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 649 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 650 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 651 END SELECT 652 ! 653 ! ! Write Dirichlet lateral conditions 654 iihom = nlci - jpreci 655 ! 656 657 DO ii = 1 , num_fields 658 SELECT CASE ( nbondi ) 659 CASE ( -1 ) 660 DO jl = 1, jpreci 661 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 662 END DO 663 CASE ( 0 ) 664 DO jl = 1, jpreci 665 pt2d_array(ii)%pt2d( jl , : ) = zt2we(:,jl,num_fields+ii) 666 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 667 END DO 668 CASE ( 1 ) 669 DO jl = 1, jpreci 670 pt2d_array(ii)%pt2d( jl , : )= zt2we(:,jl,num_fields+ii) 671 END DO 672 END SELECT 673 END DO 674 675 ! 3. North and south directions 676 ! ----------------------------- 677 ! always closed : we play only with the neigbours 678 ! 679 !First Array 680 DO ii = 1 , num_fields 681 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 682 ijhom = nlcj-nrecj 683 DO jl = 1, jprecj 684 zt2sn(:,jl , ii) = pt2d_array(ii)%pt2d( : , ijhom +jl ) 685 zt2ns(:,jl , ii) = pt2d_array(ii)%pt2d( : , jprecj+jl ) 686 END DO 687 ENDIF 688 END DO 689 ! 690 ! ! Migrations 691 imigr = jprecj * jpi 692 ! 
693 SELECT CASE ( nbondj ) 694 CASE ( -1 ) 695 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req1 ) 696 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 697 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 698 CASE ( 0 ) 699 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 700 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req2 ) 701 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 702 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 703 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 704 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 705 CASE ( 1 ) 706 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 707 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 708 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 709 END SELECT 710 ! 711 ! ! Write Dirichlet lateral conditions 712 ijhom = nlcj - jprecj 713 ! 714 715 DO ii = 1 , num_fields 716 !First Array 717 SELECT CASE ( nbondj ) 718 CASE ( -1 ) 719 DO jl = 1, jprecj 720 pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii ) 721 END DO 722 CASE ( 0 ) 723 DO jl = 1, jprecj 724 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii) 725 pt2d_array(ii)%pt2d( : , ijhom + jl ) = zt2ns( : , jl , num_fields + ii ) 726 END DO 727 CASE ( 1 ) 728 DO jl = 1, jprecj 729 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii ) 730 END DO 731 END SELECT 732 END DO 733 734 ! 4. north fold treatment 735 ! ----------------------- 736 ! 737 !First Array 738 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 739 ! 740 SELECT CASE ( jpni ) 741 CASE ( 1 ) ; 742 DO ii = 1 , num_fields 743 CALL lbc_nfd ( pt2d_array(ii)%pt2d( : , : ), type_array(ii) , psgn_array(ii) ) ! only 1 northern proc, no mpp 744 END DO 745 CASE DEFAULT ; CALL mpp_lbc_north_2d_multiple( pt2d_array, type_array, psgn_array, num_fields ) ! for all northern procs. 746 END SELECT 747 ! 748 ENDIF 749 ! 750 ! 751 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 752 ! 753 END SUBROUTINE mpp_lnk_2d_multiple 754 755 756 SUBROUTINE load_array( pt2d, cd_type, psgn, pt2d_array, type_array, psgn_array, num_fields ) 757 !!--------------------------------------------------------------------- 758 REAL(wp), DIMENSION(jpi,jpj), TARGET, INTENT(inout) :: pt2d ! Second 2D array on which the boundary condition is applied 759 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 760 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 761 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 762 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 763 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 764 INTEGER , INTENT (inout) :: num_fields 765 !!--------------------------------------------------------------------- 766 num_fields = num_fields + 1 767 pt2d_array(num_fields)%pt2d => pt2d 768 type_array(num_fields) = cd_type 769 psgn_array(num_fields) = psgn 770 END SUBROUTINE load_array 333 !!---------------------------------------------------------------------- 334 !! *** routine mpp_lnk_(2,3,4)d *** 335 !! 336 !! * Argument : dummy argument use in mpp_lnk_... routines 337 !! ptab : array or pointer of arrays on which the boundary condition is applied 338 !! cd_nat : nature of array grid-points 339 !! psgn : sign used across the north fold boundary 340 !! kfld : optional, number of pt3d arrays 341 !! 
cd_mpp : optional, fill the overlap area only 342 !! pval : optional, background value (used at closed boundaries) 343 !!---------------------------------------------------------------------- 344 ! 345 ! !== 2D array and array of 2D pointer ==! 346 ! 347 # define DIM_2d 348 # define ROUTINE_LNK mpp_lnk_2d 349 # include "mpp_lnk_generic.h90" 350 # undef ROUTINE_LNK 351 # define MULTI 352 # define ROUTINE_LNK mpp_lnk_2d_ptr 353 # include "mpp_lnk_generic.h90" 354 # undef ROUTINE_LNK 355 # undef MULTI 356 # undef DIM_2d 357 ! 358 ! !== 3D array and array of 3D pointer ==! 359 ! 360 # define DIM_3d 361 # define ROUTINE_LNK mpp_lnk_3d 362 # include "mpp_lnk_generic.h90" 363 # undef ROUTINE_LNK 364 # define MULTI 365 # define ROUTINE_LNK mpp_lnk_3d_ptr 366 # include "mpp_lnk_generic.h90" 367 # undef ROUTINE_LNK 368 # undef MULTI 369 # undef DIM_3d 370 ! 371 ! !== 4D array and array of 4D pointer ==! 372 ! 373 # define DIM_4d 374 # define ROUTINE_LNK mpp_lnk_4d 375 # include "mpp_lnk_generic.h90" 376 # undef ROUTINE_LNK 377 # define MULTI 378 # define ROUTINE_LNK mpp_lnk_4d_ptr 379 # include "mpp_lnk_generic.h90" 380 # undef ROUTINE_LNK 381 # undef MULTI 382 # undef DIM_4d 383 384 !!---------------------------------------------------------------------- 385 !! *** routine mpp_nfd_(2,3,4)d *** 386 !! 387 !! * Argument : dummy argument use in mpp_nfd_... routines 388 !! ptab : array or pointer of arrays on which the boundary condition is applied 389 !! cd_nat : nature of array grid-points 390 !! psgn : sign used across the north fold boundary 391 !! kfld : optional, number of pt3d arrays 392 !! cd_mpp : optional, fill the overlap area only 393 !! pval : optional, background value (used at closed boundaries) 394 !!---------------------------------------------------------------------- 395 ! 396 ! !== 2D array and array of 2D pointer ==! 397 ! 398 # define DIM_2d 399 # define ROUTINE_NFD mpp_nfd_2d 400 # include "mpp_nfd_generic.h90" 401 # undef ROUTINE_NFD 402 # define MULTI 403 # define ROUTINE_NFD mpp_nfd_2d_ptr 404 # include "mpp_nfd_generic.h90" 405 # undef ROUTINE_NFD 406 # undef MULTI 407 # undef DIM_2d 408 ! 409 ! !== 3D array and array of 3D pointer ==! 410 ! 411 # define DIM_3d 412 # define ROUTINE_NFD mpp_nfd_3d 413 # include "mpp_nfd_generic.h90" 414 # undef ROUTINE_NFD 415 # define MULTI 416 # define ROUTINE_NFD mpp_nfd_3d_ptr 417 # include "mpp_nfd_generic.h90" 418 # undef ROUTINE_NFD 419 # undef MULTI 420 # undef DIM_3d 421 ! 422 ! !== 4D array and array of 4D pointer ==! 423 ! 424 # define DIM_4d 425 # define ROUTINE_NFD mpp_nfd_4d 426 # include "mpp_nfd_generic.h90" 427 # undef ROUTINE_NFD 428 # define MULTI 429 # define ROUTINE_NFD mpp_nfd_4d_ptr 430 # include "mpp_nfd_generic.h90" 431 # undef ROUTINE_NFD 432 # undef MULTI 433 # undef DIM_4d 434 435 436 !!---------------------------------------------------------------------- 437 !! *** routine mpp_lnk_bdy_(2,3,4)d *** 438 !! 439 !! * Argument : dummy argument use in mpp_lnk_... routines 440 !! ptab : array or pointer of arrays on which the boundary condition is applied 441 !! cd_nat : nature of array grid-points 442 !! psgn : sign used across the north fold boundary 443 !! kb_bdy : BDY boundary set 444 !! kfld : optional, number of pt3d arrays 445 !!---------------------------------------------------------------------- 446 ! 447 ! !== 2D array and array of 2D pointer ==! 448 ! 449 # define DIM_2d 450 # define ROUTINE_BDY mpp_lnk_bdy_2d 451 # include "mpp_bdy_generic.h90" 452 # undef ROUTINE_BDY 453 # undef DIM_2d 454 ! 455 ! 
!== 3D array and array of 3D pointer ==! 456 ! 457 # define DIM_3d 458 # define ROUTINE_BDY mpp_lnk_bdy_3d 459 # include "mpp_bdy_generic.h90" 460 # undef ROUTINE_BDY 461 # undef DIM_3d 462 ! 463 ! !== 4D array and array of 4D pointer ==! 464 ! 465 # define DIM_4d 466 # define ROUTINE_BDY mpp_lnk_bdy_4d 467 # include "mpp_bdy_generic.h90" 468 # undef ROUTINE_BDY 469 # undef DIM_4d 470 471 !!---------------------------------------------------------------------- 472 !! 473 !! load_array & mpp_lnk_2d_9 à generaliser a 3D et 4D 771 474 772 475 773 SUBROUTINE mpp_lnk_2d_9( pt2dA, cd_typeA, psgnA, pt2dB, cd_typeB, psgnB, pt2dC, cd_typeC, psgnC & 774 & , pt2dD, cd_typeD, psgnD, pt2dE, cd_typeE, psgnE, pt2dF, cd_typeF, psgnF & 775 & , pt2dG, cd_typeG, psgnG, pt2dH, cd_typeH, psgnH, pt2dI, cd_typeI, psgnI, cd_mpp, pval) 776 !!--------------------------------------------------------------------- 777 ! Second 2D array on which the boundary condition is applied 778 REAL(wp), DIMENSION(jpi,jpj), TARGET , INTENT(inout) :: pt2dA 779 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dB , pt2dC , pt2dD , pt2dE 780 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dF , pt2dG , pt2dH , pt2dI 781 ! define the nature of ptab array grid-points 782 CHARACTER(len=1) , INTENT(in ) :: cd_typeA 783 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeB , cd_typeC , cd_typeD , cd_typeE 784 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeF , cd_typeG , cd_typeH , cd_typeI 785 ! =-1 the sign change across the north fold boundary 786 REAL(wp) , INTENT(in ) :: psgnA 787 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnB , psgnC , psgnD , psgnE 788 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnF , psgnG , psgnH , psgnI 789 CHARACTER(len=3) , OPTIONAL, INTENT(in ) :: cd_mpp ! fill the overlap area only 790 REAL(wp) , OPTIONAL, INTENT(in ) :: pval ! background value (used at closed boundaries) 791 !! 792 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 793 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 794 ! ! = T , U , V , F , W and I points 795 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 796 INTEGER :: num_fields 797 !!--------------------------------------------------------------------- 798 ! 799 num_fields = 0 800 ! 801 ! Load the first array 802 CALL load_array( pt2dA, cd_typeA, psgnA, pt2d_array, type_array, psgn_array, num_fields ) 803 ! 804 ! Look if more arrays are added 805 IF( PRESENT(psgnB) ) CALL load_array(pt2dB,cd_typeB,psgnB,pt2d_array, type_array, psgn_array,num_fields) 806 IF( PRESENT(psgnC) ) CALL load_array(pt2dC,cd_typeC,psgnC,pt2d_array, type_array, psgn_array,num_fields) 807 IF( PRESENT(psgnD) ) CALL load_array(pt2dD,cd_typeD,psgnD,pt2d_array, type_array, psgn_array,num_fields) 808 IF( PRESENT(psgnE) ) CALL load_array(pt2dE,cd_typeE,psgnE,pt2d_array, type_array, psgn_array,num_fields) 809 IF( PRESENT(psgnF) ) CALL load_array(pt2dF,cd_typeF,psgnF,pt2d_array, type_array, psgn_array,num_fields) 810 IF( PRESENT(psgnG) ) CALL load_array(pt2dG,cd_typeG,psgnG,pt2d_array, type_array, psgn_array,num_fields) 811 IF( PRESENT(psgnH) ) CALL load_array(pt2dH,cd_typeH,psgnH,pt2d_array, type_array, psgn_array,num_fields) 812 IF( PRESENT(psgnI) ) CALL load_array(pt2dI,cd_typeI,psgnI,pt2d_array, type_array, psgn_array,num_fields) 813 ! 814 CALL mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp,pval ) 815 ! 
816 END SUBROUTINE mpp_lnk_2d_9 817 818 819 SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval ) 820 !!---------------------------------------------------------------------- 821 !! *** routine mpp_lnk_2d *** 822 !! 823 !! ** Purpose : Message passing manadgement for 2d array 824 !! 825 !! ** Method : Use mppsend and mpprecv function for passing mask 826 !! between processors following neighboring subdomains. 827 !! domain parameters 828 !! nlci : first dimension of the local subdomain 829 !! nlcj : second dimension of the local subdomain 830 !! nbondi : mark for "east-west local boundary" 831 !! nbondj : mark for "north-south local boundary" 832 !! noea : number for local neighboring processors 833 !! nowe : number for local neighboring processors 834 !! noso : number for local neighboring processors 835 !! nono : number for local neighboring processors 836 !! 837 !!---------------------------------------------------------------------- 838 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 839 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 840 ! ! = T , U , V , F , W and I points 841 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 842 ! ! = 1. , the sign is kept 843 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 844 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 845 !! 846 INTEGER :: ji, jj, jl ! dummy loop indices 847 INTEGER :: imigr, iihom, ijhom ! temporary integers 848 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 849 REAL(wp) :: zland 850 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 851 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 852 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 853 !!---------------------------------------------------------------------- 854 ! 855 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 856 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 857 ! 858 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 859 ELSE ; zland = 0._wp ! zero by default 860 ENDIF 861 862 ! 1. standard boundary treatment 863 ! ------------------------------ 864 ! 865 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 866 ! 867 ! WARNING pt2d is defined only between nld and nle 868 DO jj = nlcj+1, jpj ! added line(s) (inner only) 869 pt2d(nldi :nlei , jj ) = pt2d(nldi:nlei, nlej) 870 pt2d(1 :nldi-1, jj ) = pt2d(nldi , nlej) 871 pt2d(nlei+1:nlci , jj ) = pt2d( nlei, nlej) 872 END DO 873 DO ji = nlci+1, jpi ! added column(s) (full) 874 pt2d(ji ,nldj :nlej ) = pt2d( nlei,nldj:nlej) 875 pt2d(ji ,1 :nldj-1) = pt2d( nlei,nldj ) 876 pt2d(ji ,nlej+1:jpj ) = pt2d( nlei, nlej) 877 END DO 878 ! 879 ELSE ! standard close or cyclic treatment 880 ! 881 ! ! East-West boundaries 882 IF( nbondi == 2 .AND. & ! Cyclic east-west 883 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 884 pt2d( 1 ,:) = pt2d(jpim1,:) ! west 885 pt2d(jpi,:) = pt2d( 2 ,:) ! east 886 ELSE ! closed 887 IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point 888 pt2d(nlci-jpreci+1:jpi ,:) = zland ! north 889 ENDIF 890 ! ! North-South boundaries (always closed) 891 IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point 892 pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north 893 ! 894 ENDIF 895 896 ! 2. 
East and west directions exchange 897 ! ------------------------------------ 898 ! we play with the neigbours AND the row number because of the periodicity 899 ! 900 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 901 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 902 iihom = nlci-nreci 903 DO jl = 1, jpreci 904 zt2ew(:,jl,1) = pt2d(jpreci+jl,:) 905 zt2we(:,jl,1) = pt2d(iihom +jl,:) 906 END DO 907 END SELECT 908 ! 909 ! ! Migrations 910 imigr = jpreci * jpj 911 ! 912 SELECT CASE ( nbondi ) 913 CASE ( -1 ) 914 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 915 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 916 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 917 CASE ( 0 ) 918 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 919 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 920 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 921 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 922 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 923 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 924 CASE ( 1 ) 925 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 926 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 927 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 928 END SELECT 929 ! 930 ! ! Write Dirichlet lateral conditions 931 iihom = nlci - jpreci 932 ! 933 SELECT CASE ( nbondi ) 934 CASE ( -1 ) 935 DO jl = 1, jpreci 936 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 937 END DO 938 CASE ( 0 ) 939 DO jl = 1, jpreci 940 pt2d(jl ,:) = zt2we(:,jl,2) 941 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 942 END DO 943 CASE ( 1 ) 944 DO jl = 1, jpreci 945 pt2d(jl ,:) = zt2we(:,jl,2) 946 END DO 947 END SELECT 948 949 950 ! 3. North and south directions 951 ! ----------------------------- 952 ! always closed : we play only with the neigbours 953 ! 954 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 955 ijhom = nlcj-nrecj 956 DO jl = 1, jprecj 957 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) 958 zt2ns(:,jl,1) = pt2d(:,jprecj+jl) 959 END DO 960 ENDIF 961 ! 962 ! ! Migrations 963 imigr = jprecj * jpi 964 ! 965 SELECT CASE ( nbondj ) 966 CASE ( -1 ) 967 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 968 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 969 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 970 CASE ( 0 ) 971 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 972 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 973 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 974 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 975 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 976 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 977 CASE ( 1 ) 978 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 979 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 980 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 981 END SELECT 982 ! 983 ! ! Write Dirichlet lateral conditions 984 ijhom = nlcj - jprecj 985 ! 986 SELECT CASE ( nbondj ) 987 CASE ( -1 ) 988 DO jl = 1, jprecj 989 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 990 END DO 991 CASE ( 0 ) 992 DO jl = 1, jprecj 993 pt2d(:,jl ) = zt2sn(:,jl,2) 994 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 995 END DO 996 CASE ( 1 ) 997 DO jl = 1, jprecj 998 pt2d(:,jl ) = zt2sn(:,jl,2) 999 END DO 1000 END SELECT 1001 1002 1003 ! 4. north fold treatment 1004 ! ----------------------- 1005 ! 1006 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1007 ! 1008 SELECT CASE ( jpni ) 1009 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp 1010 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 1011 END SELECT 1012 ! 1013 ENDIF 1014 ! 
1015 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 1016 ! 1017 END SUBROUTINE mpp_lnk_2d 1018 1019 1020 SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn ) 1021 !!---------------------------------------------------------------------- 1022 !! *** routine mpp_lnk_3d_gather *** 1023 !! 1024 !! ** Purpose : Message passing manadgement for two 3D arrays 1025 !! 1026 !! ** Method : Use mppsend and mpprecv function for passing mask 1027 !! between processors following neighboring subdomains. 1028 !! domain parameters 1029 !! nlci : first dimension of the local subdomain 1030 !! nlcj : second dimension of the local subdomain 1031 !! nbondi : mark for "east-west local boundary" 1032 !! nbondj : mark for "north-south local boundary" 1033 !! noea : number for local neighboring processors 1034 !! nowe : number for local neighboring processors 1035 !! noso : number for local neighboring processors 1036 !! nono : number for local neighboring processors 1037 !! 1038 !! ** Action : ptab1 and ptab2 with update value at its periphery 1039 !! 1040 !!---------------------------------------------------------------------- 1041 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which 1042 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied 1043 CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays 1044 CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points 1045 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1046 !! ! = 1. , the sign is kept 1047 INTEGER :: jl ! dummy loop indices 1048 INTEGER :: imigr, iihom, ijhom ! temporary integers 1049 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1050 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1051 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ns, zt4sn ! 2 x 3d for north-south & south-north 1052 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ew, zt4we ! 2 x 3d for east-west & west-east 1053 !!---------------------------------------------------------------------- 1054 ! 1055 ALLOCATE( zt4ns(jpi,jprecj,jpk,2,2), zt4sn(jpi,jprecj,jpk,2,2) , & 1056 & zt4ew(jpj,jpreci,jpk,2,2), zt4we(jpj,jpreci,jpk,2,2) ) 1057 ! 1058 ! 1. standard boundary treatment 1059 ! ------------------------------ 1060 ! ! East-West boundaries 1061 ! !* Cyclic east-west 1062 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1063 ptab1( 1 ,:,:) = ptab1(jpim1,:,:) 1064 ptab1(jpi,:,:) = ptab1( 2 ,:,:) 1065 ptab2( 1 ,:,:) = ptab2(jpim1,:,:) 1066 ptab2(jpi,:,:) = ptab2( 2 ,:,:) 1067 ELSE !* closed 1068 IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point 1069 IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0 1070 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north 1071 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1072 ENDIF 1073 1074 1075 ! ! North-South boundaries 1076 IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point 1077 IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0 1078 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north 1079 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1080 1081 1082 ! 2. East and west directions exchange 1083 ! ------------------------------------ 1084 ! we play with the neigbours AND the row number because of the periodicity 1085 ! 1086 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 1087 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. 
close case) 1088 iihom = nlci-nreci 1089 DO jl = 1, jpreci 1090 zt4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:) 1091 zt4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:) 1092 zt4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:) 1093 zt4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:) 1094 END DO 1095 END SELECT 1096 ! 1097 ! ! Migrations 1098 imigr = jpreci * jpj * jpk *2 1099 ! 1100 SELECT CASE ( nbondi ) 1101 CASE ( -1 ) 1102 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req1 ) 1103 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1104 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1105 CASE ( 0 ) 1106 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1107 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req2 ) 1108 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1109 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1110 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1111 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1112 CASE ( 1 ) 1113 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1114 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1115 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1116 END SELECT 1117 ! 1118 ! ! Write Dirichlet lateral conditions 1119 iihom = nlci - jpreci 1120 ! 1121 SELECT CASE ( nbondi ) 1122 CASE ( -1 ) 1123 DO jl = 1, jpreci 1124 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1125 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1126 END DO 1127 CASE ( 0 ) 1128 DO jl = 1, jpreci 1129 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1130 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1131 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1132 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1133 END DO 1134 CASE ( 1 ) 1135 DO jl = 1, jpreci 1136 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1137 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1138 END DO 1139 END SELECT 1140 1141 1142 ! 3. North and south directions 1143 ! ----------------------------- 1144 ! always closed : we play only with the neigbours 1145 ! 1146 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1147 ijhom = nlcj - nrecj 1148 DO jl = 1, jprecj 1149 zt4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:) 1150 zt4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:) 1151 zt4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:) 1152 zt4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:) 1153 END DO 1154 ENDIF 1155 ! 1156 ! ! Migrations 1157 imigr = jprecj * jpi * jpk * 2 1158 ! 1159 SELECT CASE ( nbondj ) 1160 CASE ( -1 ) 1161 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req1 ) 1162 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1163 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1164 CASE ( 0 ) 1165 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1166 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req2 ) 1167 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1168 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1169 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1170 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1171 CASE ( 1 ) 1172 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1173 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1174 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1175 END SELECT 1176 ! 1177 ! ! Write Dirichlet lateral conditions 1178 ijhom = nlcj - jprecj 1179 ! 
1180 SELECT CASE ( nbondj ) 1181 CASE ( -1 ) 1182 DO jl = 1, jprecj 1183 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1184 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1185 END DO 1186 CASE ( 0 ) 1187 DO jl = 1, jprecj 1188 ptab1(:,jl ,:) = zt4sn(:,jl,:,1,2) 1189 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1190 ptab2(:,jl ,:) = zt4sn(:,jl,:,2,2) 1191 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1192 END DO 1193 CASE ( 1 ) 1194 DO jl = 1, jprecj 1195 ptab1(:,jl,:) = zt4sn(:,jl,:,1,2) 1196 ptab2(:,jl,:) = zt4sn(:,jl,:,2,2) 1197 END DO 1198 END SELECT 1199 1200 1201 ! 4. north fold treatment 1202 ! ----------------------- 1203 IF( npolj /= 0 ) THEN 1204 ! 1205 SELECT CASE ( jpni ) 1206 CASE ( 1 ) 1207 CALL lbc_nfd ( ptab1, cd_type1, psgn ) ! only for northern procs. 1208 CALL lbc_nfd ( ptab2, cd_type2, psgn ) 1209 CASE DEFAULT 1210 CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs. 1211 CALL mpp_lbc_north (ptab2, cd_type2, psgn) 1212 END SELECT 1213 ! 1214 ENDIF 1215 ! 1216 DEALLOCATE( zt4ns, zt4sn, zt4ew, zt4we ) 1217 ! 1218 END SUBROUTINE mpp_lnk_3d_gather 1219 1220 1221 SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn, jpri, jprj ) 1222 !!---------------------------------------------------------------------- 1223 !! *** routine mpp_lnk_2d_e *** 1224 !! 1225 !! ** Purpose : Message passing manadgement for 2d array (with halo) 1226 !! 1227 !! ** Method : Use mppsend and mpprecv function for passing mask 1228 !! between processors following neighboring subdomains. 1229 !! domain parameters 1230 !! nlci : first dimension of the local subdomain 1231 !! nlcj : second dimension of the local subdomain 1232 !! jpri : number of rows for extra outer halo 1233 !! jprj : number of columns for extra outer halo 1234 !! nbondi : mark for "east-west local boundary" 1235 !! nbondj : mark for "north-south local boundary" 1236 !! noea : number for local neighboring processors 1237 !! nowe : number for local neighboring processors 1238 !! noso : number for local neighboring processors 1239 !! nono : number for local neighboring processors 1240 !! 1241 !!---------------------------------------------------------------------- 1242 INTEGER , INTENT(in ) :: jpri 1243 INTEGER , INTENT(in ) :: jprj 1244 REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo 1245 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points 1246 ! ! = T , U , V , F , W and I points 1247 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the 1248 !! ! north boundary, = 1. otherwise 1249 INTEGER :: jl ! dummy loop indices 1250 INTEGER :: imigr, iihom, ijhom ! temporary integers 1251 INTEGER :: ipreci, iprecj ! temporary integers 1252 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1253 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1254 !! 1255 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns 1256 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn 1257 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe 1258 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew 1259 !!---------------------------------------------------------------------- 1260 1261 ipreci = jpreci + jpri ! take into account outer extra 2D overlap area 1262 iprecj = jprecj + jprj 1263 1264 1265 ! 1. standard boundary treatment 1266 ! ------------------------------ 1267 ! Order matters Here !!!! 1268 ! 1269 ! !* North-South boundaries (always colsed) 1270 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jprj : jprecj ) = 0.e0 ! 
south except at F-point 1271 pt2d(:,nlcj-jprecj+1:jpj+jprj) = 0.e0 ! north 1272 1273 ! ! East-West boundaries 1274 ! !* Cyclic east-west 1275 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1276 pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east 1277 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west 1278 ! 1279 ELSE !* closed 1280 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point 1281 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north 1282 ENDIF 1283 ! 1284 1285 ! north fold treatment 1286 ! ----------------------- 1287 IF( npolj /= 0 ) THEN 1288 ! 1289 SELECT CASE ( jpni ) 1290 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj ) 1291 CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn ) 1292 END SELECT 1293 ! 1294 ENDIF 1295 1296 ! 2. East and west directions exchange 1297 ! ------------------------------------ 1298 ! we play with the neigbours AND the row number because of the periodicity 1299 ! 1300 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 1301 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1302 iihom = nlci-nreci-jpri 1303 DO jl = 1, ipreci 1304 r2dew(:,jl,1) = pt2d(jpreci+jl,:) 1305 r2dwe(:,jl,1) = pt2d(iihom +jl,:) 1306 END DO 1307 END SELECT 1308 ! 1309 ! ! Migrations 1310 imigr = ipreci * ( jpj + 2*jprj) 1311 ! 1312 SELECT CASE ( nbondi ) 1313 CASE ( -1 ) 1314 CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 ) 1315 CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) 1316 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1317 CASE ( 0 ) 1318 CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) 1319 CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 ) 1320 CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) 1321 CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) 1322 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1323 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1324 CASE ( 1 ) 1325 CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) 1326 CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) 1327 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1328 END SELECT 1329 ! 1330 ! ! Write Dirichlet lateral conditions 1331 iihom = nlci - jpreci 1332 ! 1333 SELECT CASE ( nbondi ) 1334 CASE ( -1 ) 1335 DO jl = 1, ipreci 1336 pt2d(iihom+jl,:) = r2dew(:,jl,2) 1337 END DO 1338 CASE ( 0 ) 1339 DO jl = 1, ipreci 1340 pt2d(jl-jpri,:) = r2dwe(:,jl,2) 1341 pt2d( iihom+jl,:) = r2dew(:,jl,2) 1342 END DO 1343 CASE ( 1 ) 1344 DO jl = 1, ipreci 1345 pt2d(jl-jpri,:) = r2dwe(:,jl,2) 1346 END DO 1347 END SELECT 1348 1349 1350 ! 3. North and south directions 1351 ! ----------------------------- 1352 ! always closed : we play only with the neigbours 1353 ! 1354 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1355 ijhom = nlcj-nrecj-jprj 1356 DO jl = 1, iprecj 1357 r2dsn(:,jl,1) = pt2d(:,ijhom +jl) 1358 r2dns(:,jl,1) = pt2d(:,jprecj+jl) 1359 END DO 1360 ENDIF 1361 ! 1362 ! ! Migrations 1363 imigr = iprecj * ( jpi + 2*jpri ) 1364 ! 
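      !                     ! north-south exchange of the extended halo: each message carries
      !                     ! iprecj rows of the full extended row (jpi + 2*jpri points); tags 3 and 4
      !                     ! distinguish the two directions, as in the standard 2D/3D routines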
1365 SELECT CASE ( nbondj ) 1366 CASE ( -1 ) 1367 CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 ) 1368 CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) 1369 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1370 CASE ( 0 ) 1371 CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) 1372 CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 ) 1373 CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) 1374 CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) 1375 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1376 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1377 CASE ( 1 ) 1378 CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) 1379 CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) 1380 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1381 END SELECT 1382 ! 1383 ! ! Write Dirichlet lateral conditions 1384 ijhom = nlcj - jprecj 1385 ! 1386 SELECT CASE ( nbondj ) 1387 CASE ( -1 ) 1388 DO jl = 1, iprecj 1389 pt2d(:,ijhom+jl) = r2dns(:,jl,2) 1390 END DO 1391 CASE ( 0 ) 1392 DO jl = 1, iprecj 1393 pt2d(:,jl-jprj) = r2dsn(:,jl,2) 1394 pt2d(:,ijhom+jl ) = r2dns(:,jl,2) 1395 END DO 1396 CASE ( 1 ) 1397 DO jl = 1, iprecj 1398 pt2d(:,jl-jprj) = r2dsn(:,jl,2) 1399 END DO 1400 END SELECT 1401 ! 1402 END SUBROUTINE mpp_lnk_2d_e 1403 1404 SUBROUTINE mpp_lnk_sum_3d( ptab, cd_type, psgn, cd_mpp, pval ) 1405 !!---------------------------------------------------------------------- 1406 !! *** routine mpp_lnk_sum_3d *** 1407 !! 1408 !! ** Purpose : Message passing manadgement (sum the overlap region) 1409 !! 1410 !! ** Method : Use mppsend and mpprecv function for passing mask 1411 !! between processors following neighboring subdomains. 1412 !! domain parameters 1413 !! nlci : first dimension of the local subdomain 1414 !! nlcj : second dimension of the local subdomain 1415 !! nbondi : mark for "east-west local boundary" 1416 !! nbondj : mark for "north-south local boundary" 1417 !! noea : number for local neighboring processors 1418 !! nowe : number for local neighboring processors 1419 !! noso : number for local neighboring processors 1420 !! nono : number for local neighboring processors 1421 !! 1422 !! ** Action : ptab with update value at its periphery 1423 !! 1424 !!---------------------------------------------------------------------- 1425 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 1426 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 1427 ! ! = T , U , V , F , W points 1428 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1429 ! ! = 1. , the sign is kept 1430 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 1431 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 1432 !! 1433 INTEGER :: ji, jj, jk, jl ! dummy loop indices 1434 INTEGER :: imigr, iihom, ijhom ! temporary integers 1435 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1436 REAL(wp) :: zland 1437 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1438 ! 1439 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 1440 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 1441 1442 !!---------------------------------------------------------------------- 1443 1444 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 1445 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 1446 1447 ! 
1448 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 1449 ELSE ; zland = 0.e0 ! zero by default 1450 ENDIF 1451 1452 ! 1. standard boundary treatment 1453 ! ------------------------------ 1454 ! 2. East and west directions exchange 1455 ! ------------------------------------ 1456 ! we play with the neigbours AND the row number because of the periodicity 1457 ! 1458 SELECT CASE ( nbondi ) ! Read lateral conditions 1459 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1460 iihom = nlci-jpreci 1461 DO jl = 1, jpreci 1462 zt3ew(:,jl,:,1) = ptab(jl ,:,:) ; ptab(jl ,:,:) = 0.0_wp 1463 zt3we(:,jl,:,1) = ptab(iihom+jl,:,:) ; ptab(iihom+jl,:,:) = 0.0_wp 1464 END DO 1465 END SELECT 1466 ! 1467 ! ! Migrations 1468 imigr = jpreci * jpj * jpk 1469 ! 1470 SELECT CASE ( nbondi ) 1471 CASE ( -1 ) 1472 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 1473 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 1474 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1475 CASE ( 0 ) 1476 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1477 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 1478 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 1479 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 1480 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1481 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1482 CASE ( 1 ) 1483 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1484 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 1485 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1486 END SELECT 1487 ! 1488 ! ! Write lateral conditions 1489 iihom = nlci-nreci 1490 ! 1491 SELECT CASE ( nbondi ) 1492 CASE ( -1 ) 1493 DO jl = 1, jpreci 1494 ptab(iihom+jl,:,:) = ptab(iihom+jl,:,:) + zt3ew(:,jl,:,2) 1495 END DO 1496 CASE ( 0 ) 1497 DO jl = 1, jpreci 1498 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2) 1499 ptab(iihom +jl,:,:) = ptab(iihom +jl,:,:) + zt3ew(:,jl,:,2) 1500 END DO 1501 CASE ( 1 ) 1502 DO jl = 1, jpreci 1503 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2) 1504 END DO 1505 END SELECT 1506 1507 1508 ! 3. North and south directions 1509 ! ----------------------------- 1510 ! always closed : we play only with the neigbours 1511 ! 1512 IF( nbondj /= 2 ) THEN ! Read lateral conditions 1513 ijhom = nlcj-jprecj 1514 DO jl = 1, jprecj 1515 zt3sn(:,jl,:,1) = ptab(:,ijhom+jl,:) ; ptab(:,ijhom+jl,:) = 0.0_wp 1516 zt3ns(:,jl,:,1) = ptab(:,jl ,:) ; ptab(:,jl ,:) = 0.0_wp 1517 END DO 1518 ENDIF 1519 ! 1520 ! ! Migrations 1521 imigr = jprecj * jpi * jpk 1522 ! 1523 SELECT CASE ( nbondj ) 1524 CASE ( -1 ) 1525 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 1526 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 1527 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1528 CASE ( 0 ) 1529 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 1530 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 1531 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 1532 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 1533 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1534 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1535 CASE ( 1 ) 1536 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 1537 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 1538 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1539 END SELECT 1540 ! 1541 ! ! Write lateral conditions 1542 ijhom = nlcj-nrecj 1543 ! 
1544 SELECT CASE ( nbondj ) 1545 CASE ( -1 ) 1546 DO jl = 1, jprecj 1547 ptab(:,ijhom+jl,:) = ptab(:,ijhom+jl,:) + zt3ns(:,jl,:,2) 1548 END DO 1549 CASE ( 0 ) 1550 DO jl = 1, jprecj 1551 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl,:,2) 1552 ptab(:,ijhom +jl,:) = ptab(:,ijhom +jl,:) + zt3ns(:,jl,:,2) 1553 END DO 1554 CASE ( 1 ) 1555 DO jl = 1, jprecj 1556 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl ,:,2) 1557 END DO 1558 END SELECT 1559 1560 1561 ! 4. north fold treatment 1562 ! ----------------------- 1563 ! 1564 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1565 ! 1566 SELECT CASE ( jpni ) 1567 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 1568 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 1569 END SELECT 1570 ! 1571 ENDIF 1572 ! 1573 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 1574 ! 1575 END SUBROUTINE mpp_lnk_sum_3d 1576 1577 SUBROUTINE mpp_lnk_sum_2d( pt2d, cd_type, psgn, cd_mpp, pval ) 1578 !!---------------------------------------------------------------------- 1579 !! *** routine mpp_lnk_sum_2d *** 1580 !! 1581 !! ** Purpose : Message passing manadgement for 2d array (sum the overlap region) 1582 !! 1583 !! ** Method : Use mppsend and mpprecv function for passing mask 1584 !! between processors following neighboring subdomains. 1585 !! domain parameters 1586 !! nlci : first dimension of the local subdomain 1587 !! nlcj : second dimension of the local subdomain 1588 !! nbondi : mark for "east-west local boundary" 1589 !! nbondj : mark for "north-south local boundary" 1590 !! noea : number for local neighboring processors 1591 !! nowe : number for local neighboring processors 1592 !! noso : number for local neighboring processors 1593 !! nono : number for local neighboring processors 1594 !! 1595 !!---------------------------------------------------------------------- 1596 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 1597 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 1598 ! ! = T , U , V , F , W and I points 1599 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1600 ! ! = 1. , the sign is kept 1601 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 1602 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 1603 !! 1604 INTEGER :: ji, jj, jl ! dummy loop indices 1605 INTEGER :: imigr, iihom, ijhom ! temporary integers 1606 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1607 REAL(wp) :: zland 1608 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1609 ! 1610 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 1611 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 1612 1613 !!---------------------------------------------------------------------- 1614 1615 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 1616 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 1617 1618 ! 1619 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 1620 ELSE ; zland = 0.e0 ! zero by default 1621 ENDIF 1622 1623 ! 1. standard boundary treatment 1624 ! ------------------------------ 1625 ! 2. East and west directions exchange 1626 ! ------------------------------------ 1627 ! we play with the neigbours AND the row number because of the periodicity 1628 ! 1629 SELECT CASE ( nbondi ) ! 
Read lateral conditions 1630 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1631 iihom = nlci - jpreci 1632 DO jl = 1, jpreci 1633 zt2ew(:,jl,1) = pt2d(jl ,:) ; pt2d(jl ,:) = 0.0_wp 1634 zt2we(:,jl,1) = pt2d(iihom +jl,:) ; pt2d(iihom +jl,:) = 0.0_wp 1635 END DO 1636 END SELECT 1637 ! 1638 ! ! Migrations 1639 imigr = jpreci * jpj 1640 ! 1641 SELECT CASE ( nbondi ) 1642 CASE ( -1 ) 1643 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 1644 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 1645 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1646 CASE ( 0 ) 1647 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 1648 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 1649 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 1650 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 1651 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1652 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1653 CASE ( 1 ) 1654 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 1655 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 1656 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1657 END SELECT 1658 ! 1659 ! ! Write lateral conditions 1660 iihom = nlci-nreci 1661 ! 1662 SELECT CASE ( nbondi ) 1663 CASE ( -1 ) 1664 DO jl = 1, jpreci 1665 pt2d(iihom+jl,:) = pt2d(iihom+jl,:) + zt2ew(:,jl,2) 1666 END DO 1667 CASE ( 0 ) 1668 DO jl = 1, jpreci 1669 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2) 1670 pt2d(iihom +jl,:) = pt2d(iihom +jl,:) + zt2ew(:,jl,2) 1671 END DO 1672 CASE ( 1 ) 1673 DO jl = 1, jpreci 1674 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2) 1675 END DO 1676 END SELECT 1677 1678 1679 ! 3. North and south directions 1680 ! ----------------------------- 1681 ! always closed : we play only with the neigbours 1682 ! 1683 IF( nbondj /= 2 ) THEN ! Read lateral conditions 1684 ijhom = nlcj - jprecj 1685 DO jl = 1, jprecj 1686 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) ; pt2d(:,ijhom +jl) = 0.0_wp 1687 zt2ns(:,jl,1) = pt2d(:,jl ) ; pt2d(:,jl ) = 0.0_wp 1688 END DO 1689 ENDIF 1690 ! 1691 ! ! Migrations 1692 imigr = jprecj * jpi 1693 ! 1694 SELECT CASE ( nbondj ) 1695 CASE ( -1 ) 1696 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 1697 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 1698 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1699 CASE ( 0 ) 1700 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 1701 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 1702 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 1703 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 1704 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1705 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1706 CASE ( 1 ) 1707 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 1708 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 1709 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1710 END SELECT 1711 ! 1712 ! ! Write lateral conditions 1713 ijhom = nlcj-nrecj 1714 ! 1715 SELECT CASE ( nbondj ) 1716 CASE ( -1 ) 1717 DO jl = 1, jprecj 1718 pt2d(:,ijhom+jl) = pt2d(:,ijhom+jl) + zt2ns(:,jl,2) 1719 END DO 1720 CASE ( 0 ) 1721 DO jl = 1, jprecj 1722 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2) 1723 pt2d(:,ijhom +jl) = pt2d(:,ijhom +jl) + zt2ns(:,jl,2) 1724 END DO 1725 CASE ( 1 ) 1726 DO jl = 1, jprecj 1727 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2) 1728 END DO 1729 END SELECT 1730 1731 1732 ! 4. north fold treatment 1733 ! ----------------------- 1734 ! 1735 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1736 ! 1737 SELECT CASE ( jpni ) 1738 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! 
only 1 northern proc, no mpp 1739 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 1740 END SELECT 1741 ! 1742 ENDIF 1743 ! 1744 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 1745 ! 1746 END SUBROUTINE mpp_lnk_sum_2d 476 !! mpp_lnk_sum_2d et 3D ====>>>>>> à virer du code !!!! 477 478 479 !!---------------------------------------------------------------------- 480 481 1747 482 1748 483 SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req ) … … 1764 499 SELECT CASE ( cn_mpi_send ) 1765 500 CASE ( 'S' ) ! Standard mpi send (blocking) 1766 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, iflag )501 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce , iflag ) 1767 502 CASE ( 'B' ) ! Buffer mpi send (blocking) 1768 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, iflag )503 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce , iflag ) 1769 504 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 1770 505 ! be carefull, one more argument here : the mpi request identifier.. 1771 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, md_req, iflag )506 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce, md_req, iflag ) 1772 507 END SELECT 1773 508 ! … … 1797 532 IF( PRESENT(ksource) ) use_source = ksource 1798 533 ! 1799 CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_o pa, istatus, iflag )534 CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_oce, istatus, iflag ) 1800 535 ! 1801 536 END SUBROUTINE mpprecv … … 1819 554 itaille = jpi * jpj 1820 555 CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , & 1821 & mpi_double_precision, kp , mpi_comm_o pa, ierror )556 & mpi_double_precision, kp , mpi_comm_oce, ierror ) 1822 557 ! 1823 558 END SUBROUTINE mppgather … … 1842 577 ! 1843 578 CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , & 1844 & mpi_double_precision, kp , mpi_comm_o pa, ierror )579 & mpi_double_precision, kp , mpi_comm_oce, ierror ) 1845 580 ! 1846 581 END SUBROUTINE mppscatter 1847 582 1848 1849 SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) 1850 !!---------------------------------------------------------------------- 1851 !! *** routine mppmax_a_int *** 1852 !! 1853 !! ** Purpose : Find maximum value in an integer layout array 1854 !! 1855 !!---------------------------------------------------------------------- 1856 INTEGER , INTENT(in ) :: kdim ! size of array 1857 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1858 INTEGER , INTENT(in ), OPTIONAL :: kcom ! 1859 ! 1860 INTEGER :: ierror, localcomm ! temporary integer 1861 INTEGER, DIMENSION(kdim) :: iwork 1862 !!---------------------------------------------------------------------- 1863 ! 1864 localcomm = mpi_comm_opa 1865 IF( PRESENT(kcom) ) localcomm = kcom 1866 ! 1867 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) 1868 ! 1869 ktab(:) = iwork(:) 1870 ! 1871 END SUBROUTINE mppmax_a_int 1872 1873 1874 SUBROUTINE mppmax_int( ktab, kcom ) 1875 !!---------------------------------------------------------------------- 1876 !! *** routine mppmax_int *** 1877 !! 1878 !! ** Purpose : Find maximum value in an integer layout array 1879 !! 1880 !!---------------------------------------------------------------------- 1881 INTEGER, INTENT(inout) :: ktab ! ??? 
1882 INTEGER, INTENT(in ), OPTIONAL :: kcom ! ??? 1883 ! 1884 INTEGER :: ierror, iwork, localcomm ! temporary integer 1885 !!---------------------------------------------------------------------- 1886 ! 1887 localcomm = mpi_comm_opa 1888 IF( PRESENT(kcom) ) localcomm = kcom 1889 ! 1890 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror ) 1891 ! 1892 ktab = iwork 1893 ! 1894 END SUBROUTINE mppmax_int 1895 1896 1897 SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) 1898 !!---------------------------------------------------------------------- 1899 !! *** routine mppmin_a_int *** 1900 !! 1901 !! ** Purpose : Find minimum value in an integer layout array 1902 !! 1903 !!---------------------------------------------------------------------- 1904 INTEGER , INTENT( in ) :: kdim ! size of array 1905 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1906 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 1907 !! 1908 INTEGER :: ierror, localcomm ! temporary integer 1909 INTEGER, DIMENSION(kdim) :: iwork 1910 !!---------------------------------------------------------------------- 1911 ! 1912 localcomm = mpi_comm_opa 1913 IF( PRESENT(kcom) ) localcomm = kcom 1914 ! 1915 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) 1916 ! 1917 ktab(:) = iwork(:) 1918 ! 1919 END SUBROUTINE mppmin_a_int 1920 1921 1922 SUBROUTINE mppmin_int( ktab, kcom ) 1923 !!---------------------------------------------------------------------- 1924 !! *** routine mppmin_int *** 1925 !! 1926 !! ** Purpose : Find minimum value in an integer layout array 1927 !! 1928 !!---------------------------------------------------------------------- 1929 INTEGER, INTENT(inout) :: ktab ! ??? 1930 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 1931 !! 1932 INTEGER :: ierror, iwork, localcomm 1933 !!---------------------------------------------------------------------- 1934 ! 1935 localcomm = mpi_comm_opa 1936 IF( PRESENT(kcom) ) localcomm = kcom 1937 ! 1938 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror ) 1939 ! 1940 ktab = iwork 1941 ! 1942 END SUBROUTINE mppmin_int 1943 1944 1945 SUBROUTINE mppsum_a_int( ktab, kdim ) 1946 !!---------------------------------------------------------------------- 1947 !! *** routine mppsum_a_int *** 1948 !! 1949 !! ** Purpose : Global integer sum, 1D array case 1950 !! 1951 !!---------------------------------------------------------------------- 1952 INTEGER, INTENT(in ) :: kdim ! ??? 1953 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 1954 ! 1955 INTEGER :: ierror 1956 INTEGER, DIMENSION (kdim) :: iwork 1957 !!---------------------------------------------------------------------- 1958 ! 1959 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1960 ! 1961 ktab(:) = iwork(:) 1962 ! 1963 END SUBROUTINE mppsum_a_int 1964 1965 1966 SUBROUTINE mppsum_int( ktab ) 1967 !!---------------------------------------------------------------------- 1968 !! *** routine mppsum_int *** 1969 !! 1970 !! ** Purpose : Global integer sum 1971 !! 1972 !!---------------------------------------------------------------------- 1973 INTEGER, INTENT(inout) :: ktab 1974 !! 1975 INTEGER :: ierror, iwork 1976 !!---------------------------------------------------------------------- 1977 ! 1978 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1979 ! 1980 ktab = iwork 1981 ! 
1982 END SUBROUTINE mppsum_int 1983 1984 1985 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 1986 !!---------------------------------------------------------------------- 1987 !! *** routine mppmax_a_real *** 1988 !! 1989 !! ** Purpose : Maximum 1990 !! 1991 !!---------------------------------------------------------------------- 1992 INTEGER , INTENT(in ) :: kdim 1993 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 1994 INTEGER , INTENT(in ), OPTIONAL :: kcom 1995 ! 1996 INTEGER :: ierror, localcomm 1997 REAL(wp), DIMENSION(kdim) :: zwork 1998 !!---------------------------------------------------------------------- 1999 ! 2000 localcomm = mpi_comm_opa 2001 IF( PRESENT(kcom) ) localcomm = kcom 2002 ! 2003 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror ) 2004 ptab(:) = zwork(:) 2005 ! 2006 END SUBROUTINE mppmax_a_real 2007 2008 2009 SUBROUTINE mppmax_real( ptab, kcom ) 2010 !!---------------------------------------------------------------------- 2011 !! *** routine mppmax_real *** 2012 !! 2013 !! ** Purpose : Maximum 2014 !! 2015 !!---------------------------------------------------------------------- 2016 REAL(wp), INTENT(inout) :: ptab ! ??? 2017 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2018 !! 2019 INTEGER :: ierror, localcomm 2020 REAL(wp) :: zwork 2021 !!---------------------------------------------------------------------- 2022 ! 2023 localcomm = mpi_comm_opa 2024 IF( PRESENT(kcom) ) localcomm = kcom 2025 ! 2026 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror ) 2027 ptab = zwork 2028 ! 2029 END SUBROUTINE mppmax_real 2030 2031 SUBROUTINE mppmax_real_multiple( ptab, NUM , kcom ) 2032 !!---------------------------------------------------------------------- 2033 !! *** routine mppmax_real *** 2034 !! 2035 !! ** Purpose : Maximum 2036 !! 2037 !!---------------------------------------------------------------------- 2038 REAL(wp), DIMENSION(:) , INTENT(inout) :: ptab ! ??? 2039 INTEGER , INTENT(in ) :: NUM 2040 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2041 !! 2042 INTEGER :: ierror, localcomm 2043 REAL(wp) , POINTER , DIMENSION(:) :: zwork 2044 !!---------------------------------------------------------------------- 2045 ! 2046 CALL wrk_alloc(NUM , zwork) 2047 localcomm = mpi_comm_opa 2048 IF( PRESENT(kcom) ) localcomm = kcom 2049 ! 2050 CALL mpi_allreduce( ptab, zwork, NUM, mpi_double_precision, mpi_max, localcomm, ierror ) 2051 ptab = zwork 2052 CALL wrk_dealloc(NUM , zwork) 2053 ! 2054 END SUBROUTINE mppmax_real_multiple 2055 2056 2057 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 2058 !!---------------------------------------------------------------------- 2059 !! *** routine mppmin_a_real *** 2060 !! 2061 !! ** Purpose : Minimum of REAL, array case 2062 !! 2063 !!----------------------------------------------------------------------- 2064 INTEGER , INTENT(in ) :: kdim 2065 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 2066 INTEGER , INTENT(in ), OPTIONAL :: kcom 2067 !! 2068 INTEGER :: ierror, localcomm 2069 REAL(wp), DIMENSION(kdim) :: zwork 2070 !!----------------------------------------------------------------------- 2071 ! 2072 localcomm = mpi_comm_opa 2073 IF( PRESENT(kcom) ) localcomm = kcom 2074 ! 2075 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror ) 2076 ptab(:) = zwork(:) 2077 ! 2078 END SUBROUTINE mppmin_a_real 2079 2080 2081 SUBROUTINE mppmin_real( ptab, kcom ) 2082 !!---------------------------------------------------------------------- 2083 !! 
*** routine mppmin_real *** 2084 !! 2085 !! ** Purpose : minimum of REAL, scalar case 2086 !! 2087 !!----------------------------------------------------------------------- 2088 REAL(wp), INTENT(inout) :: ptab ! 2089 INTEGER , INTENT(in ), OPTIONAL :: kcom 2090 !! 2091 INTEGER :: ierror 2092 REAL(wp) :: zwork 2093 INTEGER :: localcomm 2094 !!----------------------------------------------------------------------- 2095 ! 2096 localcomm = mpi_comm_opa 2097 IF( PRESENT(kcom) ) localcomm = kcom 2098 ! 2099 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror ) 2100 ptab = zwork 2101 ! 2102 END SUBROUTINE mppmin_real 2103 2104 2105 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 2106 !!---------------------------------------------------------------------- 2107 !! *** routine mppsum_a_real *** 2108 !! 2109 !! ** Purpose : global sum, REAL ARRAY argument case 2110 !! 2111 !!----------------------------------------------------------------------- 2112 INTEGER , INTENT( in ) :: kdim ! size of ptab 2113 REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array 2114 INTEGER , INTENT( in ), OPTIONAL :: kcom 2115 !! 2116 INTEGER :: ierror ! temporary integer 2117 INTEGER :: localcomm 2118 REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace 2119 !!----------------------------------------------------------------------- 2120 ! 2121 localcomm = mpi_comm_opa 2122 IF( PRESENT(kcom) ) localcomm = kcom 2123 ! 2124 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror ) 2125 ptab(:) = zwork(:) 2126 ! 2127 END SUBROUTINE mppsum_a_real 2128 2129 2130 SUBROUTINE mppsum_real( ptab, kcom ) 2131 !!---------------------------------------------------------------------- 2132 !! *** routine mppsum_real *** 2133 !! 2134 !! ** Purpose : global sum, SCALAR argument case 2135 !! 2136 !!----------------------------------------------------------------------- 2137 REAL(wp), INTENT(inout) :: ptab ! input scalar 2138 INTEGER , INTENT(in ), OPTIONAL :: kcom 2139 !! 2140 INTEGER :: ierror, localcomm 2141 REAL(wp) :: zwork 2142 !!----------------------------------------------------------------------- 2143 ! 2144 localcomm = mpi_comm_opa 2145 IF( PRESENT(kcom) ) localcomm = kcom 2146 ! 2147 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror ) 2148 ptab = zwork 2149 ! 2150 END SUBROUTINE mppsum_real 2151 2152 2153 SUBROUTINE mppsum_realdd( ytab, kcom ) 2154 !!---------------------------------------------------------------------- 2155 !! *** routine mppsum_realdd *** 2156 !! 2157 !! ** Purpose : global sum in Massively Parallel Processing 2158 !! SCALAR argument case for double-double precision 2159 !! 2160 !!----------------------------------------------------------------------- 2161 COMPLEX(wp), INTENT(inout) :: ytab ! input scalar 2162 INTEGER , INTENT(in ), OPTIONAL :: kcom 2163 ! 2164 INTEGER :: ierror 2165 INTEGER :: localcomm 2166 COMPLEX(wp) :: zwork 2167 !!----------------------------------------------------------------------- 2168 ! 2169 localcomm = mpi_comm_opa 2170 IF( PRESENT(kcom) ) localcomm = kcom 2171 ! 2172 ! reduce local sums into global sum 2173 CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 2174 ytab = zwork 2175 ! 2176 END SUBROUTINE mppsum_realdd 2177 2178 2179 SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom ) 2180 !!---------------------------------------------------------------------- 2181 !! *** routine mppsum_a_realdd *** 2182 !! 2183 !! 
** Purpose : global sum in Massively Parallel Processing 2184 !! COMPLEX ARRAY case for double-double precision 2185 !! 2186 !!----------------------------------------------------------------------- 2187 INTEGER , INTENT(in ) :: kdim ! size of ytab 2188 COMPLEX(wp), DIMENSION(kdim), INTENT(inout) :: ytab ! input array 2189 INTEGER , OPTIONAL , INTENT(in ) :: kcom 2190 ! 2191 INTEGER:: ierror, localcomm ! local integer 2192 COMPLEX(wp), DIMENSION(kdim) :: zwork ! temporary workspace 2193 !!----------------------------------------------------------------------- 2194 ! 2195 localcomm = mpi_comm_opa 2196 IF( PRESENT(kcom) ) localcomm = kcom 2197 ! 2198 CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 2199 ytab(:) = zwork(:) 2200 ! 2201 END SUBROUTINE mppsum_a_realdd 2202 2203 2204 SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj ) 2205 !!------------------------------------------------------------------------ 2206 !! *** routine mpp_minloc *** 2207 !! 2208 !! ** Purpose : Compute the global minimum of an array ptab 2209 !! and also give its global position 2210 !! 2211 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2212 !! 2213 !!-------------------------------------------------------------------------- 2214 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 2215 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 2216 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 2217 INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame 2218 ! 2219 INTEGER :: ierror 2220 INTEGER , DIMENSION(2) :: ilocs 2221 REAL(wp) :: zmin ! local minimum 2222 REAL(wp), DIMENSION(2,1) :: zain, zaout 2223 !!----------------------------------------------------------------------- 2224 ! 2225 zmin = MINVAL( ptab(:,:) , mask= pmask == 1.e0 ) 2226 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 ) 2227 ! 2228 ki = ilocs(1) + nimpp - 1 2229 kj = ilocs(2) + njmpp - 1 2230 ! 2231 zain(1,:)=zmin 2232 zain(2,:)=ki+10000.*kj 2233 ! 2234 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 2235 ! 2236 pmin = zaout(1,1) 2237 kj = INT(zaout(2,1)/10000.) 2238 ki = INT(zaout(2,1) - 10000.*kj ) 2239 ! 2240 END SUBROUTINE mpp_minloc2d 2241 2242 2243 SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj ,kk) 2244 !!------------------------------------------------------------------------ 2245 !! *** routine mpp_minloc *** 2246 !! 2247 !! ** Purpose : Compute the global minimum of an array ptab 2248 !! and also give its global position 2249 !! 2250 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2251 !! 2252 !!-------------------------------------------------------------------------- 2253 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 2254 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 2255 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 2256 INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame 2257 !! 2258 INTEGER :: ierror 2259 REAL(wp) :: zmin ! local minimum 2260 INTEGER , DIMENSION(3) :: ilocs 2261 REAL(wp), DIMENSION(2,1) :: zain, zaout 2262 !!----------------------------------------------------------------------- 2263 ! 2264 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 2265 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 2266 ! 2267 ki = ilocs(1) + nimpp - 1 2268 kj = ilocs(2) + njmpp - 1 2269 kk = ilocs(3) 2270 ! 2271 zain(1,:)=zmin 2272 zain(2,:)=ki+10000.*kj+100000000.*kk 2273 ! 
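      ! zain(1,:) holds the local minimum and zain(2,:) packs the three global indices
      ! into a single real (ki + 1.e4*kj + 1.e8*kk); MPI_MINLOC carries this payload with
      ! the winning minimum and the indices are decoded just after the reduction
      ! (the packing implicitly requires the global i and j sizes to stay below 10000)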
2274 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 2275 ! 2276 pmin = zaout(1,1) 2277 kk = INT( zaout(2,1) / 100000000. ) 2278 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 2279 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 2280 ! 2281 END SUBROUTINE mpp_minloc3d 2282 2283 2284 SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) 2285 !!------------------------------------------------------------------------ 2286 !! *** routine mpp_maxloc *** 2287 !! 2288 !! ** Purpose : Compute the global maximum of an array ptab 2289 !! and also give its global position 2290 !! 2291 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2292 !! 2293 !!-------------------------------------------------------------------------- 2294 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 2295 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 2296 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 2297 INTEGER , INTENT( out) :: ki, kj ! index of maximum in global frame 2298 !! 2299 INTEGER :: ierror 2300 INTEGER, DIMENSION (2) :: ilocs 2301 REAL(wp) :: zmax ! local maximum 2302 REAL(wp), DIMENSION(2,1) :: zain, zaout 2303 !!----------------------------------------------------------------------- 2304 ! 2305 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 ) 2306 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 ) 2307 ! 2308 ki = ilocs(1) + nimpp - 1 2309 kj = ilocs(2) + njmpp - 1 2310 ! 2311 zain(1,:) = zmax 2312 zain(2,:) = ki + 10000. * kj 2313 ! 2314 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 2315 ! 2316 pmax = zaout(1,1) 2317 kj = INT( zaout(2,1) / 10000. ) 2318 ki = INT( zaout(2,1) - 10000.* kj ) 2319 ! 2320 END SUBROUTINE mpp_maxloc2d 2321 2322 2323 SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) 2324 !!------------------------------------------------------------------------ 2325 !! *** routine mpp_maxloc *** 2326 !! 2327 !! ** Purpose : Compute the global maximum of an array ptab 2328 !! and also give its global position 2329 !! 2330 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2331 !! 2332 !!-------------------------------------------------------------------------- 2333 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 2334 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 2335 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 2336 INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame 2337 !! 2338 REAL(wp) :: zmax ! local maximum 2339 REAL(wp), DIMENSION(2,1) :: zain, zaout 2340 INTEGER , DIMENSION(3) :: ilocs 2341 INTEGER :: ierror 2342 !!----------------------------------------------------------------------- 2343 ! 2344 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 2345 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 2346 ! 2347 ki = ilocs(1) + nimpp - 1 2348 kj = ilocs(2) + njmpp - 1 2349 kk = ilocs(3) 2350 ! 2351 zain(1,:)=zmax 2352 zain(2,:)=ki+10000.*kj+100000000.*kk 2353 ! 2354 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 2355 ! 2356 pmax = zaout(1,1) 2357 kk = INT( zaout(2,1) / 100000000. ) 2358 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 2359 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 2360 ! 2361 END SUBROUTINE mpp_maxloc3d 2362 583 584 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 585 !!---------------------------------------------------------------------- 586 !! *** routine mpp_delay_sum *** 587 !! 
588 !! ** Purpose : performed delayed mpp_sum, the result is received on next call 589 !! 590 !!---------------------------------------------------------------------- 591 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 592 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 593 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 594 REAL(wp), INTENT( out), DIMENSION(:) :: pout 595 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 596 INTEGER, INTENT(in ), OPTIONAL :: kcom 597 !! 598 INTEGER :: ji, isz 599 INTEGER :: idvar 600 INTEGER :: ierr, ilocalcomm 601 COMPLEX(wp), ALLOCATABLE, DIMENSION(:) :: ytmp 602 !!---------------------------------------------------------------------- 603 ilocalcomm = mpi_comm_oce 604 IF( PRESENT(kcom) ) ilocalcomm = kcom 605 606 isz = SIZE(y_in) 607 608 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_dlg = .TRUE. ) 609 610 idvar = -1 611 DO ji = 1, nbdelay 612 IF( TRIM(cdelay) == TRIM(c_delaylist(ji)) ) idvar = ji 613 END DO 614 IF ( idvar == -1 ) CALL ctl_stop( 'STOP',' mpp_delay_sum : please add a new delayed exchange for '//TRIM(cdname) ) 615 616 IF ( ndelayid(idvar) == 0 ) THEN ! first call with restart: %z1d defined in iom_delay_rst 617 ! -------------------------- 618 IF ( SIZE(todelay(idvar)%z1d) /= isz ) THEN ! Check dimension coherence 619 IF(lwp) WRITE(numout,*) ' WARNING: the nb of delayed variables in restart file is not the model one' 620 DEALLOCATE(todelay(idvar)%z1d) 621 ndelayid(idvar) = -1 ! do as if we had no restart 622 ELSE 623 ALLOCATE(todelay(idvar)%y1d(isz)) 624 todelay(idvar)%y1d(:) = CMPLX(todelay(idvar)%z1d(:), 0., wp) ! create %y1d, complex variable needed by mpi_sumdd 625 END IF 626 ENDIF 627 628 IF( ndelayid(idvar) == -1 ) THEN ! first call without restart: define %y1d and %z1d from y_in with blocking allreduce 629 ! -------------------------- 630 ALLOCATE(todelay(idvar)%z1d(isz), todelay(idvar)%y1d(isz)) ! allocate also %z1d as used for the restart 631 CALL mpi_allreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ierr ) ! get %y1d 632 todelay(idvar)%z1d(:) = REAL(todelay(idvar)%y1d(:), wp) ! define %z1d from %y1d 633 ENDIF 634 635 IF( ndelayid(idvar) > 0 ) CALL mpp_delay_rcv( idvar ) ! make sure %z1d is received 636 637 ! send back pout from todelay(idvar)%z1d defined at previous call 638 pout(:) = todelay(idvar)%z1d(:) 639 640 ! send y_in into todelay(idvar)%y1d with a non-blocking communication 641 #if defined key_mpi2 642 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 643 CALL mpi_allreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ndelayid(idvar), ierr ) 644 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 645 #else 646 CALL mpi_iallreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ndelayid(idvar), ierr ) 647 #endif 648 649 END SUBROUTINE mpp_delay_sum 650 651 652 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 653 !!---------------------------------------------------------------------- 654 !! *** routine mpp_delay_max *** 655 !! 656 !! ** Purpose : performed delayed mpp_max, the result is received on next call 657 !! 658 !!---------------------------------------------------------------------- 659 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 660 CHARACTER(len=*), INTENT(in ) :: cdelay ! 
name (used as id) of the delayed operation 661 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in ! 662 REAL(wp), INTENT( out), DIMENSION(:) :: pout ! 663 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 664 INTEGER, INTENT(in ), OPTIONAL :: kcom 665 !! 666 INTEGER :: ji, isz 667 INTEGER :: idvar 668 INTEGER :: ierr, ilocalcomm 669 !!---------------------------------------------------------------------- 670 ilocalcomm = mpi_comm_oce 671 IF( PRESENT(kcom) ) ilocalcomm = kcom 672 673 isz = SIZE(p_in) 674 675 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_dlg = .TRUE. ) 676 677 idvar = -1 678 DO ji = 1, nbdelay 679 IF( TRIM(cdelay) == TRIM(c_delaylist(ji)) ) idvar = ji 680 END DO 681 IF ( idvar == -1 ) CALL ctl_stop( 'STOP',' mpp_delay_max : please add a new delayed exchange for '//TRIM(cdname) ) 682 683 IF ( ndelayid(idvar) == 0 ) THEN ! first call with restart: %z1d defined in iom_delay_rst 684 ! -------------------------- 685 IF ( SIZE(todelay(idvar)%z1d) /= isz ) THEN ! Check dimension coherence 686 IF(lwp) WRITE(numout,*) ' WARNING: the nb of delayed variables in restart file is not the model one' 687 DEALLOCATE(todelay(idvar)%z1d) 688 ndelayid(idvar) = -1 ! do as if we had no restart 689 END IF 690 ENDIF 691 692 IF( ndelayid(idvar) == -1 ) THEN ! first call without restart: define %z1d from p_in with a blocking allreduce 693 ! -------------------------- 694 ALLOCATE(todelay(idvar)%z1d(isz)) 695 CALL mpi_allreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ierr ) ! get %z1d 696 ENDIF 697 698 IF( ndelayid(idvar) > 0 ) CALL mpp_delay_rcv( idvar ) ! make sure %z1d is received 699 700 ! send back pout from todelay(idvar)%z1d defined at previous call 701 pout(:) = todelay(idvar)%z1d(:) 702 703 ! send p_in into todelay(idvar)%z1d with a non-blocking communication 704 #if defined key_mpi2 705 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 706 CALL mpi_allreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ndelayid(idvar), ierr ) 707 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 708 #else 709 CALL mpi_iallreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ndelayid(idvar), ierr ) 710 #endif 711 712 END SUBROUTINE mpp_delay_max 713 714 715 SUBROUTINE mpp_delay_rcv( kid ) 716 !!---------------------------------------------------------------------- 717 !! *** routine mpp_delay_rcv *** 718 !! 719 !! ** Purpose : force barrier for delayed mpp (needed for restart) 720 !! 721 !!---------------------------------------------------------------------- 722 INTEGER,INTENT(in ) :: kid 723 INTEGER :: ierr 724 !!---------------------------------------------------------------------- 725 IF( ndelayid(kid) /= -2 ) THEN 726 #if ! defined key_mpi2 727 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 728 CALL mpi_wait( ndelayid(kid), MPI_STATUS_IGNORE, ierr ) ! make sure todelay(kid) is received 729 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 730 #endif 731 IF( ASSOCIATED(todelay(kid)%y1d) ) todelay(kid)%z1d(:) = REAL(todelay(kid)%y1d(:), wp) ! define %z1d from %y1d 732 ndelayid(kid) = -2 ! add flag to know that mpi_wait was already called on kid 733 ENDIF 734 END SUBROUTINE mpp_delay_rcv 735 736 737 !!---------------------------------------------------------------------- 738 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 739 !! 
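!!   these routines are now expanded from the generic include mpp_allreduce_generic.h90:
!!   the macros below select the operation (here OPERATION_MAX), the argument type
!!   (INTEGER_TYPE or REAL_TYPE) and the dimensionality (DIM_0d scalar / DIM_1d array)
!!   before each include, giving one routine per combination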
740 !!---------------------------------------------------------------------- 741 !! 742 # define OPERATION_MAX 743 # define INTEGER_TYPE 744 # define DIM_0d 745 # define ROUTINE_ALLREDUCE mppmax_int 746 # include "mpp_allreduce_generic.h90" 747 # undef ROUTINE_ALLREDUCE 748 # undef DIM_0d 749 # define DIM_1d 750 # define ROUTINE_ALLREDUCE mppmax_a_int 751 # include "mpp_allreduce_generic.h90" 752 # undef ROUTINE_ALLREDUCE 753 # undef DIM_1d 754 # undef INTEGER_TYPE 755 ! 756 # define REAL_TYPE 757 # define DIM_0d 758 # define ROUTINE_ALLREDUCE mppmax_real 759 # include "mpp_allreduce_generic.h90" 760 # undef ROUTINE_ALLREDUCE 761 # undef DIM_0d 762 # define DIM_1d 763 # define ROUTINE_ALLREDUCE mppmax_a_real 764 # include "mpp_allreduce_generic.h90" 765 # undef ROUTINE_ALLREDUCE 766 # undef DIM_1d 767 # undef REAL_TYPE 768 # undef OPERATION_MAX 769 !!---------------------------------------------------------------------- 770 !! *** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 771 !! 772 !!---------------------------------------------------------------------- 773 !! 774 # define OPERATION_MIN 775 # define INTEGER_TYPE 776 # define DIM_0d 777 # define ROUTINE_ALLREDUCE mppmin_int 778 # include "mpp_allreduce_generic.h90" 779 # undef ROUTINE_ALLREDUCE 780 # undef DIM_0d 781 # define DIM_1d 782 # define ROUTINE_ALLREDUCE mppmin_a_int 783 # include "mpp_allreduce_generic.h90" 784 # undef ROUTINE_ALLREDUCE 785 # undef DIM_1d 786 # undef INTEGER_TYPE 787 ! 788 # define REAL_TYPE 789 # define DIM_0d 790 # define ROUTINE_ALLREDUCE mppmin_real 791 # include "mpp_allreduce_generic.h90" 792 # undef ROUTINE_ALLREDUCE 793 # undef DIM_0d 794 # define DIM_1d 795 # define ROUTINE_ALLREDUCE mppmin_a_real 796 # include "mpp_allreduce_generic.h90" 797 # undef ROUTINE_ALLREDUCE 798 # undef DIM_1d 799 # undef REAL_TYPE 800 # undef OPERATION_MIN 801 802 !!---------------------------------------------------------------------- 803 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 804 !! 805 !! Global sum of 1D array or a variable (integer, real or complex) 806 !!---------------------------------------------------------------------- 807 !! 808 # define OPERATION_SUM 809 # define INTEGER_TYPE 810 # define DIM_0d 811 # define ROUTINE_ALLREDUCE mppsum_int 812 # include "mpp_allreduce_generic.h90" 813 # undef ROUTINE_ALLREDUCE 814 # undef DIM_0d 815 # define DIM_1d 816 # define ROUTINE_ALLREDUCE mppsum_a_int 817 # include "mpp_allreduce_generic.h90" 818 # undef ROUTINE_ALLREDUCE 819 # undef DIM_1d 820 # undef INTEGER_TYPE 821 ! 822 # define REAL_TYPE 823 # define DIM_0d 824 # define ROUTINE_ALLREDUCE mppsum_real 825 # include "mpp_allreduce_generic.h90" 826 # undef ROUTINE_ALLREDUCE 827 # undef DIM_0d 828 # define DIM_1d 829 # define ROUTINE_ALLREDUCE mppsum_a_real 830 # include "mpp_allreduce_generic.h90" 831 # undef ROUTINE_ALLREDUCE 832 # undef DIM_1d 833 # undef REAL_TYPE 834 # undef OPERATION_SUM 835 836 # define OPERATION_SUM_DD 837 # define COMPLEX_TYPE 838 # define DIM_0d 839 # define ROUTINE_ALLREDUCE mppsum_realdd 840 # include "mpp_allreduce_generic.h90" 841 # undef ROUTINE_ALLREDUCE 842 # undef DIM_0d 843 # define DIM_1d 844 # define ROUTINE_ALLREDUCE mppsum_a_realdd 845 # include "mpp_allreduce_generic.h90" 846 # undef ROUTINE_ALLREDUCE 847 # undef DIM_1d 848 # undef COMPLEX_TYPE 849 # undef OPERATION_SUM_DD 850 851 !!---------------------------------------------------------------------- 852 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 853 !! 
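!!   likewise expanded from mpp_loc_generic.h90: OPERATION_MINLOC / OPERATION_MAXLOC
!!   combined with DIM_2d / DIM_3d generate the four location-finding routines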
854 !!---------------------------------------------------------------------- 855 !! 856 # define OPERATION_MINLOC 857 # define DIM_2d 858 # define ROUTINE_LOC mpp_minloc2d 859 # include "mpp_loc_generic.h90" 860 # undef ROUTINE_LOC 861 # undef DIM_2d 862 # define DIM_3d 863 # define ROUTINE_LOC mpp_minloc3d 864 # include "mpp_loc_generic.h90" 865 # undef ROUTINE_LOC 866 # undef DIM_3d 867 # undef OPERATION_MINLOC 868 869 # define OPERATION_MAXLOC 870 # define DIM_2d 871 # define ROUTINE_LOC mpp_maxloc2d 872 # include "mpp_loc_generic.h90" 873 # undef ROUTINE_LOC 874 # undef DIM_2d 875 # define DIM_3d 876 # define ROUTINE_LOC mpp_maxloc3d 877 # include "mpp_loc_generic.h90" 878 # undef ROUTINE_LOC 879 # undef DIM_3d 880 # undef OPERATION_MAXLOC 2363 881 2364 882 SUBROUTINE mppsync() … … 2372 890 !!----------------------------------------------------------------------- 2373 891 ! 2374 CALL mpi_barrier( mpi_comm_o pa, ierror )892 CALL mpi_barrier( mpi_comm_oce, ierror ) 2375 893 ! 2376 894 END SUBROUTINE mppsync 2377 895 2378 896 2379 SUBROUTINE mppstop 897 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 2380 898 !!---------------------------------------------------------------------- 2381 899 !! *** routine mppstop *** … … 2384 902 !! 2385 903 !!---------------------------------------------------------------------- 904 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 905 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 906 LOGICAL :: llfinal, ll_force_abort 2386 907 INTEGER :: info 2387 908 !!---------------------------------------------------------------------- 2388 ! 2389 CALL mppsync 2390 CALL mpi_finalize( info ) 909 llfinal = .FALSE. 910 IF( PRESENT(ldfinal) ) llfinal = ldfinal 911 ll_force_abort = .FALSE. 912 IF( PRESENT(ld_force_abort) ) ll_force_abort = ld_force_abort 913 ! 914 IF(ll_force_abort) THEN 915 CALL mpi_abort( MPI_COMM_WORLD ) 916 ELSE 917 CALL mppsync 918 CALL mpi_finalize( info ) 919 ENDIF 920 IF( .NOT. llfinal ) STOP 123456 2391 921 ! 2392 922 END SUBROUTINE mppstop … … 2395 925 SUBROUTINE mpp_comm_free( kcom ) 2396 926 !!---------------------------------------------------------------------- 2397 !!----------------------------------------------------------------------2398 927 INTEGER, INTENT(in) :: kcom 2399 928 !! … … 2404 933 ! 2405 934 END SUBROUTINE mpp_comm_free 2406 2407 2408 SUBROUTINE mpp_ini_ice( pindic, kumout )2409 !!----------------------------------------------------------------------2410 !! *** routine mpp_ini_ice ***2411 !!2412 !! ** Purpose : Initialize special communicator for ice areas2413 !! condition together with global variables needed in the ddmpp folding2414 !!2415 !! ** Method : - Look for ice processors in ice routines2416 !! - Put their number in nrank_ice2417 !! - Create groups for the world processors and the ice processors2418 !! - Create a communicator for ice processors2419 !!2420 !! ** output2421 !! njmppmax = njmpp for northern procs2422 !! ndim_rank_ice = number of processors with ice2423 !! nrank_ice (ndim_rank_ice) = ice processors2424 !! ngrp_iworld = group ID for the world processors2425 !! ngrp_ice = group ID for the ice processors2426 !! ncomm_ice = communicator for the ice procs.2427 !! n_ice_root = number (in the world) of proc 0 in the ice comm.2428 !!2429 !!----------------------------------------------------------------------2430 INTEGER, INTENT(in) :: pindic2431 INTEGER, INTENT(in) :: kumout ! 
ocean.output logical unit2432 !!2433 INTEGER :: jjproc2434 INTEGER :: ii, ierr2435 INTEGER, ALLOCATABLE, DIMENSION(:) :: kice2436 INTEGER, ALLOCATABLE, DIMENSION(:) :: zwork2437 !!----------------------------------------------------------------------2438 !2439 ! Since this is just an init routine and these arrays are of length jpnij2440 ! then don't use wrk_nemo module - just allocate and deallocate.2441 ALLOCATE( kice(jpnij), zwork(jpnij), STAT=ierr )2442 IF( ierr /= 0 ) THEN2443 WRITE(kumout, cform_err)2444 WRITE(kumout,*) 'mpp_ini_ice : failed to allocate 2, 1D arrays (jpnij in length)'2445 CALL mppstop2446 ENDIF2447 2448 ! Look for how many procs with sea-ice2449 !2450 kice = 02451 DO jjproc = 1, jpnij2452 IF( jjproc == narea .AND. pindic .GT. 0 ) kice(jjproc) = 12453 END DO2454 !2455 zwork = 02456 CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )2457 ndim_rank_ice = SUM( zwork )2458 2459 ! Allocate the right size to nrank_north2460 IF( ALLOCATED ( nrank_ice ) ) DEALLOCATE( nrank_ice )2461 ALLOCATE( nrank_ice(ndim_rank_ice) )2462 !2463 ii = 02464 nrank_ice = 02465 DO jjproc = 1, jpnij2466 IF( zwork(jjproc) == 1) THEN2467 ii = ii + 12468 nrank_ice(ii) = jjproc -12469 ENDIF2470 END DO2471 2472 ! Create the world group2473 CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_iworld, ierr )2474 2475 ! Create the ice group from the world group2476 CALL MPI_GROUP_INCL( ngrp_iworld, ndim_rank_ice, nrank_ice, ngrp_ice, ierr )2477 2478 ! Create the ice communicator , ie the pool of procs with sea-ice2479 CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr )2480 2481 ! Find proc number in the world of proc 0 in the north2482 ! The following line seems to be useless, we just comment & keep it as reminder2483 ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_iworld,n_ice_root,ierr)2484 !2485 CALL MPI_GROUP_FREE(ngrp_ice, ierr)2486 CALL MPI_GROUP_FREE(ngrp_iworld, ierr)2487 2488 DEALLOCATE(kice, zwork)2489 !2490 END SUBROUTINE mpp_ini_ice2491 935 2492 936 … … 2518 962 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world : ', ngrp_world 2519 963 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world 2520 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_o pa : ', mpi_comm_opa964 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_oce : ', mpi_comm_oce 2521 965 ! 2522 966 ALLOCATE( kwork(jpnij), STAT=ierr ) … … 2529 973 IF( jpnj == 1 ) THEN 2530 974 ngrp_znl = ngrp_world 2531 ncomm_znl = mpi_comm_o pa975 ncomm_znl = mpi_comm_oce 2532 976 ELSE 2533 977 ! 2534 CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_o pa, ierr )978 CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_oce, ierr ) 2535 979 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork 2536 980 !-$$ CALL flush(numout) … … 2560 1004 2561 1005 ! Create the opa group 2562 CALL MPI_COMM_GROUP(mpi_comm_o pa,ngrp_opa,ierr)1006 CALL MPI_COMM_GROUP(mpi_comm_oce,ngrp_opa,ierr) 2563 1007 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa 2564 1008 !-$$ CALL flush(numout) … … 2570 1014 2571 1015 ! Create the znl communicator from the opa communicator, ie the pool of procs in the same row 2572 CALL MPI_COMM_CREATE ( mpi_comm_o pa, ngrp_znl, ncomm_znl, ierr )1016 CALL MPI_COMM_CREATE ( mpi_comm_oce, ngrp_znl, ncomm_znl, ierr ) 2573 1017 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl 2574 1018 !-$$ CALL flush(numout) … … 2582 1026 l_znl_root = .FALSE. 
2583 1027 kwork (1) = nimpp 2584 CALL mpp_min ( kwork(1), kcom = ncomm_znl)1028 CALL mpp_min ( 'lib_mpp', kwork(1), kcom = ncomm_znl) 2585 1029 IF ( nimpp == kwork(1)) l_znl_root = .TRUE. 2586 1030 END IF … … 2641 1085 ! 2642 1086 ! create the world group 2643 CALL MPI_COMM_GROUP( mpi_comm_o pa, ngrp_world, ierr )1087 CALL MPI_COMM_GROUP( mpi_comm_oce, ngrp_world, ierr ) 2644 1088 ! 2645 1089 ! Create the North group from the world group … … 2647 1091 ! 2648 1092 ! Create the North communicator , ie the pool of procs in the north group 2649 CALL MPI_COMM_CREATE( mpi_comm_o pa, ngrp_north, ncomm_north, ierr )1093 CALL MPI_COMM_CREATE( mpi_comm_oce, ngrp_north, ncomm_north, ierr ) 2650 1094 ! 2651 1095 END SUBROUTINE mpp_ini_north 2652 1096 2653 1097 2654 SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn ) 2655 !!--------------------------------------------------------------------- 2656 !! *** routine mpp_lbc_north_3d *** 2657 !! 2658 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2659 !! in mpp configuration in case of jpn1 > 1 2660 !! 2661 !! ** Method : North fold condition and mpp with more than one proc 2662 !! in i-direction require a specific treatment. We gather 2663 !! the 4 northern lines of the global domain on 1 processor 2664 !! and apply lbc north-fold on this sub array. Then we 2665 !! scatter the north fold array back to the processors. 2666 !! 2667 !!---------------------------------------------------------------------- 2668 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied 2669 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 2670 ! ! = T , U , V , F or W gridpoints 2671 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2672 !! ! = 1. , the sign is kept 2673 INTEGER :: ji, jj, jr, jk 2674 INTEGER :: ierr, itaille, ildi, ilei, iilb 2675 INTEGER :: ijpj, ijpjm1, ij, iproc 2676 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2677 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2678 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather 2679 ! ! Workspace for message transfers avoiding mpi_allgather 2680 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab 2681 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2682 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio 2683 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr 2684 2685 INTEGER :: istatus(mpi_status_size) 2686 INTEGER :: iflag 2687 !!---------------------------------------------------------------------- 2688 ! 2689 ALLOCATE( ztab(jpiglo,4,jpk) , znorthloc(jpi,4,jpk), zfoldwk(jpi,4,jpk), znorthgloio(jpi,4,jpk,jpni) ) 2690 ALLOCATE( ztabl(jpi,4,jpk), ztabr(jpi*jpmaxngh, 4, jpk) ) 2691 2692 ijpj = 4 2693 ijpjm1 = 3 2694 ! 2695 znorthloc(:,:,:) = 0 2696 DO jk = 1, jpk 2697 DO jj = nlcj - ijpj +1, nlcj ! put in xnorthloc the last 4 jlines of pt3d 2698 ij = jj - nlcj + ijpj 2699 znorthloc(:,ij,jk) = pt3d(:,jj,jk) 2700 END DO 2701 END DO 2702 ! 2703 ! ! Build in procs of ncomm_north the znorthgloio 2704 itaille = jpi * jpk * ijpj 2705 2706 IF ( l_north_nogather ) THEN 2707 ! 2708 ztabr(:,:,:) = 0 2709 ztabl(:,:,:) = 0 2710 2711 DO jk = 1, jpk 2712 DO jj = nlcj-ijpj+1, nlcj ! 
First put local values into the global array 2713 ij = jj - nlcj + ijpj 2714 DO ji = nfsloop, nfeloop 2715 ztabl(ji,ij,jk) = pt3d(ji,jj,jk) 2716 END DO 2717 END DO 2718 END DO 2719 2720 DO jr = 1,nsndto 2721 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2722 CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) ) 2723 ENDIF 2724 END DO 2725 DO jr = 1,nsndto 2726 iproc = nfipproc(isendto(jr),jpnj) 2727 IF(iproc .ne. -1) THEN 2728 ilei = nleit (iproc+1) 2729 ildi = nldit (iproc+1) 2730 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 2731 ENDIF 2732 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 2733 CALL mpprecv(5, zfoldwk, itaille, iproc) 2734 DO jk = 1, jpk 2735 DO jj = 1, ijpj 2736 DO ji = ildi, ilei 2737 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) 2738 END DO 2739 END DO 2740 END DO 2741 ELSE IF (iproc .eq. (narea-1)) THEN 2742 DO jk = 1, jpk 2743 DO jj = 1, ijpj 2744 DO ji = ildi, ilei 2745 ztabr(iilb+ji,jj,jk) = pt3d(ji,nlcj-ijpj+jj,jk) 2746 END DO 2747 END DO 2748 END DO 2749 ENDIF 2750 END DO 2751 IF (l_isend) THEN 2752 DO jr = 1,nsndto 2753 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2754 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 2755 ENDIF 2756 END DO 2757 ENDIF 2758 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition 2759 DO jk = 1, jpk 2760 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d 2761 ij = jj - nlcj + ijpj 2762 DO ji= 1, nlci 2763 pt3d(ji,jj,jk) = ztabl(ji,ij,jk) 2764 END DO 2765 END DO 2766 END DO 2767 ! 2768 2769 ELSE 2770 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2771 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2772 ! 2773 ztab(:,:,:) = 0.e0 2774 DO jr = 1, ndim_rank_north ! recover the global north array 2775 iproc = nrank_north(jr) + 1 2776 ildi = nldit (iproc) 2777 ilei = nleit (iproc) 2778 iilb = nimppt(iproc) 2779 DO jk = 1, jpk 2780 DO jj = 1, ijpj 2781 DO ji = ildi, ilei 2782 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) 2783 END DO 2784 END DO 2785 END DO 2786 END DO 2787 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2788 ! 2789 DO jk = 1, jpk 2790 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d 2791 ij = jj - nlcj + ijpj 2792 DO ji= 1, nlci 2793 pt3d(ji,jj,jk) = ztab(ji+nimpp-1,ij,jk) 2794 END DO 2795 END DO 2796 END DO 2797 ! 2798 ENDIF 2799 ! 2800 ! The ztab array has been either: 2801 ! a. Fully populated by the mpi_allgather operation or 2802 ! b. Had the active points for this domain and northern neighbours populated 2803 ! by peer to peer exchanges 2804 ! Either way the array may be folded by lbc_nfd and the result for the span of 2805 ! this domain will be identical. 2806 ! 2807 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 2808 DEALLOCATE( ztabl, ztabr ) 2809 ! 2810 END SUBROUTINE mpp_lbc_north_3d 2811 2812 2813 SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn) 2814 !!--------------------------------------------------------------------- 2815 !! *** routine mpp_lbc_north_2d *** 2816 !! 2817 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2818 !! in mpp configuration in case of jpn1 > 1 (for 2d array ) 2819 !! 2820 !! ** Method : North fold condition and mpp with more than one proc 2821 !! in i-direction require a specific treatment. We gather 2822 !! the 4 northern lines of the global domain on 1 processor 2823 !! and apply lbc north-fold on this sub array. Then we 2824 !! 
scatter the north fold array back to the processors. 2825 !! 2826 !!---------------------------------------------------------------------- 2827 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the b.c. is applied 2828 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points 2829 ! ! = T , U , V , F or W gridpoints 2830 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2831 !! ! = 1. , the sign is kept 2832 INTEGER :: ji, jj, jr 2833 INTEGER :: ierr, itaille, ildi, ilei, iilb 2834 INTEGER :: ijpj, ijpjm1, ij, iproc 2835 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2836 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2837 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather 2838 ! ! Workspace for message transfers avoiding mpi_allgather 2839 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab 2840 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2841 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio 2842 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztabl, ztabr 2843 INTEGER :: istatus(mpi_status_size) 2844 INTEGER :: iflag 2845 !!---------------------------------------------------------------------- 2846 ! 2847 ALLOCATE( ztab(jpiglo,4), znorthloc(jpi,4), zfoldwk(jpi,4), znorthgloio(jpi,4,jpni) ) 2848 ALLOCATE( ztabl(jpi,4), ztabr(jpi*jpmaxngh, 4) ) 2849 ! 2850 ijpj = 4 2851 ijpjm1 = 3 2852 ! 2853 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d 2854 ij = jj - nlcj + ijpj 2855 znorthloc(:,ij) = pt2d(:,jj) 2856 END DO 2857 2858 ! ! Build in procs of ncomm_north the znorthgloio 2859 itaille = jpi * ijpj 2860 IF ( l_north_nogather ) THEN 2861 ! 2862 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified 2863 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange 2864 ! 2865 ztabr(:,:) = 0 2866 ztabl(:,:) = 0 2867 2868 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array 2869 ij = jj - nlcj + ijpj 2870 DO ji = nfsloop, nfeloop 2871 ztabl(ji,ij) = pt2d(ji,jj) 2872 END DO 2873 END DO 2874 2875 DO jr = 1,nsndto 2876 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2877 CALL mppsend(5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) 2878 ENDIF 2879 END DO 2880 DO jr = 1,nsndto 2881 iproc = nfipproc(isendto(jr),jpnj) 2882 IF(iproc .ne. -1) THEN 2883 ilei = nleit (iproc+1) 2884 ildi = nldit (iproc+1) 2885 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 2886 ENDIF 2887 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 2888 CALL mpprecv(5, zfoldwk, itaille, iproc) 2889 DO jj = 1, ijpj 2890 DO ji = ildi, ilei 2891 ztabr(iilb+ji,jj) = zfoldwk(ji,jj) 2892 END DO 2893 END DO 2894 ELSE IF (iproc .eq. (narea-1)) THEN 2895 DO jj = 1, ijpj 2896 DO ji = ildi, ilei 2897 ztabr(iilb+ji,jj) = pt2d(ji,nlcj-ijpj+jj) 2898 END DO 2899 END DO 2900 ENDIF 2901 END DO 2902 IF (l_isend) THEN 2903 DO jr = 1,nsndto 2904 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2905 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 2906 ENDIF 2907 END DO 2908 ENDIF 2909 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition 2910 ! 2911 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2912 ij = jj - nlcj + ijpj 2913 DO ji = 1, nlci 2914 pt2d(ji,jj) = ztabl(ji,ij) 2915 END DO 2916 END DO 2917 ! 
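The gather / fold / scatter idea behind mpp_lbc_north_2d and its 3D and multi-field variants can be reduced to the following sketch of the MPI_ALLGATHER branch; it assumes equal-width, non-overlapping subdomains along i, and a plain mirror stands in for the real lbc_nfd fold (routine and argument names are illustrative):

   SUBROUTINE nfold_allgather_sketch( pt2d, kjpi, kjpj, kjpni, kcomm_north )
      !! sketch: gather the 4 northernmost rows of every subdomain, rebuild the global
      !! northern strip, apply a fold (placeholder mirror instead of lbc_nfd), then
      !! copy this subdomain's part of the folded strip back
      USE mpi
      IMPLICIT NONE
      INTEGER ,                       INTENT(in   ) :: kjpi, kjpj    ! local subdomain size
      INTEGER ,                       INTENT(in   ) :: kjpni         ! number of subdomains along i
      INTEGER ,                       INTENT(in   ) :: kcomm_north   ! communicator of the northern row of procs
      REAL(8) , DIMENSION(kjpi,kjpj), INTENT(inout) :: pt2d
      REAL(8) , DIMENSION(kjpi,4)        :: zloc
      REAL(8) , DIMENSION(kjpi,4,kjpni)  :: zglo
      REAL(8) , DIMENSION(kjpi*kjpni,4)  :: zstrip
      INTEGER :: ierr, irank, jr, i0
      !
      CALL MPI_COMM_RANK( kcomm_north, irank, ierr )
      zloc(:,:) = pt2d(:,kjpj-3:kjpj)                    ! last 4 rows of this subdomain
      CALL MPI_ALLGATHER( zloc, 4*kjpi, MPI_DOUBLE_PRECISION,   &
         &                zglo, 4*kjpi, MPI_DOUBLE_PRECISION, kcomm_north, ierr )
      DO jr = 1, kjpni                                   ! rebuild the global northern strip
         zstrip( (jr-1)*kjpi+1 : jr*kjpi , : ) = zglo(:,:,jr)
      END DO
      zstrip(:,4) = zstrip(kjpi*kjpni:1:-1,3)            ! placeholder "fold": mirror row 3 into row 4
      i0 = irank * kjpi                                  ! offset of this subdomain in the strip
      pt2d(:,kjpj-3:kjpj) = zstrip(i0+1:i0+kjpi,:)       ! scatter back to the local array
   END SUBROUTINE nfold_allgather_sketch

The l_north_nogather branch above replaces the collective by point-to-point mppsend/mpprecv exchanges with the neighbours identified in nemo_northcomms, which avoids involving every northern process in the gather.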
2918 ELSE 2919 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2920 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2921 ! 2922 ztab(:,:) = 0.e0 2923 DO jr = 1, ndim_rank_north ! recover the global north array 2924 iproc = nrank_north(jr) + 1 2925 ildi = nldit (iproc) 2926 ilei = nleit (iproc) 2927 iilb = nimppt(iproc) 2928 DO jj = 1, ijpj 2929 DO ji = ildi, ilei 2930 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 2931 END DO 2932 END DO 2933 END DO 2934 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2935 ! 2936 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2937 ij = jj - nlcj + ijpj 2938 DO ji = 1, nlci 2939 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2940 END DO 2941 END DO 2942 ! 2943 ENDIF 2944 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 2945 DEALLOCATE( ztabl, ztabr ) 2946 ! 2947 END SUBROUTINE mpp_lbc_north_2d 2948 2949 SUBROUTINE mpp_lbc_north_2d_multiple( pt2d_array, cd_type, psgn, num_fields) 2950 !!--------------------------------------------------------------------- 2951 !! *** routine mpp_lbc_north_2d *** 2952 !! 2953 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2954 !! in mpp configuration in case of jpn1 > 1 2955 !! (for multiple 2d arrays ) 2956 !! 2957 !! ** Method : North fold condition and mpp with more than one proc 2958 !! in i-direction require a specific treatment. We gather 2959 !! the 4 northern lines of the global domain on 1 processor 2960 !! and apply lbc north-fold on this sub array. Then we 2961 !! scatter the north fold array back to the processors. 2962 !! 2963 !!---------------------------------------------------------------------- 2964 INTEGER , INTENT (in ) :: num_fields ! number of variables contained in pt2d 2965 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array 2966 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: cd_type ! nature of pt2d grid-points 2967 ! ! = T , U , V , F or W gridpoints 2968 REAL(wp), DIMENSION(:), INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2969 !! ! = 1. , the sign is kept 2970 INTEGER :: ji, jj, jr, jk 2971 INTEGER :: ierr, itaille, ildi, ilei, iilb 2972 INTEGER :: ijpj, ijpjm1, ij, iproc 2973 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2974 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2975 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather 2976 ! ! Workspace for message transfers avoiding mpi_allgather 2977 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab 2978 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2979 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio 2980 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr 2981 INTEGER :: istatus(mpi_status_size) 2982 INTEGER :: iflag 2983 !!---------------------------------------------------------------------- 2984 ! 2985 ALLOCATE( ztab(jpiglo,4,num_fields), znorthloc(jpi,4,num_fields), zfoldwk(jpi,4,num_fields), znorthgloio(jpi,4,num_fields,jpni) ) ! expanded to 3 dimensions 2986 ALLOCATE( ztabl(jpi,4,num_fields), ztabr(jpi*jpmaxngh, 4,num_fields) ) 2987 ! 2988 ijpj = 4 2989 ijpjm1 = 3 2990 ! 2991 2992 DO jk = 1, num_fields 2993 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d (for every variable) 2994 ij = jj - nlcj + ijpj 2995 znorthloc(:,ij,jk) = pt2d_array(jk)%pt2d(:,jj) 2996 END DO 2997 END DO 2998 ! ! Build in procs of ncomm_north the znorthgloio 2999 itaille = jpi * ijpj 3000 3001 IF ( l_north_nogather ) THEN 3002 ! 3003 ! 
Avoid the use of mpi_allgather by exchanging only with the processes already identified 3004 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange 3005 ! 3006 ztabr(:,:,:) = 0 3007 ztabl(:,:,:) = 0 3008 3009 DO jk = 1, num_fields 3010 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array 3011 ij = jj - nlcj + ijpj 3012 DO ji = nfsloop, nfeloop 3013 ztabl(ji,ij,jk) = pt2d_array(jk)%pt2d(ji,jj) 3014 END DO 3015 END DO 3016 END DO 3017 3018 DO jr = 1,nsndto 3019 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 3020 CALL mppsend(5, znorthloc, itaille*num_fields, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) ! Buffer expanded "num_fields" times 3021 ENDIF 3022 END DO 3023 DO jr = 1,nsndto 3024 iproc = nfipproc(isendto(jr),jpnj) 3025 IF(iproc .ne. -1) THEN 3026 ilei = nleit (iproc+1) 3027 ildi = nldit (iproc+1) 3028 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 3029 ENDIF 3030 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 3031 CALL mpprecv(5, zfoldwk, itaille*num_fields, iproc) ! Buffer expanded "num_fields" times 3032 DO jk = 1 , num_fields 3033 DO jj = 1, ijpj 3034 DO ji = ildi, ilei 3035 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) ! Modified to 3D 3036 END DO 3037 END DO 3038 END DO 3039 ELSE IF (iproc .eq. (narea-1)) THEN 3040 DO jk = 1, num_fields 3041 DO jj = 1, ijpj 3042 DO ji = ildi, ilei 3043 ztabr(iilb+ji,jj,jk) = pt2d_array(jk)%pt2d(ji,nlcj-ijpj+jj) ! Modified to 3D 3044 END DO 3045 END DO 3046 END DO 3047 ENDIF 3048 END DO 3049 IF (l_isend) THEN 3050 DO jr = 1,nsndto 3051 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 3052 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 3053 ENDIF 3054 END DO 3055 ENDIF 3056 ! 3057 DO ji = 1, num_fields ! Loop to manage 3D variables 3058 CALL mpp_lbc_nfd( ztabl(:,:,ji), ztabr(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition 3059 END DO 3060 ! 3061 DO jk = 1, num_fields 3062 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 3063 ij = jj - nlcj + ijpj 3064 DO ji = 1, nlci 3065 pt2d_array(jk)%pt2d(ji,jj) = ztabl(ji,ij,jk) ! Modified to 3D 3066 END DO 3067 END DO 3068 END DO 3069 3070 ! 3071 ELSE 3072 ! 3073 CALL MPI_ALLGATHER( znorthloc , itaille*num_fields, MPI_DOUBLE_PRECISION, & 3074 & znorthgloio, itaille*num_fields, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 3075 ! 3076 ztab(:,:,:) = 0.e0 3077 DO jk = 1, num_fields 3078 DO jr = 1, ndim_rank_north ! recover the global north array 3079 iproc = nrank_north(jr) + 1 3080 ildi = nldit (iproc) 3081 ilei = nleit (iproc) 3082 iilb = nimppt(iproc) 3083 DO jj = 1, ijpj 3084 DO ji = ildi, ilei 3085 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) 3086 END DO 3087 END DO 3088 END DO 3089 END DO 3090 3091 DO ji = 1, num_fields 3092 CALL lbc_nfd( ztab(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition 3093 END DO 3094 ! 3095 DO jk = 1, num_fields 3096 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 3097 ij = jj - nlcj + ijpj 3098 DO ji = 1, nlci 3099 pt2d_array(jk)%pt2d(ji,jj) = ztab(ji+nimpp-1,ij,jk) 3100 END DO 3101 END DO 3102 END DO 3103 ! 3104 ! 3105 ENDIF 3106 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 3107 DEALLOCATE( ztabl, ztabr ) 3108 ! 3109 END SUBROUTINE mpp_lbc_north_2d_multiple 3110 3111 SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn) 3112 !!--------------------------------------------------------------------- 3113 !! *** routine mpp_lbc_north_2d *** 3114 !! 3115 !! 
** Purpose : Ensure proper north fold horizontal bondary condition 3116 !! in mpp configuration in case of jpn1 > 1 and for 2d 3117 !! array with outer extra halo 3118 !! 3119 !! ** Method : North fold condition and mpp with more than one proc 3120 !! in i-direction require a specific treatment. We gather 3121 !! the 4+2*jpr2dj northern lines of the global domain on 1 3122 !! processor and apply lbc north-fold on this sub array. 3123 !! Then we scatter the north fold array back to the processors. 3124 !! 3125 !!---------------------------------------------------------------------- 3126 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo 3127 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 3128 ! ! = T , U , V , F or W -points 3129 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 3130 !! ! north fold, = 1. otherwise 3131 INTEGER :: ji, jj, jr 3132 INTEGER :: ierr, itaille, ildi, ilei, iilb 3133 INTEGER :: ijpj, ij, iproc 3134 ! 3135 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e 3136 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio_e 3137 3138 !!---------------------------------------------------------------------- 3139 ! 3140 ALLOCATE( ztab_e(jpiglo,4+2*jpr2dj), znorthloc_e(jpi,4+2*jpr2dj), znorthgloio_e(jpi,4+2*jpr2dj,jpni) ) 3141 3142 ! 3143 ijpj=4 3144 ztab_e(:,:) = 0.e0 3145 3146 ij=0 3147 ! put in znorthloc_e the last 4 jlines of pt2d 3148 DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj 3149 ij = ij + 1 3150 DO ji = 1, jpi 3151 znorthloc_e(ji,ij)=pt2d(ji,jj) 3152 END DO 3153 END DO 3154 ! 3155 itaille = jpi * ( ijpj + 2 * jpr2dj ) 3156 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & 3157 & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 3158 ! 3159 DO jr = 1, ndim_rank_north ! recover the global north array 3160 iproc = nrank_north(jr) + 1 3161 ildi = nldit (iproc) 3162 ilei = nleit (iproc) 3163 iilb = nimppt(iproc) 3164 DO jj = 1, ijpj+2*jpr2dj 3165 DO ji = ildi, ilei 3166 ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) 3167 END DO 3168 END DO 3169 END DO 3170 3171 3172 ! 2. North-Fold boundary conditions 3173 ! ---------------------------------- 3174 CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj ) 3175 3176 ij = jpr2dj 3177 !! Scatter back to pt2d 3178 DO jj = nlcj - ijpj + 1 , nlcj +jpr2dj 3179 ij = ij +1 3180 DO ji= 1, nlci 3181 pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) 3182 END DO 3183 END DO 3184 ! 3185 DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e ) 3186 ! 3187 END SUBROUTINE mpp_lbc_north_e 3188 3189 3190 SUBROUTINE mpp_lnk_bdy_3d( ptab, cd_type, psgn, ib_bdy ) 3191 !!---------------------------------------------------------------------- 3192 !! *** routine mpp_lnk_bdy_3d *** 3193 !! 3194 !! ** Purpose : Message passing management 3195 !! 3196 !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries 3197 !! between processors following neighboring subdomains. 3198 !! domain parameters 3199 !! nlci : first dimension of the local subdomain 3200 !! nlcj : second dimension of the local subdomain 3201 !! nbondi_bdy : mark for "east-west local boundary" 3202 !! nbondj_bdy : mark for "north-south local boundary" 3203 !! noea : number for local neighboring processors 3204 !! nowe : number for local neighboring processors 3205 !! noso : number for local neighboring processors 3206 !! nono : number for local neighboring processors 3207 !! 3208 !! 
** Action : ptab with update value at its periphery 3209 !! 3210 !!---------------------------------------------------------------------- 3211 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 3212 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 3213 ! ! = T , U , V , F , W points 3214 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 3215 ! ! = 1. , the sign is kept 3216 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 3217 ! 3218 INTEGER :: ji, jj, jk, jl ! dummy loop indices 3219 INTEGER :: imigr, iihom, ijhom ! local integers 3220 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3221 REAL(wp) :: zland ! local scalar 3222 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3223 ! 3224 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 3225 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 3226 !!---------------------------------------------------------------------- 3227 ! 3228 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 3229 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 3230 3231 zland = 0._wp 3232 3233 ! 1. standard boundary treatment 3234 ! ------------------------------ 3235 ! ! East-West boundaries 3236 ! !* Cyclic east-west 3237 IF( nbondi == 2) THEN 3238 IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN 3239 ptab( 1 ,:,:) = ptab(jpim1,:,:) 3240 ptab(jpi,:,:) = ptab( 2 ,:,:) 3241 ELSE 3242 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point 3243 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north 3244 ENDIF 3245 ELSEIF(nbondi == -1) THEN 3246 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point 3247 ELSEIF(nbondi == 1) THEN 3248 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north 3249 ENDIF !* closed 3250 3251 IF (nbondj == 2 .OR. nbondj == -1) THEN 3252 IF( .NOT. cd_type == 'F' ) ptab(:,1:jprecj,:) = zland ! south except F-point 3253 ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN 3254 ptab(:,nlcj-jprecj+1:jpj,:) = zland ! north 3255 ENDIF 3256 ! 3257 ! 2. East and west directions exchange 3258 ! ------------------------------------ 3259 ! we play with the neigbours AND the row number because of the periodicity 3260 ! 3261 SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions 3262 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3263 iihom = nlci-nreci 3264 DO jl = 1, jpreci 3265 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 3266 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 3267 END DO 3268 END SELECT 3269 ! 3270 ! ! Migrations 3271 imigr = jpreci * jpj * jpk 3272 ! 3273 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3274 CASE ( -1 ) 3275 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 3276 CASE ( 0 ) 3277 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 3278 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 3279 CASE ( 1 ) 3280 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 3281 END SELECT 3282 ! 3283 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3284 CASE ( -1 ) 3285 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 3286 CASE ( 0 ) 3287 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 3288 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 3289 CASE ( 1 ) 3290 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 3291 END SELECT 3292 ! 
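Stripped of the nbondi_bdy bookkeeping, the exchange pattern used here (mppsend wrapping a non-blocking MPI_ISEND, mpprecv a blocking MPI_RECV, then MPI_WAIT on the send requests) amounts to the sketch below; the routine name and dummy arguments are illustrative:

   SUBROUTINE ew_exchange_sketch( pwest_out, peast_out, pwest_in, peast_in, klen, kwest, keast, kcomm )
      !! sketch: exchange one halo buffer with the west and east neighbours,
      !! posting both sends before the receives so the pattern cannot deadlock
      USE mpi
      IMPLICIT NONE
      INTEGER ,                  INTENT(in   ) :: klen                   ! buffer length
      INTEGER ,                  INTENT(in   ) :: kwest, keast, kcomm    ! neighbour ranks and communicator
      REAL(8) , DIMENSION(klen), INTENT(in   ) :: pwest_out, peast_out   ! halos to send
      REAL(8) , DIMENSION(klen), INTENT(  out) :: pwest_in , peast_in    ! halos received
      INTEGER :: ierr, ireq1, ireq2
      INTEGER, DIMENSION(MPI_STATUS_SIZE) :: istat
      !
      CALL MPI_ISEND( pwest_out, klen, MPI_DOUBLE_PRECISION, kwest, 1, kcomm, ireq1, ierr )
      CALL MPI_ISEND( peast_out, klen, MPI_DOUBLE_PRECISION, keast, 2, kcomm, ireq2, ierr )
      CALL MPI_RECV ( peast_in , klen, MPI_DOUBLE_PRECISION, keast, 1, kcomm, istat, ierr )   ! tag 1 sent by the east neighbour
      CALL MPI_RECV ( pwest_in , klen, MPI_DOUBLE_PRECISION, kwest, 2, kcomm, istat, ierr )   ! tag 2 sent by the west neighbour
      CALL MPI_WAIT ( ireq1, istat, ierr )
      CALL MPI_WAIT ( ireq2, istat, ierr )
   END SUBROUTINE ew_exchange_sketch

Posting both non-blocking sends before the blocking receives is what makes the simultaneous east and west exchanges safe even when both neighbours exist.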
3293 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3294 CASE ( -1 ) 3295 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3296 CASE ( 0 ) 3297 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3298 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3299 CASE ( 1 ) 3300 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3301 END SELECT 3302 ! 3303 ! ! Write Dirichlet lateral conditions 3304 iihom = nlci-jpreci 3305 ! 3306 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3307 CASE ( -1 ) 3308 DO jl = 1, jpreci 3309 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 3310 END DO 3311 CASE ( 0 ) 3312 DO jl = 1, jpreci 3313 ptab( jl,:,:) = zt3we(:,jl,:,2) 3314 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 3315 END DO 3316 CASE ( 1 ) 3317 DO jl = 1, jpreci 3318 ptab( jl,:,:) = zt3we(:,jl,:,2) 3319 END DO 3320 END SELECT 3321 3322 3323 ! 3. North and south directions 3324 ! ----------------------------- 3325 ! always closed : we play only with the neigbours 3326 ! 3327 IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions 3328 ijhom = nlcj-nrecj 3329 DO jl = 1, jprecj 3330 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 3331 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 3332 END DO 3333 ENDIF 3334 ! 3335 ! ! Migrations 3336 imigr = jprecj * jpi * jpk 3337 ! 3338 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3339 CASE ( -1 ) 3340 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 3341 CASE ( 0 ) 3342 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 3343 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 3344 CASE ( 1 ) 3345 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 3346 END SELECT 3347 ! 3348 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3349 CASE ( -1 ) 3350 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 3351 CASE ( 0 ) 3352 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 3353 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 3354 CASE ( 1 ) 3355 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 3356 END SELECT 3357 ! 3358 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3359 CASE ( -1 ) 3360 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3361 CASE ( 0 ) 3362 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3363 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3364 CASE ( 1 ) 3365 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3366 END SELECT 3367 ! 3368 ! ! Write Dirichlet lateral conditions 3369 ijhom = nlcj-jprecj 3370 ! 3371 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3372 CASE ( -1 ) 3373 DO jl = 1, jprecj 3374 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 3375 END DO 3376 CASE ( 0 ) 3377 DO jl = 1, jprecj 3378 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 3379 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 3380 END DO 3381 CASE ( 1 ) 3382 DO jl = 1, jprecj 3383 ptab(:,jl,:) = zt3sn(:,jl,:,2) 3384 END DO 3385 END SELECT 3386 3387 3388 ! 4. north fold treatment 3389 ! ----------------------- 3390 ! 3391 IF( npolj /= 0) THEN 3392 ! 3393 SELECT CASE ( jpni ) 3394 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 3395 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 3396 END SELECT 3397 ! 3398 ENDIF 3399 ! 3400 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 3401 ! 3402 END SUBROUTINE mpp_lnk_bdy_3d 3403 3404 3405 SUBROUTINE mpp_lnk_bdy_2d( ptab, cd_type, psgn, ib_bdy ) 3406 !!---------------------------------------------------------------------- 3407 !! *** routine mpp_lnk_bdy_2d *** 3408 !! 3409 !! ** Purpose : Message passing management 3410 !! 3411 !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries 3412 !! between processors following neighboring subdomains. 3413 !! 
domain parameters 3414 !! nlci : first dimension of the local subdomain 3415 !! nlcj : second dimension of the local subdomain 3416 !! nbondi_bdy : mark for "east-west local boundary" 3417 !! nbondj_bdy : mark for "north-south local boundary" 3418 !! noea : number for local neighboring processors 3419 !! nowe : number for local neighboring processors 3420 !! noso : number for local neighboring processors 3421 !! nono : number for local neighboring processors 3422 !! 3423 !! ** Action : ptab with update value at its periphery 3424 !! 3425 !!---------------------------------------------------------------------- 3426 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 3427 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 3428 ! ! = T , U , V , F , W points 3429 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 3430 ! ! = 1. , the sign is kept 3431 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 3432 ! 3433 INTEGER :: ji, jj, jl ! dummy loop indices 3434 INTEGER :: imigr, iihom, ijhom ! local integers 3435 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3436 REAL(wp) :: zland 3437 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3438 ! 3439 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 3440 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 3441 !!---------------------------------------------------------------------- 3442 3443 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 3444 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 3445 3446 zland = 0._wp 3447 3448 ! 1. standard boundary treatment 3449 ! ------------------------------ 3450 ! ! East-West boundaries 3451 ! !* Cyclic east-west 3452 IF( nbondi == 2 ) THEN 3453 IF (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) THEN 3454 ptab( 1 ,:) = ptab(jpim1,:) 3455 ptab(jpi,:) = ptab( 2 ,:) 3456 ELSE 3457 IF(.NOT.cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point 3458 ptab(nlci-jpreci+1:jpi ,:) = zland ! north 3459 ENDIF 3460 ELSEIF(nbondi == -1) THEN 3461 IF( .NOT.cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point 3462 ELSEIF(nbondi == 1) THEN 3463 ptab(nlci-jpreci+1:jpi ,:) = zland ! north 3464 ENDIF 3465 ! !* closed 3466 IF( nbondj == 2 .OR. nbondj == -1 ) THEN 3467 IF( .NOT.cd_type == 'F' ) ptab(:, 1 :jprecj) = zland ! south except F-point 3468 ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN 3469 ptab(:,nlcj-jprecj+1:jpj ) = zland ! north 3470 ENDIF 3471 ! 3472 ! 2. East and west directions exchange 3473 ! ------------------------------------ 3474 ! we play with the neigbours AND the row number because of the periodicity 3475 ! 3476 SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions 3477 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3478 iihom = nlci-nreci 3479 DO jl = 1, jpreci 3480 zt2ew(:,jl,1) = ptab(jpreci+jl,:) 3481 zt2we(:,jl,1) = ptab(iihom +jl,:) 3482 END DO 3483 END SELECT 3484 ! 3485 ! ! Migrations 3486 imigr = jpreci * jpj 3487 ! 3488 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3489 CASE ( -1 ) 3490 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 3491 CASE ( 0 ) 3492 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 3493 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 3494 CASE ( 1 ) 3495 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 3496 END SELECT 3497 ! 
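The "standard boundary treatment" at the top of these routines boils down to copying the opposite interior columns into the halo when the domain is cyclic east-west, and filling the halo with the land value otherwise; a small self-contained illustration with arbitrary sizes (the program name and sizes are not from the changeset):

   PROGRAM ew_periodicity_sketch
      !! sketch: with a one-point halo, the cyclic east-west case copies the opposite
      !! interior columns, the closed case simply zeroes the halo columns (land value)
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpi = 6, jpj = 4
      REAL(8), DIMENSION(jpi,jpj) :: ptab
      LOGICAL :: l_cyclic = .TRUE.
      INTEGER :: ji
      !
      ptab(:,:) = 0.d0
      DO ji = 2, jpi-1
         ptab(ji,:) = REAL( ji, 8 )          ! some interior values
      END DO
      !
      IF( l_cyclic ) THEN
         ptab( 1 ,:) = ptab(jpi-1,:)         ! first column <- last interior column
         ptab(jpi,:) = ptab(  2  ,:)         ! last  column <- first interior column
      ELSE
         ptab( 1 ,:) = 0.d0                  ! closed: halo columns hold the land value
         ptab(jpi,:) = 0.d0
      ENDIF
      WRITE(*,'(6f5.1)') ptab(:,1)
   END PROGRAM ew_periodicity_sketch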
3498 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3499 CASE ( -1 ) 3500 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 3501 CASE ( 0 ) 3502 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 3503 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 3504 CASE ( 1 ) 3505 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 3506 END SELECT 3507 ! 3508 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3509 CASE ( -1 ) 3510 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3511 CASE ( 0 ) 3512 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3513 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3514 CASE ( 1 ) 3515 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3516 END SELECT 3517 ! 3518 ! ! Write Dirichlet lateral conditions 3519 iihom = nlci-jpreci 3520 ! 3521 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3522 CASE ( -1 ) 3523 DO jl = 1, jpreci 3524 ptab(iihom+jl,:) = zt2ew(:,jl,2) 3525 END DO 3526 CASE ( 0 ) 3527 DO jl = 1, jpreci 3528 ptab(jl ,:) = zt2we(:,jl,2) 3529 ptab(iihom+jl,:) = zt2ew(:,jl,2) 3530 END DO 3531 CASE ( 1 ) 3532 DO jl = 1, jpreci 3533 ptab(jl ,:) = zt2we(:,jl,2) 3534 END DO 3535 END SELECT 3536 3537 3538 ! 3. North and south directions 3539 ! ----------------------------- 3540 ! always closed : we play only with the neigbours 3541 ! 3542 IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions 3543 ijhom = nlcj-nrecj 3544 DO jl = 1, jprecj 3545 zt2sn(:,jl,1) = ptab(:,ijhom +jl) 3546 zt2ns(:,jl,1) = ptab(:,jprecj+jl) 3547 END DO 3548 ENDIF 3549 ! 3550 ! ! Migrations 3551 imigr = jprecj * jpi 3552 ! 3553 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3554 CASE ( -1 ) 3555 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 3556 CASE ( 0 ) 3557 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 3558 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 3559 CASE ( 1 ) 3560 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 3561 END SELECT 3562 ! 3563 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3564 CASE ( -1 ) 3565 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 3566 CASE ( 0 ) 3567 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 3568 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 3569 CASE ( 1 ) 3570 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 3571 END SELECT 3572 ! 3573 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3574 CASE ( -1 ) 3575 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3576 CASE ( 0 ) 3577 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3578 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3579 CASE ( 1 ) 3580 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3581 END SELECT 3582 ! 3583 ! ! Write Dirichlet lateral conditions 3584 ijhom = nlcj-jprecj 3585 ! 3586 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3587 CASE ( -1 ) 3588 DO jl = 1, jprecj 3589 ptab(:,ijhom+jl) = zt2ns(:,jl,2) 3590 END DO 3591 CASE ( 0 ) 3592 DO jl = 1, jprecj 3593 ptab(:,jl ) = zt2sn(:,jl,2) 3594 ptab(:,ijhom+jl) = zt2ns(:,jl,2) 3595 END DO 3596 CASE ( 1 ) 3597 DO jl = 1, jprecj 3598 ptab(:,jl) = zt2sn(:,jl,2) 3599 END DO 3600 END SELECT 3601 3602 3603 ! 4. north fold treatment 3604 ! ----------------------- 3605 ! 3606 IF( npolj /= 0) THEN 3607 ! 3608 SELECT CASE ( jpni ) 3609 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 3610 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 3611 END SELECT 3612 ! 3613 ENDIF 3614 ! 3615 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 3616 ! 
3617 END SUBROUTINE mpp_lnk_bdy_2d 3618 3619 3620 SUBROUTINE mpi_init_opa( ldtxt, ksft, code ) 1098 SUBROUTINE mpi_init_oce( ldtxt, ksft, code ) 3621 1099 !!--------------------------------------------------------------------- 3622 1100 !! *** routine mpp_init.opa *** … … 3649 1127 IF( .NOT. mpi_was_called ) THEN 3650 1128 CALL mpi_init( code ) 3651 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_o pa, code )1129 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, code ) 3652 1130 IF ( code /= MPI_SUCCESS ) THEN 3653 1131 DO ji = 1, SIZE(ldtxt) … … 3675 1153 ENDIF 3676 1154 ! 3677 END SUBROUTINE mpi_init_opa 3678 3679 SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype) 1155 END SUBROUTINE mpi_init_oce 1156 1157 1158 SUBROUTINE DDPDD_MPI( ydda, yddb, ilen, itype ) 3680 1159 !!--------------------------------------------------------------------- 3681 1160 !! Routine DDPDD_MPI: used by reduction operator MPI_SUMDD … … 3684 1163 !! This subroutine computes yddb(i) = ydda(i)+yddb(i) 3685 1164 !!--------------------------------------------------------------------- 3686 INTEGER , INTENT(in) ::ilen, itype3687 COMPLEX(wp), DIMENSION(ilen), INTENT(in) ::ydda3688 COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::yddb1165 INTEGER , INTENT(in) :: ilen, itype 1166 COMPLEX(wp), DIMENSION(ilen), INTENT(in) :: ydda 1167 COMPLEX(wp), DIMENSION(ilen), INTENT(inout) :: yddb 3689 1168 ! 3690 1169 REAL(wp) :: zerr, zt1, zt2 ! local work variables 3691 INTEGER :: ji, ztmp ! local scalar 3692 1170 INTEGER :: ji, ztmp ! local scalar 1171 !!--------------------------------------------------------------------- 1172 ! 3693 1173 ztmp = itype ! avoid compilation warning 3694 1174 ! 3695 1175 DO ji=1,ilen 3696 1176 ! Compute ydda + yddb using Knuth's trick. … … 3703 1183 yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp ) 3704 1184 END DO 3705 1185 ! 3706 1186 END SUBROUTINE DDPDD_MPI 3707 1187 3708 1188 3709 SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, pr2dj)1189 SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, kextj) 3710 1190 !!--------------------------------------------------------------------- 3711 1191 !! *** routine mpp_lbc_north_icb *** … … 3717 1197 !! ** Method : North fold condition and mpp with more than one proc 3718 1198 !! in i-direction require a specific treatment. We gather 3719 !! the 4+ 2*jpr2dj northern lines of the global domain on 11199 !! the 4+kextj northern lines of the global domain on 1 3720 1200 !! processor and apply lbc north-fold on this sub array. 3721 1201 !! Then we scatter the north fold array back to the processors. 3722 !! This version accounts for an extra halo with icebergs. 1202 !! This routine accounts for an extra halo with icebergs 1203 !! and assumes ghost rows and columns have been suppressed. 3723 1204 !! 3724 1205 !!---------------------------------------------------------------------- … … 3728 1209 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 3729 1210 !! ! north fold, = 1. otherwise 3730 INTEGER , OPTIONAL , INTENT(in ) :: pr2dj1211 INTEGER , INTENT(in ) :: kextj ! Extra halo width at north fold 3731 1212 ! 3732 1213 INTEGER :: ji, jj, jr 3733 1214 INTEGER :: ierr, itaille, ildi, ilei, iilb 3734 INTEGER :: i jpj, ij, iproc, ipr2dj1215 INTEGER :: ipj, ij, iproc 3735 1216 ! 3736 1217 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e … … 3738 1219 !!---------------------------------------------------------------------- 3739 1220 ! 3740 ijpj=4 3741 IF( PRESENT(pr2dj) ) THEN ! 
use of additional halos 3742 ipr2dj = pr2dj 3743 ELSE 3744 ipr2dj = 0 3745 ENDIF 3746 ALLOCATE( ztab_e(jpiglo,4+2*ipr2dj), znorthloc_e(jpi,4+2*ipr2dj), znorthgloio_e(jpi,4+2*ipr2dj,jpni) ) 3747 ! 3748 ztab_e(:,:) = 0._wp 3749 ! 3750 ij = 0 3751 ! put in znorthloc_e the last 4 jlines of pt2d 3752 DO jj = nlcj - ijpj + 1 - ipr2dj, nlcj +ipr2dj 1221 ipj=4 1222 ALLOCATE( ztab_e(jpiglo, 1-kextj:ipj+kextj) , & 1223 & znorthloc_e(jpimax, 1-kextj:ipj+kextj) , & 1224 & znorthgloio_e(jpimax, 1-kextj:ipj+kextj,jpni) ) 1225 ! 1226 ztab_e(:,:) = 0._wp 1227 znorthloc_e(:,:) = 0._wp 1228 ! 1229 ij = 1 - kextj 1230 ! put the last ipj+2*kextj lines of pt2d into znorthloc_e 1231 DO jj = jpj - ipj + 1 - kextj , jpj + kextj 1232 znorthloc_e(1:jpi,ij)=pt2d(1:jpi,jj) 3753 1233 ij = ij + 1 3754 DO ji = 1, jpi3755 znorthloc_e(ji,ij)=pt2d(ji,jj)3756 END DO3757 1234 END DO 3758 1235 ! 3759 itaille = jpi * ( ijpj + 2 * ipr2dj ) 3760 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & 3761 & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 1236 itaille = jpimax * ( ipj + 2*kextj ) 1237 ! 1238 IF( ln_timing ) CALL tic_tac(.TRUE.) 1239 CALL MPI_ALLGATHER( znorthloc_e(1,1-kextj) , itaille, MPI_DOUBLE_PRECISION, & 1240 & znorthgloio_e(1,1-kextj,1), itaille, MPI_DOUBLE_PRECISION, & 1241 & ncomm_north, ierr ) 1242 ! 1243 IF( ln_timing ) CALL tic_tac(.FALSE.) 3762 1244 ! 3763 1245 DO jr = 1, ndim_rank_north ! recover the global north array … … 3766 1248 ilei = nleit (iproc) 3767 1249 iilb = nimppt(iproc) 3768 DO jj = 1 , ijpj+2*ipr2dj1250 DO jj = 1-kextj, ipj+kextj 3769 1251 DO ji = ildi, ilei 3770 1252 ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) … … 3773 1255 END DO 3774 1256 3775 3776 1257 ! 2. North-Fold boundary conditions 3777 1258 ! ---------------------------------- 3778 CALL lbc_nfd( ztab_e(:, :), cd_type, psgn, pr2dj = ipr2dj )3779 3780 ij = ipr2dj1259 CALL lbc_nfd( ztab_e(:,1-kextj:ipj+kextj), cd_type, psgn, kextj ) 1260 1261 ij = 1 - kextj 3781 1262 !! Scatter back to pt2d 3782 DO jj = nlcj - ijpj + 1 , nlcj +ipr2dj 3783 ij = ij +1 3784 DO ji= 1, nlci 1263 DO jj = jpj - ipj + 1 - kextj , jpj + kextj 1264 DO ji= 1, jpi 3785 1265 pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) 3786 1266 END DO 1267 ij = ij +1 3787 1268 END DO 3788 1269 ! … … 3792 1273 3793 1274 3794 SUBROUTINE mpp_lnk_2d_icb( pt2d, cd_type, psgn, jpri, jprj )1275 SUBROUTINE mpp_lnk_2d_icb( cdname, pt2d, cd_type, psgn, kexti, kextj ) 3795 1276 !!---------------------------------------------------------------------- 3796 1277 !! *** routine mpp_lnk_2d_icb *** 3797 1278 !! 3798 !! ** Purpose : Message passing manadgement for 2d array (with extra halo and icebergs) 1279 !! ** Purpose : Message passing management for 2d array (with extra halo for icebergs) 1280 !! This routine receives a (1-kexti:jpi+kexti,1-kexti:jpj+kextj) 1281 !! array (usually (0:jpi+1, 0:jpj+1)) from lbc_lnk_icb calls. 3799 1282 !! 3800 1283 !! ** Method : Use mppsend and mpprecv function for passing mask 3801 1284 !! between processors following neighboring subdomains. 3802 1285 !! domain parameters 3803 !! nlci: first dimension of the local subdomain3804 !! nlcj: second dimension of the local subdomain3805 !! jpri : number of rows for extra outer halo3806 !! jprj : number of columns for extra outer halo1286 !! jpi : first dimension of the local subdomain 1287 !! jpj : second dimension of the local subdomain 1288 !! kexti : number of columns for extra outer halo 1289 !! kextj : number of rows for extra outer halo 3807 1290 !! 
nbondi : mark for "east-west local boundary" 3808 1291 !! nbondj : mark for "north-south local boundary" … … 3812 1295 !! nono : number for local neighboring processors 3813 1296 !!---------------------------------------------------------------------- 3814 INTEGER , INTENT(in ) :: jpri3815 INTEGER , INTENT(in ) :: jprj3816 REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo3817 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points3818 ! ! = T , U , V , F , W and I points3819 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the3820 ! ! ! north boundary, = 1. otherwise1297 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 1298 REAL(wp), DIMENSION(1-kexti:jpi+kexti,1-kextj:jpj+kextj), INTENT(inout) :: pt2d ! 2D array with extra halo 1299 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points 1300 REAL(wp) , INTENT(in ) :: psgn ! sign used across the north fold 1301 INTEGER , INTENT(in ) :: kexti ! extra i-halo width 1302 INTEGER , INTENT(in ) :: kextj ! extra j-halo width 1303 ! 3821 1304 INTEGER :: jl ! dummy loop indices 3822 INTEGER :: imigr, iihom, ijhom ! temporaryintegers3823 INTEGER :: ipreci, iprecj ! temporary integers1305 INTEGER :: imigr, iihom, ijhom ! local integers 1306 INTEGER :: ipreci, iprecj ! - - 3824 1307 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3825 1308 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3826 1309 !! 3827 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns 3828 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn 3829 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe 3830 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew 3831 !!---------------------------------------------------------------------- 3832 3833 ipreci = jpreci + jpri ! take into account outer extra 2D overlap area 3834 iprecj = jprecj + jprj 3835 1310 REAL(wp), DIMENSION(1-kexti:jpi+kexti,nn_hls+kextj,2) :: r2dns, r2dsn 1311 REAL(wp), DIMENSION(1-kextj:jpj+kextj,nn_hls+kexti,2) :: r2dwe, r2dew 1312 !!---------------------------------------------------------------------- 1313 1314 ipreci = nn_hls + kexti ! take into account outer extra 2D overlap area 1315 iprecj = nn_hls + kextj 1316 1317 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, 1, 1, 1, ld_lbc = .TRUE. ) 3836 1318 3837 1319 ! 1. standard boundary treatment … … 3841 1323 ! ! East-West boundaries 3842 1324 ! !* Cyclic east-west 3843 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6)) THEN3844 pt2d(1- jpri: 1 ,:) = pt2d(jpim1-jpri:jpim1 ,:) ! east3845 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west1325 IF( l_Iperio ) THEN 1326 pt2d(1-kexti: 1 ,:) = pt2d(jpim1-kexti: jpim1 ,:) ! east 1327 pt2d( jpi :jpi+kexti,:) = pt2d( 2 :2+kexti,:) ! west 3846 1328 ! 3847 1329 ELSE !* closed 3848 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point 3849 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north 1330 IF( .NOT. cd_type == 'F' ) pt2d( 1-kexti :nn_hls ,:) = 0._wp ! east except at F-point 1331 pt2d(jpi-nn_hls+1:jpi+kexti,:) = 0._wp ! west 1332 ENDIF 1333 ! ! North-South boundaries 1334 IF( l_Jperio ) THEN !* cyclic (only with no mpp j-split) 1335 pt2d(:,1-kextj: 1 ) = pt2d(:,jpjm1-kextj: jpjm1) ! north 1336 pt2d(:, jpj :jpj+kextj) = pt2d(:, 2 :2+kextj) ! south 1337 ELSE !* closed 1338 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-kextj :nn_hls ) = 0._wp ! 
north except at F-point 1339 pt2d(:,jpj-nn_hls+1:jpj+kextj) = 0._wp ! south 3850 1340 ENDIF 3851 1341 ! … … 3856 1346 ! 3857 1347 SELECT CASE ( jpni ) 3858 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )3859 CASE DEFAULT ; CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj) , cd_type, psgn , pr2dj=jprj)1348 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj ) 1349 CASE DEFAULT ; CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj ) 3860 1350 END SELECT 3861 1351 ! … … 3868 1358 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 3869 1359 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3870 iihom = nlci-nreci-jpri1360 iihom = jpi-nreci-kexti 3871 1361 DO jl = 1, ipreci 3872 r2dew(:,jl,1) = pt2d( jpreci+jl,:)1362 r2dew(:,jl,1) = pt2d(nn_hls+jl,:) 3873 1363 r2dwe(:,jl,1) = pt2d(iihom +jl,:) 3874 1364 END DO … … 3876 1366 ! 3877 1367 ! ! Migrations 3878 imigr = ipreci * ( jpj + 2*jprj) 1368 imigr = ipreci * ( jpj + 2*kextj ) 1369 ! 1370 IF( ln_timing ) CALL tic_tac(.TRUE.) 3879 1371 ! 3880 1372 SELECT CASE ( nbondi ) 3881 1373 CASE ( -1 ) 3882 CALL mppsend( 2, r2dwe(1- jprj,1,1), imigr, noea, ml_req1 )3883 CALL mpprecv( 1, r2dew(1- jprj,1,2), imigr, noea )1374 CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req1 ) 1375 CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea ) 3884 1376 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3885 1377 CASE ( 0 ) 3886 CALL mppsend( 1, r2dew(1- jprj,1,1), imigr, nowe, ml_req1 )3887 CALL mppsend( 2, r2dwe(1- jprj,1,1), imigr, noea, ml_req2 )3888 CALL mpprecv( 1, r2dew(1- jprj,1,2), imigr, noea )3889 CALL mpprecv( 2, r2dwe(1- jprj,1,2), imigr, nowe )1378 CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 ) 1379 CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req2 ) 1380 CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea ) 1381 CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe ) 3890 1382 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3891 1383 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 3892 1384 CASE ( 1 ) 3893 CALL mppsend( 1, r2dew(1- jprj,1,1), imigr, nowe, ml_req1 )3894 CALL mpprecv( 2, r2dwe(1- jprj,1,2), imigr, nowe )1385 CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 ) 1386 CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe ) 3895 1387 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3896 1388 END SELECT 3897 1389 ! 1390 IF( ln_timing ) CALL tic_tac(.FALSE.) 1391 ! 3898 1392 ! ! Write Dirichlet lateral conditions 3899 iihom = nlci - jpreci1393 iihom = jpi - nn_hls 3900 1394 ! 3901 1395 SELECT CASE ( nbondi ) … … 3906 1400 CASE ( 0 ) 3907 1401 DO jl = 1, ipreci 3908 pt2d(jl- jpri,:) = r2dwe(:,jl,2)3909 pt2d( 1402 pt2d(jl-kexti,:) = r2dwe(:,jl,2) 1403 pt2d(iihom+jl,:) = r2dew(:,jl,2) 3910 1404 END DO 3911 1405 CASE ( 1 ) 3912 1406 DO jl = 1, ipreci 3913 pt2d(jl- jpri,:) = r2dwe(:,jl,2)1407 pt2d(jl-kexti,:) = r2dwe(:,jl,2) 3914 1408 END DO 3915 1409 END SELECT … … 3921 1415 ! 3922 1416 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 3923 ijhom = nlcj-nrecj-jprj1417 ijhom = jpj-nrecj-kextj 3924 1418 DO jl = 1, iprecj 3925 1419 r2dsn(:,jl,1) = pt2d(:,ijhom +jl) 3926 r2dns(:,jl,1) = pt2d(:, jprecj+jl)1420 r2dns(:,jl,1) = pt2d(:,nn_hls+jl) 3927 1421 END DO 3928 1422 ENDIF 3929 1423 ! 3930 1424 ! ! Migrations 3931 imigr = iprecj * ( jpi + 2*jpri ) 1425 imigr = iprecj * ( jpi + 2*kexti ) 1426 ! 1427 IF( ln_timing ) CALL tic_tac(.TRUE.) 3932 1428 ! 
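The tic_tac calls that bracket the communications above (the routine itself appears further down, after mpp_report) separate MPI waiting time from compute time with MPI_WTIME: tic just before a communication, tac just after. The bookkeeping amounts to this sketch, with illustrative argument names:

   SUBROUTINE tic_tac_sketch( ld_tic, pwait, pcompute )
      !! sketch: time between tic (.TRUE.) and tac (.FALSE.) is counted as waiting
      !! (communication) time, time between tac and the next tic as compute time
      USE mpi
      IMPLICIT NONE
      LOGICAL , INTENT(in   ) :: ld_tic
      REAL(8) , INTENT(inout) :: pwait, pcompute
      REAL(8) , SAVE          :: zt_tic = -1.d0, zt_tac = -1.d0
      !
      IF( ld_tic ) THEN                                       ! just before a communication
         zt_tic = MPI_WTIME()
         IF( zt_tac > 0.d0 )   pcompute = pcompute + zt_tic - zt_tac
      ELSE                                                    ! just after the communication
         zt_tac = MPI_WTIME()
         IF( zt_tic > 0.d0 )   pwait = pwait + zt_tac - zt_tic
      ENDIF
   END SUBROUTINE tic_tac_sketch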
3933 1429 SELECT CASE ( nbondj ) 3934 1430 CASE ( -1 ) 3935 CALL mppsend( 4, r2dsn(1- jpri,1,1), imigr, nono, ml_req1 )3936 CALL mpprecv( 3, r2dns(1- jpri,1,2), imigr, nono )1431 CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req1 ) 1432 CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono ) 3937 1433 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3938 1434 CASE ( 0 ) 3939 CALL mppsend( 3, r2dns(1- jpri,1,1), imigr, noso, ml_req1 )3940 CALL mppsend( 4, r2dsn(1- jpri,1,1), imigr, nono, ml_req2 )3941 CALL mpprecv( 3, r2dns(1- jpri,1,2), imigr, nono )3942 CALL mpprecv( 4, r2dsn(1- jpri,1,2), imigr, noso )1435 CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 ) 1436 CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req2 ) 1437 CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono ) 1438 CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso ) 3943 1439 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3944 1440 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 3945 1441 CASE ( 1 ) 3946 CALL mppsend( 3, r2dns(1- jpri,1,1), imigr, noso, ml_req1 )3947 CALL mpprecv( 4, r2dsn(1- jpri,1,2), imigr, noso )1442 CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 ) 1443 CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso ) 3948 1444 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3949 1445 END SELECT 3950 1446 ! 1447 IF( ln_timing ) CALL tic_tac(.FALSE.) 1448 ! 3951 1449 ! ! Write Dirichlet lateral conditions 3952 ijhom = nlcj - jprecj1450 ijhom = jpj - nn_hls 3953 1451 ! 3954 1452 SELECT CASE ( nbondj ) … … 3959 1457 CASE ( 0 ) 3960 1458 DO jl = 1, iprecj 3961 pt2d(:,jl- jprj) = r2dsn(:,jl,2)3962 pt2d(:,ijhom+jl 1459 pt2d(:,jl-kextj) = r2dsn(:,jl,2) 1460 pt2d(:,ijhom+jl) = r2dns(:,jl,2) 3963 1461 END DO 3964 1462 CASE ( 1 ) 3965 1463 DO jl = 1, iprecj 3966 pt2d(:,jl- jprj) = r2dsn(:,jl,2)1464 pt2d(:,jl-kextj) = r2dsn(:,jl,2) 3967 1465 END DO 3968 1466 END SELECT 3969 1467 ! 3970 1468 END SUBROUTINE mpp_lnk_2d_icb 1469 1470 1471 SUBROUTINE mpp_report( cdname, kpk, kpl, kpf, ld_lbc, ld_glb, ld_dlg ) 1472 !!---------------------------------------------------------------------- 1473 !! *** routine mpp_report *** 1474 !! 1475 !! ** Purpose : report use of mpp routines per time-setp 1476 !! 1477 !!---------------------------------------------------------------------- 1478 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1479 INTEGER , OPTIONAL, INTENT(in ) :: kpk, kpl, kpf 1480 LOGICAL , OPTIONAL, INTENT(in ) :: ld_lbc, ld_glb, ld_dlg 1481 !! 1482 LOGICAL :: ll_lbc, ll_glb, ll_dlg 1483 INTEGER :: ji, jj, jk, jh, jf ! dummy loop indices 1484 !!---------------------------------------------------------------------- 1485 ! 1486 ll_lbc = .FALSE. 1487 IF( PRESENT(ld_lbc) ) ll_lbc = ld_lbc 1488 ll_glb = .FALSE. 1489 IF( PRESENT(ld_glb) ) ll_glb = ld_glb 1490 ll_dlg = .FALSE. 1491 IF( PRESENT(ld_dlg) ) ll_dlg = ld_dlg 1492 ! 1493 ! find the smallest common frequency: default = frequency product, if multiple, choose the larger of the 2 frequency 1494 IF( ncom_dttrc /= 1 ) CALL ctl_stop( 'STOP', 'mpp_report, ncom_dttrc /= 1 not coded...' ) 1495 ncom_freq = ncom_fsbc 1496 ! 1497 IF ( ncom_stp == nit000+ncom_freq ) THEN ! avoid to count extra communications in potential initializations at nit000 1498 IF( ll_lbc ) THEN 1499 IF( .NOT. ALLOCATED(ncomm_sequence) ) ALLOCATE( ncomm_sequence(ncom_rec_max,2) ) 1500 IF( .NOT. 
ALLOCATED( crname_lbc) ) ALLOCATE( crname_lbc(ncom_rec_max ) )
1501            n_sequence_lbc = n_sequence_lbc + 1
1502            IF( n_sequence_lbc > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' )   ! deadlock
1503            crname_lbc(n_sequence_lbc) = cdname     ! keep the name of the calling routine
1504            ncomm_sequence(n_sequence_lbc,1) = kpk*kpl   ! size of 3rd and 4th dimensions
1505            ncomm_sequence(n_sequence_lbc,2) = kpf       ! number of arrays to be treated (multi)
1506         ENDIF
1507         IF( ll_glb ) THEN
1508            IF( .NOT. ALLOCATED(crname_glb) ) ALLOCATE( crname_glb(ncom_rec_max) )
1509            n_sequence_glb = n_sequence_glb + 1
1510            IF( n_sequence_glb > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' )   ! deadlock
1511            crname_glb(n_sequence_glb) = cdname     ! keep the name of the calling routine
1512         ENDIF
1513         IF( ll_dlg ) THEN
1514            IF( .NOT. ALLOCATED(crname_dlg) ) ALLOCATE( crname_dlg(ncom_rec_max) )
1515            n_sequence_dlg = n_sequence_dlg + 1
1516            IF( n_sequence_dlg > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' )   ! deadlock
1517            crname_dlg(n_sequence_dlg) = cdname     ! keep the name of the calling routine
1518         ENDIF
1519      ELSE IF ( ncom_stp == nit000+2*ncom_freq ) THEN
1520         CALL ctl_opn( numcom, 'communication_report.txt', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )
1521         WRITE(numcom,*) ' '
1522         WRITE(numcom,*) ' ------------------------------------------------------------'
1523         WRITE(numcom,*) ' Communication pattern report (second oce+sbc+top time step):'
1524         WRITE(numcom,*) ' ------------------------------------------------------------'
1525         WRITE(numcom,*) ' '
1526         WRITE(numcom,'(A,I4)') ' Exchanged halos : ', n_sequence_lbc
1527         jj = 0; jk = 0; jf = 0; jh = 0
1528         DO ji = 1, n_sequence_lbc
1529            IF ( ncomm_sequence(ji,1) .GT. 1 ) jk = jk + 1
1530            IF ( ncomm_sequence(ji,2) .GT. 1 ) jf = jf + 1
1531            IF ( ncomm_sequence(ji,1) .GT. 1 .AND. ncomm_sequence(ji,2) .GT.
1 ) jj = jj + 1 1532 jh = MAX (jh, ncomm_sequence(ji,1)*ncomm_sequence(ji,2)) 1533 END DO 1534 WRITE(numcom,'(A,I3)') ' 3D Exchanged halos : ', jk 1535 WRITE(numcom,'(A,I3)') ' Multi arrays exchanged halos : ', jf 1536 WRITE(numcom,'(A,I3)') ' from which 3D : ', jj 1537 WRITE(numcom,'(A,I10)') ' Array max size : ', jh*jpi*jpj 1538 WRITE(numcom,*) ' ' 1539 WRITE(numcom,*) ' lbc_lnk called' 1540 jj = 1 1541 DO ji = 2, n_sequence_lbc 1542 IF( crname_lbc(ji-1) /= crname_lbc(ji) ) THEN 1543 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(ji-1)) 1544 jj = 0 1545 END IF 1546 jj = jj + 1 1547 END DO 1548 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc)) 1549 WRITE(numcom,*) ' ' 1550 IF ( n_sequence_glb > 0 ) THEN 1551 WRITE(numcom,'(A,I4)') ' Global communications : ', n_sequence_glb 1552 jj = 1 1553 DO ji = 2, n_sequence_glb 1554 IF( crname_glb(ji-1) /= crname_glb(ji) ) THEN 1555 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(ji-1)) 1556 jj = 0 1557 END IF 1558 jj = jj + 1 1559 END DO 1560 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb)) 1561 DEALLOCATE(crname_glb) 1562 ELSE 1563 WRITE(numcom,*) ' No MPI global communication ' 1564 ENDIF 1565 WRITE(numcom,*) ' ' 1566 IF ( n_sequence_dlg > 0 ) THEN 1567 WRITE(numcom,'(A,I4)') ' Delayed global communications : ', n_sequence_dlg 1568 jj = 1 1569 DO ji = 2, n_sequence_dlg 1570 IF( crname_dlg(ji-1) /= crname_dlg(ji) ) THEN 1571 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_dlg(ji-1)) 1572 jj = 0 1573 END IF 1574 jj = jj + 1 1575 END DO 1576 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_dlg(n_sequence_dlg)) 1577 DEALLOCATE(crname_dlg) 1578 ELSE 1579 WRITE(numcom,*) ' No MPI delayed global communication ' 1580 ENDIF 1581 WRITE(numcom,*) ' ' 1582 WRITE(numcom,*) ' -----------------------------------------------' 1583 WRITE(numcom,*) ' ' 1584 DEALLOCATE(ncomm_sequence) 1585 DEALLOCATE(crname_lbc) 1586 ENDIF 1587 END SUBROUTINE mpp_report 1588 3971 1589 1590 SUBROUTINE tic_tac (ld_tic, ld_global) 1591 1592 LOGICAL, INTENT(IN) :: ld_tic 1593 LOGICAL, OPTIONAL, INTENT(IN) :: ld_global 1594 REAL(wp), DIMENSION(2), SAVE :: tic_wt 1595 REAL(wp), SAVE :: tic_ct = 0._wp 1596 INTEGER :: ii 1597 1598 IF( ncom_stp <= nit000 ) RETURN 1599 IF( ncom_stp == nitend ) RETURN 1600 ii = 1 1601 IF( PRESENT( ld_global ) ) THEN 1602 IF( ld_global ) ii = 2 1603 END IF 1604 1605 IF ( ld_tic ) THEN 1606 tic_wt(ii) = MPI_Wtime() ! start count tic->tac (waiting time) 1607 IF ( tic_ct > 0.0_wp ) compute_time = compute_time + MPI_Wtime() - tic_ct ! cumulate count tac->tic 1608 ELSE 1609 waiting_time(ii) = waiting_time(ii) + MPI_Wtime() - tic_wt(ii) ! cumulate count tic->tac 1610 tic_ct = MPI_Wtime() ! start count tac->tic (waiting time) 1611 ENDIF 1612 1613 END SUBROUTINE tic_tac 1614 1615 1616 #else 1617 !!---------------------------------------------------------------------- 1618 !! 
Default case: Dummy module share memory computing 1619 !!---------------------------------------------------------------------- 1620 USE in_out_manager 1621 1622 INTERFACE mpp_sum 1623 MODULE PROCEDURE mppsum_int, mppsum_a_int, mppsum_real, mppsum_a_real, mppsum_realdd, mppsum_a_realdd 1624 END INTERFACE 1625 INTERFACE mpp_max 1626 MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real 1627 END INTERFACE 1628 INTERFACE mpp_min 1629 MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real 1630 END INTERFACE 1631 INTERFACE mpp_minloc 1632 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d 1633 END INTERFACE 1634 INTERFACE mpp_maxloc 1635 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 1636 END INTERFACE 1637 1638 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .FALSE. !: mpp flag 1639 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used) 1640 INTEGER, PUBLIC :: mpi_comm_oce ! opa local communicator 1641 1642 INTEGER, PARAMETER, PUBLIC :: nbdelay = 0 ! make sure we don't enter loops: DO ji = 1, nbdelay 1643 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaylist = 'empty' 1644 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaycpnt = 'empty' 1645 LOGICAL, PUBLIC :: l_full_nf_update = .TRUE. 1646 TYPE :: DELAYARR 1647 REAL( wp), POINTER, DIMENSION(:) :: z1d => NULL() 1648 COMPLEX(wp), POINTER, DIMENSION(:) :: y1d => NULL() 1649 END TYPE DELAYARR 1650 TYPE( DELAYARR ), DIMENSION(1), PUBLIC :: todelay 1651 INTEGER, PUBLIC, DIMENSION(1) :: ndelayid = -1 1652 !!---------------------------------------------------------------------- 1653 CONTAINS 1654 1655 INTEGER FUNCTION lib_mpp_alloc(kumout) ! Dummy function 1656 INTEGER, INTENT(in) :: kumout 1657 lib_mpp_alloc = 0 1658 END FUNCTION lib_mpp_alloc 1659 1660 FUNCTION mynode( ldtxt, ldname, kumnam_ref, knumnam_cfg, kumond , kstop, localComm ) RESULT (function_value) 1661 INTEGER, OPTIONAL , INTENT(in ) :: localComm 1662 CHARACTER(len=*),DIMENSION(:) :: ldtxt 1663 CHARACTER(len=*) :: ldname 1664 INTEGER :: kumnam_ref, knumnam_cfg , kumond , kstop 1665 IF( PRESENT( localComm ) ) mpi_comm_oce = localComm 1666 function_value = 0 1667 IF( .FALSE. ) ldtxt(:) = 'never done' 1668 CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. , 1 ) 1669 END FUNCTION mynode 1670 1671 SUBROUTINE mppsync ! Dummy routine 1672 END SUBROUTINE mppsync 1673 1674 !!---------------------------------------------------------------------- 1675 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 1676 !! 1677 !!---------------------------------------------------------------------- 1678 !! 1679 # define OPERATION_MAX 1680 # define INTEGER_TYPE 1681 # define DIM_0d 1682 # define ROUTINE_ALLREDUCE mppmax_int 1683 # include "mpp_allreduce_generic.h90" 1684 # undef ROUTINE_ALLREDUCE 1685 # undef DIM_0d 1686 # define DIM_1d 1687 # define ROUTINE_ALLREDUCE mppmax_a_int 1688 # include "mpp_allreduce_generic.h90" 1689 # undef ROUTINE_ALLREDUCE 1690 # undef DIM_1d 1691 # undef INTEGER_TYPE 1692 ! 1693 # define REAL_TYPE 1694 # define DIM_0d 1695 # define ROUTINE_ALLREDUCE mppmax_real 1696 # include "mpp_allreduce_generic.h90" 1697 # undef ROUTINE_ALLREDUCE 1698 # undef DIM_0d 1699 # define DIM_1d 1700 # define ROUTINE_ALLREDUCE mppmax_a_real 1701 # include "mpp_allreduce_generic.h90" 1702 # undef ROUTINE_ALLREDUCE 1703 # undef DIM_1d 1704 # undef REAL_TYPE 1705 # undef OPERATION_MAX 1706 !!---------------------------------------------------------------------- 1707 !! 
*** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 1708 !! 1709 !!---------------------------------------------------------------------- 1710 !! 1711 # define OPERATION_MIN 1712 # define INTEGER_TYPE 1713 # define DIM_0d 1714 # define ROUTINE_ALLREDUCE mppmin_int 1715 # include "mpp_allreduce_generic.h90" 1716 # undef ROUTINE_ALLREDUCE 1717 # undef DIM_0d 1718 # define DIM_1d 1719 # define ROUTINE_ALLREDUCE mppmin_a_int 1720 # include "mpp_allreduce_generic.h90" 1721 # undef ROUTINE_ALLREDUCE 1722 # undef DIM_1d 1723 # undef INTEGER_TYPE 1724 ! 1725 # define REAL_TYPE 1726 # define DIM_0d 1727 # define ROUTINE_ALLREDUCE mppmin_real 1728 # include "mpp_allreduce_generic.h90" 1729 # undef ROUTINE_ALLREDUCE 1730 # undef DIM_0d 1731 # define DIM_1d 1732 # define ROUTINE_ALLREDUCE mppmin_a_real 1733 # include "mpp_allreduce_generic.h90" 1734 # undef ROUTINE_ALLREDUCE 1735 # undef DIM_1d 1736 # undef REAL_TYPE 1737 # undef OPERATION_MIN 1738 1739 !!---------------------------------------------------------------------- 1740 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 1741 !! 1742 !! Global sum of 1D array or a variable (integer, real or complex) 1743 !!---------------------------------------------------------------------- 1744 !! 1745 # define OPERATION_SUM 1746 # define INTEGER_TYPE 1747 # define DIM_0d 1748 # define ROUTINE_ALLREDUCE mppsum_int 1749 # include "mpp_allreduce_generic.h90" 1750 # undef ROUTINE_ALLREDUCE 1751 # undef DIM_0d 1752 # define DIM_1d 1753 # define ROUTINE_ALLREDUCE mppsum_a_int 1754 # include "mpp_allreduce_generic.h90" 1755 # undef ROUTINE_ALLREDUCE 1756 # undef DIM_1d 1757 # undef INTEGER_TYPE 1758 ! 1759 # define REAL_TYPE 1760 # define DIM_0d 1761 # define ROUTINE_ALLREDUCE mppsum_real 1762 # include "mpp_allreduce_generic.h90" 1763 # undef ROUTINE_ALLREDUCE 1764 # undef DIM_0d 1765 # define DIM_1d 1766 # define ROUTINE_ALLREDUCE mppsum_a_real 1767 # include "mpp_allreduce_generic.h90" 1768 # undef ROUTINE_ALLREDUCE 1769 # undef DIM_1d 1770 # undef REAL_TYPE 1771 # undef OPERATION_SUM 1772 1773 # define OPERATION_SUM_DD 1774 # define COMPLEX_TYPE 1775 # define DIM_0d 1776 # define ROUTINE_ALLREDUCE mppsum_realdd 1777 # include "mpp_allreduce_generic.h90" 1778 # undef ROUTINE_ALLREDUCE 1779 # undef DIM_0d 1780 # define DIM_1d 1781 # define ROUTINE_ALLREDUCE mppsum_a_realdd 1782 # include "mpp_allreduce_generic.h90" 1783 # undef ROUTINE_ALLREDUCE 1784 # undef DIM_1d 1785 # undef COMPLEX_TYPE 1786 # undef OPERATION_SUM_DD 1787 1788 !!---------------------------------------------------------------------- 1789 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 1790 !! 1791 !!---------------------------------------------------------------------- 1792 !! 
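!!
!! Note (illustrative): each #define / #include / #undef block instantiates one specific
!! routine of the generic interfaces declared at the top of this dummy module from a single
!! template file: the CPP tokens select the operation (here OPERATION_MINLOC / OPERATION_MAXLOC),
!! the argument rank (DIM_2d / DIM_3d) and the routine name (ROUTINE_LOC), and the #include
!! expands into that routine, exactly as done above for mpp_max, mpp_min and mpp_sum with
!! mpp_allreduce_generic.h90.  In this non-MPI branch the expanded routines are assumed to
!! return the purely local result, so that a call such as
!!       CALL mpp_sum( 'mppini', ierr )     ! as used in mppini.F90 below
!! is expected, on a single process, to leave ierr unchanged.
!!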
1793 # define OPERATION_MINLOC 1794 # define DIM_2d 1795 # define ROUTINE_LOC mpp_minloc2d 1796 # include "mpp_loc_generic.h90" 1797 # undef ROUTINE_LOC 1798 # undef DIM_2d 1799 # define DIM_3d 1800 # define ROUTINE_LOC mpp_minloc3d 1801 # include "mpp_loc_generic.h90" 1802 # undef ROUTINE_LOC 1803 # undef DIM_3d 1804 # undef OPERATION_MINLOC 1805 1806 # define OPERATION_MAXLOC 1807 # define DIM_2d 1808 # define ROUTINE_LOC mpp_maxloc2d 1809 # include "mpp_loc_generic.h90" 1810 # undef ROUTINE_LOC 1811 # undef DIM_2d 1812 # define DIM_3d 1813 # define ROUTINE_LOC mpp_maxloc3d 1814 # include "mpp_loc_generic.h90" 1815 # undef ROUTINE_LOC 1816 # undef DIM_3d 1817 # undef OPERATION_MAXLOC 1818 1819 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 1820 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1821 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1822 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 1823 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1824 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1825 INTEGER, INTENT(in ), OPTIONAL :: kcom 1826 ! 1827 pout(:) = REAL(y_in(:), wp) 1828 END SUBROUTINE mpp_delay_sum 1829 1830 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 1831 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1832 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1833 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in 1834 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1835 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1836 INTEGER, INTENT(in ), OPTIONAL :: kcom 1837 ! 1838 pout(:) = p_in(:) 1839 END SUBROUTINE mpp_delay_max 1840 1841 SUBROUTINE mpp_delay_rcv( kid ) 1842 INTEGER,INTENT(in ) :: kid 1843 WRITE(*,*) 'mpp_delay_rcv: You should not have seen this print! error?', kid 1844 END SUBROUTINE mpp_delay_rcv 1845 1846 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 1847 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 1848 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 1849 STOP ! non MPP case, just stop the run 1850 END SUBROUTINE mppstop 1851 1852 SUBROUTINE mpp_ini_znl( knum ) 1853 INTEGER :: knum 1854 WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum 1855 END SUBROUTINE mpp_ini_znl 1856 1857 SUBROUTINE mpp_comm_free( kcom ) 1858 INTEGER :: kcom 1859 WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom 1860 END SUBROUTINE mpp_comm_free 1861 1862 #endif 3972 1863 3973 1864 !!---------------------------------------------------------------------- … … 3988 1879 ! 3989 1880 nstop = nstop + 1 3990 IF(lwp) THEN 3991 WRITE(numout,cform_err) 3992 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd1 3993 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd2 3994 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd3 3995 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd4 3996 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd5 3997 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd6 3998 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd7 3999 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd8 4000 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd9 4001 IF( PRESENT(cd10) ) WRITE(numout,*) cd10 4002 ENDIF 1881 1882 ! force to open ocean.output file 1883 IF( numout == 6 ) CALL ctl_opn( numout, 'ocean.output', 'APPEND', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. 
) 1884 1885 WRITE(numout,cform_err) 1886 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1887 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1888 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1889 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1890 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1891 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1892 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1893 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1894 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1895 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 1896 4003 1897 CALL FLUSH(numout ) 4004 1898 IF( numstp /= -1 ) CALL FLUSH(numstp ) 4005 IF( num sol /= -1 ) CALL FLUSH(numsol)1899 IF( numrun /= -1 ) CALL FLUSH(numrun ) 4006 1900 IF( numevo_ice /= -1 ) CALL FLUSH(numevo_ice) 4007 1901 ! 4008 1902 IF( cd1 == 'STOP' ) THEN 4009 IF(lwp)WRITE(numout,*) 'huge E-R-R-O-R : immediate stop'4010 CALL mppstop( )1903 WRITE(numout,*) 'huge E-R-R-O-R : immediate stop' 1904 CALL mppstop(ld_force_abort = .true.) 4011 1905 ENDIF 4012 1906 ! … … 4029 1923 IF(lwp) THEN 4030 1924 WRITE(numout,cform_war) 4031 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd14032 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd24033 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd34034 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd44035 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd54036 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd64037 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd74038 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd84039 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd94040 IF( PRESENT(cd10) ) WRITE(numout,*) cd101925 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1926 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1927 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1928 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1929 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1930 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1931 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1932 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1933 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1934 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 4041 1935 ENDIF 4042 1936 CALL FLUSH(numout) … … 4073 1967 IF( karea > 1 ) WRITE(clfile, "(a,'_',i4.4)") TRIM(clfile), karea-1 4074 1968 ENDIF 1969 #if defined key_agrif 1970 IF( .NOT. Agrif_Root() ) clfile = TRIM(Agrif_CFixed())//'_'//TRIM(clfile) 1971 knum=Agrif_Get_Unit() 1972 #else 4075 1973 knum=get_unit() 1974 #endif 1975 IF( TRIM(cdfile) == '/dev/null' ) clfile = TRIM(cdfile) ! force the use of /dev/null 4076 1976 ! 4077 1977 iost=0 4078 IF( cdacce(1:6) == 'DIRECT' ) THEN 4079 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh, ERR=100, IOSTAT=iost ) 1978 IF( cdacce(1:6) == 'DIRECT' ) THEN ! cdacce has always more than 6 characters 1979 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh , ERR=100, IOSTAT=iost ) 1980 ELSE IF( TRIM(cdstat) == 'APPEND' ) THEN ! cdstat can have less than 6 characters 1981 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS='UNKNOWN', POSITION='APPEND', ERR=100, IOSTAT=iost ) 4080 1982 ELSE 4081 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 4082 ENDIF 1983 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1984 ENDIF 1985 IF( iost /= 0 .AND. TRIM(clfile) == '/dev/null' ) & ! 
for windows 1986 & OPEN(UNIT=knum,FILE='NUL', FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 4083 1987 IF( iost == 0 ) THEN 4084 1988 IF(ldwp) THEN 4085 WRITE(kout,*) ' file : ', clfile,' open ok'1989 WRITE(kout,*) ' file : ', TRIM(clfile),' open ok' 4086 1990 WRITE(kout,*) ' unit = ', knum 4087 1991 WRITE(kout,*) ' status = ', cdstat … … 4095 1999 IF(ldwp) THEN 4096 2000 WRITE(kout,*) 4097 WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile2001 WRITE(kout,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 4098 2002 WRITE(kout,*) ' ======= === ' 4099 2003 WRITE(kout,*) ' unit = ', knum … … 4104 2008 WRITE(kout,*) ' we stop. verify the file ' 4105 2009 WRITE(kout,*) 2010 ELSE !!! Force writing to make sure we get the information - at least once - in this violent STOP!! 2011 WRITE(*,*) 2012 WRITE(*,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 2013 WRITE(*,*) ' ======= === ' 2014 WRITE(*,*) ' unit = ', knum 2015 WRITE(*,*) ' status = ', cdstat 2016 WRITE(*,*) ' form = ', cdform 2017 WRITE(*,*) ' access = ', cdacce 2018 WRITE(*,*) ' iostat = ', iost 2019 WRITE(*,*) ' we stop. verify the file ' 2020 WRITE(*,*) 4106 2021 ENDIF 2022 CALL FLUSH( kout ) 4107 2023 STOP 'ctl_opn bad opening' 4108 2024 ENDIF … … 4121 2037 INTEGER , INTENT(inout) :: kios ! IO status after reading the namelist 4122 2038 CHARACTER(len=*), INTENT(in ) :: cdnam ! group name of namelist for which error occurs 4123 CHARACTER(len= 4) :: clios ! string to convert iostat in character for print2039 CHARACTER(len=5) :: clios ! string to convert iostat in character for print 4124 2040 LOGICAL , INTENT(in ) :: ldwp ! boolean term for print 4125 2041 !!---------------------------------------------------------------------- 4126 2042 ! 4127 WRITE (clios, '(I 4.0)') kios2043 WRITE (clios, '(I5.0)') kios 4128 2044 IF( kios < 0 ) THEN 4129 2045 CALL ctl_warn( 'end of record or file while reading namelist ' & -
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/mppini.F90
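The rewritten mpp_init below merges the former mpp_init and mpp_init2 into a single routine that chooses, checks and, where possible, trims the MPI domain decomposition at run time. A minimal sketch of the namelist group driving it, assuming the standard NEMO nammpp block (the DOMAINcfg tool may carry its own copy in namelist_ref / namelist_cfg): leaving jpni and jpnj below 1 lets mpp_init_bestpartition pick the decomposition, while explicit values are compared against the best one and a warning is issued if they waste CPU.

   &nammpp                     !   Massively Parallel Processing
      ln_nnogather = .true.    !  avoid mpi_allgather at the north fold
      jpni         =  0        !  number of subdomains along i (set automatically when < 1)
      jpnj         =  0        !  number of subdomains along j (set automatically when < 1)
   /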
r10725 r10727 1 1 MODULE mppini 2 !!====================================================================== ========2 !!====================================================================== 3 3 !! *** MODULE mppini *** 4 4 !! Ocean initialization : distributed memory computing initialization 5 !!============================================================================== 6 5 !!====================================================================== 6 !! History : 6.0 ! 1994-11 (M. Guyon) Original code 7 !! OPA 7.0 ! 1995-04 (J. Escobar, M. Imbard) 8 !! 8.0 ! 1998-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI versions 9 !! NEMO 1.0 ! 2004-01 (G. Madec, J.M Molines) F90 : free form , north fold jpni > 1 10 !! 3.4 ! 2011-10 (A. C. Coward, NOCS & J. Donners, PRACE) add mpp_init_nfdcom 11 !! 3. ! 2013-06 (I. Epicoco, S. Mocavero, CMCC) mpp_init_nfdcom: setup avoiding MPI communication 12 !! 4.0 ! 2016-06 (G. Madec) use domain configuration file instead of bathymetry file 13 !! 4.0 ! 2017-06 (J.M. Molines, T. Lovato) merge of mppini and mppini_2 7 14 !!---------------------------------------------------------------------- 8 !! mpp_init : Lay out the global domain over processors 9 !! mpp_init2 : Lay out the global domain over processors 10 !! with land processor elimination 11 !! mpp_init_ioispl: IOIPSL initialization in mpp 15 12 16 !!---------------------------------------------------------------------- 13 USE dom_oce ! ocean space and time domain 14 USE in_out_manager ! I/O Manager 15 USE lib_mpp ! distribued memory computing library 16 USE ioipsl 17 !! mpp_init : Lay out the global domain over processors with/without land processor elimination 18 !! mpp_init_mask : Read global bathymetric information to facilitate land suppression 19 !! mpp_init_ioipsl : IOIPSL initialization in mpp 20 !! mpp_init_partition: Calculate MPP domain decomposition 21 !! factorise : Calculate the factors of the no. of MPI processes 22 !! mpp_init_nfdcom : Setup for north fold exchanges with explicit point-to-point messaging 23 !!---------------------------------------------------------------------- 24 USE dom_oce ! ocean space and time domain 25 USE bdy_oce ! open BounDarY 26 ! 27 USE lbcnfd , ONLY : isendto, nsndto, nfsloop, nfeloop ! Setup of north fold exchanges 28 USE lib_mpp ! distribued memory computing library 29 USE iom ! nemo I/O library 30 USE ioipsl ! I/O IPSL library 31 USE in_out_manager ! I/O Manager 17 32 18 33 IMPLICIT NONE … … 20 35 21 36 PUBLIC mpp_init ! called by opa.F90 22 PUBLIC mpp_init2 ! called by opa.F90 23 37 38 INTEGER :: numbot = -1 ! 'bottom_level' local logical unit 39 INTEGER :: numbdy = -1 ! 'bdy_msk' local logical unit 40 24 41 !!---------------------------------------------------------------------- 25 42 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 26 !! $Id: mppini.F90 6412 2016-03-31 16:22:32Z lovato$27 !! Software governed by the CeCILL licen ce (./LICENSE)43 !! $Id: mppini.F90 10570 2019-01-24 15:14:49Z acc $ 44 !! Software governed by the CeCILL license (see ./LICENSE) 28 45 !!---------------------------------------------------------------------- 29 46 CONTAINS 30 47 48 #if ! defined key_mpp_mpi 31 49 !!---------------------------------------------------------------------- 32 !! 'key_mpp_mpi' OR MPI massively parallel processing50 !! Default option : shared memory computing 33 51 !!---------------------------------------------------------------------- 52 53 SUBROUTINE mpp_init 54 !!---------------------------------------------------------------------- 55 !! 
*** ROUTINE mpp_init *** 56 !! 57 !! ** Purpose : Lay out the global domain over processors. 58 !! 59 !! ** Method : Shared memory computing, set the local processor 60 !! variables to the value of the global domain 61 !!---------------------------------------------------------------------- 62 ! 63 jpimax = jpiglo 64 jpjmax = jpjglo 65 jpi = jpiglo 66 jpj = jpjglo 67 jpk = jpkglo 68 jpim1 = jpi-1 ! inner domain indices 69 jpjm1 = jpj-1 ! " " 70 jpkm1 = MAX( 1, jpk-1 ) ! " " 71 jpij = jpi*jpj 72 jpni = 1 73 jpnj = 1 74 jpnij = jpni*jpnj 75 nimpp = 1 ! 76 njmpp = 1 77 nlci = jpi 78 nlcj = jpj 79 nldi = 1 80 nldj = 1 81 nlei = jpi 82 nlej = jpj 83 nbondi = 2 84 nbondj = 2 85 nidom = FLIO_DOM_NONE 86 npolj = jperio 87 l_Iperio = jpni == 1 .AND. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7) 88 l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7) 89 ! 90 IF(lwp) THEN 91 WRITE(numout,*) 92 WRITE(numout,*) 'mpp_init : NO massively parallel processing' 93 WRITE(numout,*) '~~~~~~~~ ' 94 WRITE(numout,*) ' l_Iperio = ', l_Iperio, ' l_Jperio = ', l_Jperio 95 WRITE(numout,*) ' npolj = ', npolj , ' njmpp = ', njmpp 96 ENDIF 97 ! 98 IF( jpni /= 1 .OR. jpnj /= 1 .OR. jpnij /= 1 ) & 99 CALL ctl_stop( 'mpp_init: equality jpni = jpnj = jpnij = 1 is not satisfied', & 100 & 'the domain is lay out for distributed memory computing!' ) 101 ! 102 END SUBROUTINE mpp_init 103 104 #else 105 !!---------------------------------------------------------------------- 106 !! 'key_mpp_mpi' MPI massively parallel processing 107 !!---------------------------------------------------------------------- 108 34 109 35 110 SUBROUTINE mpp_init … … 38 113 !! 39 114 !! ** Purpose : Lay out the global domain over processors. 115 !! If land processors are to be eliminated, this program requires the 116 !! presence of the domain configuration file. Land processors elimination 117 !! is performed if jpni x jpnj /= jpnij. In this case, using the MPP_PREP 118 !! preprocessing tool, help for defining the best cutting out. 40 119 !! 41 120 !! ** Method : Global domain is distributed in smaller local domains. … … 44 123 !! periodic 45 124 !! Type : jperio global periodic condition 46 !! nperio local periodic condition 47 !! 48 !! ** Action : - set domain parameters 125 !! 126 !! ** Action : - set domain parameters 49 127 !! nimpp : longitudinal index 50 128 !! njmpp : latitudinal index 51 !! nperio : lateral condition type52 129 !! narea : number for local area 53 130 !! nlci : first dimension … … 60 137 !! noso : number for local neighboring processor 61 138 !! nono : number for local neighboring processor 62 !! 63 !! History : 64 !! ! 94-11 (M. Guyon) Original code 65 !! ! 95-04 (J. Escobar, M. Imbard) 66 !! ! 98-02 (M. Guyon) FETI method 67 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI versions 68 !! 8.5 ! 02-08 (G. Madec) F90 : free form 69 !! 3.4 ! 11-11 (C. Harris) decomposition changes for running with CICE 70 !!---------------------------------------------------------------------- 71 INTEGER :: ji, jj, jn ! dummy loop indices 72 INTEGER :: ii, ij, ifreq, il1, il2 ! local integers 73 INTEGER :: iresti, irestj, ijm1, imil, inum ! - - 74 REAL(wp) :: zidom, zjdom ! local scalars 75 INTEGER, DIMENSION(jpni,jpnj) :: iimppt, ijmppt, ilcit, ilcjt ! 
local workspace 76 !!---------------------------------------------------------------------- 77 78 IF(lwp) WRITE(numout,*) 79 IF(lwp) WRITE(numout,*) 'mpp_init : Message Passing MPI' 80 IF(lwp) WRITE(numout,*) '~~~~~~~~' 81 82 139 !!---------------------------------------------------------------------- 140 INTEGER :: ji, jj, jn, jproc, jarea ! dummy loop indices 141 INTEGER :: inijmin 142 INTEGER :: i2add 143 INTEGER :: inum ! local logical unit 144 INTEGER :: idir, ifreq, icont ! local integers 145 INTEGER :: ii, il1, ili, imil ! - - 146 INTEGER :: ij, il2, ilj, ijm1 ! - - 147 INTEGER :: iino, ijno, iiso, ijso ! - - 148 INTEGER :: iiea, ijea, iiwe, ijwe ! - - 149 INTEGER :: iarea0 ! - - 150 INTEGER :: ierr, ios ! 151 INTEGER :: inbi, inbj, iimax, ijmax, icnt1, icnt2 152 LOGICAL :: llbest 153 LOGICAL :: llwrtlay 154 INTEGER, ALLOCATABLE, DIMENSION(:) :: iin, ii_nono, ii_noea ! 1D workspace 155 INTEGER, ALLOCATABLE, DIMENSION(:) :: ijn, ii_noso, ii_nowe ! - - 156 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: iimppt, ilci, ibondi, ipproc ! 2D workspace 157 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ijmppt, ilcj, ibondj, ipolj ! - - 158 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ilei, ildi, iono, ioea ! - - 159 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ilej, ildj, ioso, iowe ! - - 160 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: llisoce ! - - 161 NAMELIST/nambdy/ ln_bdy, nb_bdy, ln_coords_file, cn_coords_file, & 162 & ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta, & 163 & cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta, & 164 & ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out, & 165 & cn_ice, nn_ice_dta, & 166 & rn_ice_tem, rn_ice_sal, rn_ice_age, & 167 & ln_vol, nn_volctl, nn_rimwidth, nb_jpk_bdy 168 !!---------------------------------------------------------------------- 169 170 llwrtlay = lwp .OR. ln_ctl .OR. sn_cfctl%l_layout 171 ! do we need to take into account bdy_msk? 172 REWIND( numnam_ref ) ! Namelist nambdy in reference namelist : BDY 173 READ ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903) 174 903 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in reference namelist (mppini)', lwp ) 175 REWIND( numnam_cfg ) ! Namelist nambdy in configuration namelist : BDY 176 READ ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 ) 177 904 IF( ios > 0 ) CALL ctl_nam ( ios , 'nambdy in configuration namelist (mppini)', lwp ) 178 ! 179 IF( ln_read_cfg ) CALL iom_open( cn_domcfg, numbot ) 180 IF( ln_bdy .AND. ln_mask_file ) CALL iom_open( cn_mask_file, numbdy ) 181 ! 83 182 ! 1. Dimension arrays for subdomains 84 183 ! ----------------------------------- 85 ! Computation of local domain sizes ilcit() ilcjt() 86 ! These dimensions depend on global sizes jpni,jpnj and jpiglo,jpjglo 87 ! The subdomains are squares leeser than or equal to the global 88 ! dimensions divided by the number of processors minus the overlap 89 ! array (cf. par_oce.F90). 184 ! 185 ! If dimensions of processor grid weren't specified in the namelist file 186 ! then we calculate them here now that we have our communicator size 187 IF( jpni < 1 .OR. jpnj < 1 ) THEN 188 CALL mpp_init_bestpartition( mppsize, jpni, jpnj ) 189 llbest = .TRUE. 190 ELSE 191 CALL mpp_init_bestpartition( mppsize, inbi, inbj, icnt2 ) 192 CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax ) 193 CALL mpp_basic_decomposition( inbi, inbj, iimax, ijmax ) 194 IF( iimax*ijmax < jpimax*jpjmax ) THEN 195 llbest = .FALSE. 
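            ! illustrative example (hypothetical numbers): if mppsize = 40 MPI processes and the
            ! user asked for jpni x jpnj = 8 x 6, the lines below report icnt1 = 48 - 40 = 8
            ! suppressed land subdomains and warn that CPU is being wasted, because the best
            ! partition found above (inbi x inbj) yields strictly smaller subdomains,
            ! i.e. iimax*ijmax < jpimax*jpjmax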
196 icnt1 = jpni*jpnj - mppsize 197 WRITE(ctmp1,9000) ' The chosen domain decomposition ', jpni, ' x ', jpnj, ' with ', icnt1, ' land sub-domains' 198 WRITE(ctmp2,9000) ' has larger MPI subdomains (jpi = ', jpimax, ', jpj = ', jpjmax, ', jpi*jpj = ', jpimax*jpjmax, ')' 199 WRITE(ctmp3,9000) ' than the following domain decompostion ', inbi, ' x ', inbj, ' with ', icnt2, ' land sub-domains' 200 WRITE(ctmp4,9000) ' which MPI subdomains size is jpi = ', iimax, ', jpj = ', ijmax, ', jpi*jpj = ', iimax*ijmax, ' ' 201 CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' ) 202 ELSE 203 llbest = .TRUE. 204 ENDIF 205 ENDIF 90 206 91 nreci = 2 * jpreci 92 nrecj = 2 * jprecj 93 iresti = MOD( jpiglo - nreci , jpni ) 94 irestj = MOD( jpjglo - nrecj , jpnj ) 95 96 IF( iresti == 0 ) iresti = jpni 97 98 99 DO jj = 1, jpnj 100 DO ji = 1, iresti 101 ilcit(ji,jj) = jpi 102 END DO 103 DO ji = iresti+1, jpni 104 ilcit(ji,jj) = jpi -1 105 END DO 106 END DO 207 ! look for land mpi subdomains... 208 ALLOCATE( llisoce(jpni,jpnj) ) 209 CALL mpp_init_isoce( jpni, jpnj, llisoce ) 210 inijmin = COUNT( llisoce ) ! number of oce subdomains 211 212 IF( mppsize < inijmin ) THEN 213 WRITE(ctmp1,9001) ' With this specified domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj 214 WRITE(ctmp2,9002) ' we can eliminate only ', jpni*jpnj - inijmin, ' land mpi subdomains therefore ' 215 WRITE(ctmp3,9001) ' the number of ocean mpi subdomains (', inijmin,') exceed the number of MPI processes:', mppsize 216 WRITE(ctmp4,*) ' ==>>> There is the list of best domain decompositions you should use: ' 217 CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 ) 218 CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. ) ! must be done by all core 219 CALL ctl_stop( 'STOP' ) 220 ENDIF 221 222 IF( mppsize > jpni*jpnj ) THEN 223 WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize 224 WRITE(ctmp2,9003) ' exceeds the maximum number of subdomains (ocean+land) = ', jpni*jpnj 225 WRITE(ctmp3,9001) ' defined by the following domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj 226 WRITE(ctmp4,*) ' ==>>> There is the list of best domain decompositions you should use: ' 227 CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 ) 228 CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. ) ! must be done by all core 229 CALL ctl_stop( 'STOP' ) 230 ENDIF 231 232 jpnij = mppsize ! force jpnij definition <-- remove as much land subdomains as needed to reach this condition 233 IF( mppsize > inijmin ) THEN 234 WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize 235 WRITE(ctmp2,9003) ' exceeds the maximum number of ocean subdomains = ', inijmin 236 WRITE(ctmp3,9002) ' we suppressed ', jpni*jpnj - mppsize, ' land subdomains ' 237 WRITE(ctmp4,9002) ' BUT we had to keep ', mppsize - inijmin, ' land subdomains that are useless...' 238 CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' ) 239 ELSE ! 
mppsize = inijmin 240 IF(lwp) THEN 241 IF(llbest) WRITE(numout,*) 'mpp_init: You use an optimal domain decomposition' 242 WRITE(numout,*) '~~~~~~~~ ' 243 WRITE(numout,9003) ' Number of mpi processes: ', mppsize 244 WRITE(numout,9003) ' Number of ocean subdomains = ', inijmin 245 WRITE(numout,9003) ' Number of suppressed land subdomains = ', jpni*jpnj - inijmin 246 WRITE(numout,*) 247 ENDIF 248 ENDIF 249 9000 FORMAT (a, i4, a, i4, a, i7, a) 250 9001 FORMAT (a, i4, a, i4) 251 9002 FORMAT (a, i4, a) 252 9003 FORMAT (a, i5) 253 254 IF( numbot /= -1 ) CALL iom_close( numbot ) 255 IF( numbdy /= -1 ) CALL iom_close( numbdy ) 256 257 ALLOCATE( nfiimpp(jpni,jpnj), nfipproc(jpni,jpnj), nfilcit(jpni,jpnj) , & 258 & nimppt(jpnij) , ibonit(jpnij) , nlcit(jpnij) , nlcjt(jpnij) , & 259 & njmppt(jpnij) , ibonjt(jpnij) , nldit(jpnij) , nldjt(jpnij) , & 260 & nleit(jpnij) , nlejt(jpnij) , & 261 & iin(jpnij), ii_nono(jpnij), ii_noea(jpnij), & 262 & ijn(jpnij), ii_noso(jpnij), ii_nowe(jpnij), & 263 & iimppt(jpni,jpnj), ilci(jpni,jpnj), ibondi(jpni,jpnj), ipproc(jpni,jpnj), & 264 & ijmppt(jpni,jpnj), ilcj(jpni,jpnj), ibondj(jpni,jpnj), ipolj(jpni,jpnj), & 265 & ilei(jpni,jpnj), ildi(jpni,jpnj), iono(jpni,jpnj), ioea(jpni,jpnj), & 266 & ilej(jpni,jpnj), ildj(jpni,jpnj), ioso(jpni,jpnj), iowe(jpni,jpnj), & 267 & STAT=ierr ) 268 CALL mpp_sum( 'mppini', ierr ) 269 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate standard ocean arrays' ) 107 270 108 nfilcit(:,:) = ilcit(:,:) 109 IF( irestj == 0 ) irestj = jpnj 110 111 112 DO ji = 1, jpni 113 DO jj = 1, irestj 114 ilcjt(ji,jj) = jpj 115 END DO 116 DO jj = irestj+1, jpnj 117 ilcjt(ji,jj) = jpj -1 118 END DO 119 END DO 120 121 271 #if defined key_agrif 272 IF( .NOT. Agrif_Root() ) THEN ! AGRIF children: specific setting (cf. agrif_user.F90) 273 IF( jpiglo /= nbcellsx + 2 + 2*nbghostcells ) THEN 274 IF(lwp) THEN 275 WRITE(numout,*) 276 WRITE(numout,*) 'jpiglo shoud be: ', nbcellsx + 2 + 2*nbghostcells 277 ENDIF 278 CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires jpiglo == nbcellsx + 2 + 2*nbghostcells' ) 279 ENDIF 280 IF( jpjglo /= nbcellsy + 2 + 2*nbghostcells ) THEN 281 IF(lwp) THEN 282 WRITE(numout,*) 283 WRITE(numout,*) 'jpjglo shoud be: ', nbcellsy + 2 + 2*nbghostcells 284 ENDIF 285 CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires jpjglo == nbcellsy + 2 + 2*nbghostcells' ) 286 ENDIF 287 IF( ln_use_jattr ) CALL ctl_stop( 'STOP', 'mpp_init:Agrif children requires ln_use_jattr = .false. ' ) 288 ENDIF 289 #endif 290 ! 122 291 ! 2. Index arrays for subdomains 123 ! ------------------------------- 124 125 iimppt(:,:) = 1 126 ijmppt(:,:) = 1 127 128 IF( jpni > 1 ) THEN 129 DO jj = 1, jpnj 130 DO ji = 2, jpni 131 iimppt(ji,jj) = iimppt(ji-1,jj) + ilcit(ji-1,jj) - nreci 132 END DO 133 END DO 134 ENDIF 135 nfiimpp(:,:)=iimppt(:,:) 136 137 IF( jpnj > 1 ) THEN 138 DO jj = 2, jpnj 139 DO ji = 1, jpni 140 ijmppt(ji,jj) = ijmppt(ji,jj-1)+ilcjt(ji,jj-1)-nrecj 141 END DO 142 END DO 143 ENDIF 144 145 ! 3. Subdomain description 146 ! ------------------------ 147 148 DO jn = 1, jpnij 149 ii = 1 + MOD( jn-1, jpni ) 150 ij = 1 + (jn-1) / jpni 151 nfipproc(ii,ij) = jn - 1 152 nimppt(jn) = iimppt(ii,ij) 153 njmppt(jn) = ijmppt(ii,ij) 154 nlcit (jn) = ilcit (ii,ij) 155 nlci = nlcit (jn) 156 nlcjt (jn) = ilcjt (ii,ij) 157 nlcj = nlcjt (jn) 158 nbondj = -1 ! general case 159 IF( jn > jpni ) nbondj = 0 ! first row of processor 160 IF( jn > (jpnj-1)*jpni ) nbondj = 1 ! last row of processor 161 IF( jpnj == 1 ) nbondj = 2 ! 
one processor only in j-direction 162 ibonjt(jn) = nbondj 163 164 nbondi = 0 ! 165 IF( MOD( jn, jpni ) == 1 ) nbondi = -1 ! 166 IF( MOD( jn, jpni ) == 0 ) nbondi = 1 ! 167 IF( jpni == 1 ) nbondi = 2 ! one processor only in i-direction 168 ibonit(jn) = nbondi 169 170 nldi = 1 + jpreci 171 nlei = nlci - jpreci 172 IF( nbondi == -1 .OR. nbondi == 2 ) nldi = 1 173 IF( nbondi == 1 .OR. nbondi == 2 ) nlei = nlci 174 nldj = 1 + jprecj 175 nlej = nlcj - jprecj 176 IF( nbondj == -1 .OR. nbondj == 2 ) nldj = 1 177 IF( nbondj == 1 .OR. nbondj == 2 ) nlej = nlcj 178 nldit(jn) = nldi 179 nleit(jn) = nlei 180 nldjt(jn) = nldj 181 nlejt(jn) = nlej 182 END DO 183 184 ! 4. Subdomain print 185 ! ------------------ 186 187 IF(lwp) WRITE(numout,*) 188 IF(lwp) WRITE(numout,*) ' mpp_init: defines mpp subdomains' 189 IF(lwp) WRITE(numout,*) ' ~~~~~~ ----------------------' 190 IF(lwp) WRITE(numout,*) 191 IF(lwp) WRITE(numout,*) 'iresti=',iresti,' irestj=',irestj 192 IF(lwp) WRITE(numout,*) 193 IF(lwp) WRITE(numout,*) 'jpni=',jpni,' jpnj=',jpnj 194 zidom = nreci 195 DO ji = 1, jpni 196 zidom = zidom + ilcit(ji,1) - nreci 197 END DO 198 IF(lwp) WRITE(numout,*) 199 IF(lwp) WRITE(numout,*)' sum ilcit(i,1)=', zidom, ' jpiglo=', jpiglo 200 201 zjdom = nrecj 202 DO jj = 1, jpnj 203 zjdom = zjdom + ilcjt(1,jj) - nrecj 204 END DO 205 IF(lwp) WRITE(numout,*)' sum ilcit(1,j)=', zjdom, ' jpjglo=', jpjglo 206 IF(lwp) WRITE(numout,*) 207 292 ! ----------------------------------- 293 ! 294 nreci = 2 * nn_hls 295 nrecj = 2 * nn_hls 296 CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax, iimppt, ijmppt, ilci, ilcj ) 297 nfiimpp(:,:) = iimppt(:,:) 298 nfilcit(:,:) = ilci(:,:) 299 ! 208 300 IF(lwp) THEN 209 ifreq = 4210 il1 = 1211 DO jn = 1, (jpni-1)/ifreq+1212 il2 = MIN( jpni, il1+ifreq-1 )213 WRITE(numout,*)214 WRITE(numout,9200) ('***',ji = il1,il2-1)215 DO jj = jpnj, 1, -1216 WRITE(numout,9203) (' ',ji = il1,il2-1)217 WRITE(numout,9202) jj, ( ilcit(ji,jj),ilcjt(ji,jj),ji = il1,il2 )218 WRITE(numout,9204) (nfipproc(ji,jj),ji=il1,il2)219 WRITE(numout,9203) (' ',ji = il1,il2-1)220 WRITE(numout,9200) ('***',ji = il1,il2-1)221 END DO222 WRITE(numout,9201) (ji,ji = il1,il2)223 il1 = il1+ifreq224 END DO225 9200 FORMAT(' ***',20('*************',a3))226 9203 FORMAT(' * ',20(' * ',a3))227 9201 FORMAT(' ',20(' ',i3,' '))228 9202 FORMAT(' ',i3,' * ',20(i3,' x',i3,' * '))229 9204 FORMAT(' * ',20(' ',i3,' * '))230 ENDIF231 232 ! 5. From global to local233 ! -----------------------234 235 nperio = 0236 IF( jperio == 2 .AND. nbondj == -1 ) nperio = 2237 238 239 ! 6. Subdomain neighbours240 ! ----------------------241 242 nproc = narea - 1243 noso = nproc - jpni244 nowe = nproc - 1245 noea = nproc + 1246 nono = nproc + jpni247 ! great neighbours248 npnw = nono - 1249 npne = nono + 1250 npsw = noso - 1251 npse = noso + 1252 nbsw = 1253 nbnw = 1254 IF( MOD( nproc, jpni ) == 0 ) THEN255 nbsw = 0256 nbnw = 0257 ENDIF258 nbse = 1259 nbne = 1260 IF( MOD( nproc, jpni ) == jpni-1 ) THEN261 nbse = 0262 nbne = 0263 ENDIF264 IF(nproc < jpni) THEN265 nbsw = 0266 nbse = 0267 ENDIF268 IF( nproc >= (jpnj-1)*jpni ) THEN269 nbnw = 0270 nbne = 0271 ENDIF272 nlcj = nlcjt(narea)273 nlci = nlcit(narea)274 nldi = nldit(narea)275 nlei = nleit(narea)276 nldj = nldjt(narea)277 nlej = nlejt(narea)278 nbondi = ibonit(narea)279 nbondj = ibonjt(narea)280 nimpp = nimppt(narea)281 njmpp = njmppt(narea)282 283 ! 
Save processor layout in layout.dat file284 IF (lwp) THEN285 CALL ctl_opn( inum, 'layout.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )286 WRITE(inum,'(a)') ' jpnij jpi jpj jpk jpiglo jpjglo'287 WRITE(inum,'(6i8)') jpnij,jpi,jpj,jpk,jpiglo,jpjglo288 WRITE(inum,'(a)') 'NAREA nlci nlcj nldi nldj nlei nlej nimpp njmpp'289 290 DO jn = 1, jpnij291 WRITE(inum,'(9i5)') jn, nlcit(jn), nlcjt(jn), &292 nldit(jn), nldjt(jn), &293 nleit(jn), nlejt(jn), &294 nimppt(jn), njmppt(jn)295 END DO296 CLOSE(inum)297 END IF298 299 300 ! w a r n i n g narea (zone) /= nproc (processors)!301 302 IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 ) THEN303 IF( jpni == 1 )THEN304 nbondi = 2305 nperio = 1306 ELSE307 nbondi = 0308 ENDIF309 IF( MOD( narea, jpni ) == 0 ) THEN310 noea = nproc-(jpni-1)311 npne = npne-jpni312 npse = npse-jpni313 ENDIF314 IF( MOD( narea, jpni ) == 1 ) THEN315 nowe = nproc+(jpni-1)316 npnw = npnw+jpni317 npsw = npsw+jpni318 ENDIF319 nbsw = 1320 nbnw = 1321 nbse = 1322 nbne = 1323 IF( nproc < jpni ) THEN324 nbsw = 0325 nbse = 0326 ENDIF327 IF( nproc >= (jpnj-1)*jpni ) THEN328 nbnw = 0329 nbne = 0330 ENDIF331 ENDIF332 npolj = 0333 IF( jperio == 3 .OR. jperio == 4 ) THEN334 ijm1 = jpni*(jpnj-1)335 imil = ijm1+(jpni+1)/2336 IF( narea > ijm1 ) npolj = 3337 IF( MOD(jpni,2) == 1 .AND. narea == imil ) npolj = 4338 IF( npolj == 3 ) nono = jpni*jpnj-narea+ijm1339 ENDIF340 IF( jperio == 5 .OR. jperio == 6 ) THEN341 ijm1 = jpni*(jpnj-1)342 imil = ijm1+(jpni+1)/2343 IF( narea > ijm1) npolj = 5344 IF( MOD(jpni,2) == 1 .AND. narea == imil ) npolj = 6345 IF( npolj == 5 ) nono = jpni*jpnj-narea+ijm1346 ENDIF347 348 ! Periodicity : no corner if nbondi = 2 and nperio != 1349 350 IF(lwp) THEN351 WRITE(numout,*) ' nproc = ', nproc352 WRITE(numout,*) ' nowe = ', nowe , ' noea = ', noea353 WRITE(numout,*) ' nono = ', nono , ' noso = ', noso354 WRITE(numout,*) ' nbondi = ', nbondi355 WRITE(numout,*) ' nbondj = ', nbondj356 WRITE(numout,*) ' npolj = ', npolj357 WRITE(numout,*) ' nperio = ', nperio358 WRITE(numout,*) ' nlci = ', nlci359 WRITE(numout,*) ' nlcj = ', nlcj360 WRITE(numout,*) ' nimpp = ', nimpp361 WRITE(numout,*) ' njmpp = ', njmpp362 WRITE(numout,*) ' nreci = ', nreci , ' npse = ', npse363 WRITE(numout,*) ' nrecj = ', nrecj , ' npsw = ', npsw364 WRITE(numout,*) ' jpreci = ', jpreci , ' npne = ', npne365 WRITE(numout,*) ' jprecj = ', jprecj , ' npnw = ', npnw366 301 WRITE(numout,*) 367 ENDIF 368 369 IF( nperio == 1 .AND. jpni /= 1 ) CALL ctl_stop( ' mpp_init: error on cyclicity' ) 370 371 ! Prepare mpp north fold 372 373 IF( jperio >= 3 .AND. jperio <= 6 .AND. jpni > 1 ) THEN 374 CALL mpp_ini_north 375 IF(lwp) WRITE(numout,*) ' mpp_init : North fold boundary prepared for jpni >1' 376 ENDIF 377 378 ! Prepare NetCDF output file (if necessary) 379 CALL mpp_init_ioipsl 380 381 END SUBROUTINE mpp_init 382 383 SUBROUTINE mpp_init2 384 !!---------------------------------------------------------------------- 385 !! *** ROUTINE mpp_init2 *** 386 !! 387 !! * Purpose : Lay out the global domain over processors. 388 !! FOR USING THIS VERSION, A PREPROCESSING TRAITMENT IS RECOMMENDED 389 !! FOR DEFINING BETTER CUTTING OUT. 390 !! This routine is used with a the bathymetry file. 391 !! In this version, the land processors are avoided and the adress 392 !! processor (nproc, narea,noea, ...) are calculated again. 393 !! The jpnij parameter can be lesser than jpni x jpnj 394 !! and this jpnij parameter must be calculated before with an 395 !! algoritmic preprocessing program. 396 !! 397 !! 
** Method : Global domain is distributed in smaller local domains. 398 !! Periodic condition is a function of the local domain position 399 !! (global boundary or neighbouring domain) and of the global 400 !! periodic 401 !! Type : jperio global periodic condition 402 !! nperio local periodic condition 403 !! 404 !! ** Action : nimpp : longitudinal index 405 !! njmpp : latitudinal index 406 !! nperio : lateral condition type 407 !! narea : number for local area 408 !! nlci : first dimension 409 !! nlcj : second dimension 410 !! nproc : number for local processor 411 !! noea : number for local neighboring processor 412 !! nowe : number for local neighboring processor 413 !! noso : number for local neighboring processor 414 !! nono : number for local neighboring processor 415 !! 416 !! History : 417 !! ! 94-11 (M. Guyon) Original code 418 !! ! 95-04 (J. Escobar, M. Imbard) 419 !! ! 98-02 (M. Guyon) FETI method 420 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI versions 421 !! 9.0 ! 04-01 (G. Madec, J.M Molines) F90 : free form , north fold jpni > 1 422 !!---------------------------------------------------------------------- 423 USE in_out_manager ! I/O Manager 424 USE iom 425 !! 426 INTEGER :: ji, jj, jn, jproc, jarea ! dummy loop indices 427 INTEGER :: inum ! temporary logical unit 428 INTEGER :: idir ! temporary integers 429 INTEGER :: jstartrow ! temporary integers 430 INTEGER :: ios ! Local integer output status for namelist read 431 INTEGER :: & 432 ii, ij, ifreq, il1, il2, & ! temporary integers 433 icont, ili, ilj, & ! " " 434 isurf, ijm1, imil, & ! " " 435 iino, ijno, iiso, ijso, & ! " " 436 iiea, ijea, iiwe, ijwe, & ! " " 437 iinw, ijnw, iine, ijne, & ! " " 438 iisw, ijsw, iise, ijse, & ! " " 439 iresti, irestj, iproc ! " " 440 INTEGER, DIMENSION(jpnij) :: & 441 iin, ijn 442 INTEGER, DIMENSION(jpni,jpnj) :: & 443 iimppt, ijmppt, ilci , ilcj , & ! temporary workspace 444 ipproc, ibondj, ibondi, ipolj , & ! " " 445 ilei , ilej , ildi , ildj , & ! " " 446 ioea , iowe , ioso , iono , & ! " " 447 ione , ionw , iose , iosw , & ! " " 448 ibne , ibnw , ibse , ibsw ! " " 449 INTEGER, DIMENSION(jpiglo,jpjglo) :: & 450 imask ! temporary global workspace 451 REAL(wp), DIMENSION(jpiglo,jpjglo) :: & 452 zdta, zdtaisf ! temporary data workspace 453 REAL(wp) :: zidom , zjdom ! temporary scalars 454 455 ! read namelist for ln_zco 456 NAMELIST/namzgr/ ln_zco, ln_zps, ln_sco, ln_isfcav, ln_linssh 457 458 !!---------------------------------------------------------------------- 459 !! OPA 9.0 , LOCEAN-IPSL (2005) 460 !! $Id: mppini_2.h90 6412 2016-03-31 16:22:32Z lovato $ 461 !! This software is governed by the CeCILL licence see modipsl/doc/NEMO_CeCILL.txt 462 !!---------------------------------------------------------------------- 463 464 REWIND( numnam_ref ) ! Namelist namzgr in reference namelist : Vertical coordinate 465 READ ( numnam_ref, namzgr, IOSTAT = ios, ERR = 901) 466 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namzgr in reference namelist', lwp ) 467 468 REWIND( numnam_cfg ) ! Namelist namzgr in configuration namelist : Vertical coordinate 469 READ ( numnam_cfg, namzgr, IOSTAT = ios, ERR = 902 ) 470 902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namzgr in configuration namelist', lwp ) 471 IF(lwm) WRITE ( numond, namzgr ) 472 473 IF(lwp)WRITE(numout,*) 474 IF(lwp)WRITE(numout,*) 'mpp_init : Message Passing MPI' 475 IF(lwp)WRITE(numout,*) '~~~~~~~~' 476 IF(lwp)WRITE(numout,*) ' ' 477 478 IF( jpni*jpnj < jpnij ) CALL ctl_stop( ' jpnij > jpni x jpnj impossible' ) 479 480 ! 0. 
initialisation 481 ! ----------------- 482 483 ! open the file 484 ! Remember that at this level in the code, mpp is not yet initialized, so 485 ! the file must be open with jpdom_unknown, and kstart and kcount forced 486 jstartrow = 1 487 IF ( ln_zco ) THEN 488 CALL iom_open ( 'bathy_level.nc', inum ) ! Level bathymetry 489 ! Optionally use a file attribute (open_ocean_jstart) to set a start row for reading from the global file 490 ! This allows the unextended grid bathymetry to be stored in the same file as the under ice-shelf extended bathymetry 491 CALL iom_getatt(inum, 'open_ocean_jstart', jstartrow ) ! -999 is returned if the attribute is not found 492 jstartrow = MAX(1,jstartrow) 493 CALL iom_get ( inum, jpdom_unknown, 'Bathy_level', zdta, kstart=(/jpizoom,jpjzoom+jstartrow-1/), kcount=(/jpiglo,jpjglo/) ) 494 ELSE 495 CALL iom_open ( 'bathy_meter.nc', inum ) ! Meter bathy in case of partial steps 496 IF ( ln_isfcav ) THEN 497 CALL iom_get ( inum, jpdom_unknown, 'Bathymetry_isf' , zdta, kstart=(/jpizoom,jpjzoom/), kcount=(/jpiglo,jpjglo/) ) 498 ELSE 499 ! Optionally use a file attribute (open_ocean_jstart) to set a start row for reading from the global file 500 ! This allows the unextended grid bathymetry to be stored in the same file as the under ice-shelf extended bathymetry 501 CALL iom_getatt(inum, 'open_ocean_jstart', jstartrow ) ! -999 is returned if the attribute is not found 502 jstartrow = MAX(1,jstartrow) 503 CALL iom_get ( inum, jpdom_unknown, 'Bathymetry' , zdta, kstart=(/jpizoom,jpjzoom+jstartrow-1/) & 504 & , kcount=(/jpiglo,jpjglo/) ) 505 ENDIF 506 ENDIF 507 CALL iom_close (inum) 508 509 ! used to compute the land processor in case of not masked bathy file. 510 zdtaisf(:,:) = 0.0_wp 511 IF ( ln_isfcav ) THEN 512 CALL iom_open ( 'bathy_meter.nc', inum ) ! Meter bathy in case of partial steps 513 CALL iom_get ( inum, jpdom_unknown, 'isf_draft' , zdtaisf, kstart=(/jpizoom,jpjzoom/), kcount=(/jpiglo,jpjglo/) ) 514 END IF 515 CALL iom_close (inum) 516 517 ! land/sea mask over the global/zoom domain 518 519 imask(:,:)=1 520 WHERE ( zdta(:,:) - zdtaisf(:,:) <= rn_isfhmin ) imask = 0 521 522 ! 1. Dimension arrays for subdomains 523 ! ----------------------------------- 524 525 ! Computation of local domain sizes ilci() ilcj() 526 ! These dimensions depend on global sizes jpni,jpnj and jpiglo,jpjglo 527 ! The subdomains are squares leeser than or equal to the global 528 ! dimensions divided by the number of processors minus the overlap 529 ! array. 530 531 nreci=2*jpreci 532 nrecj=2*jprecj 533 iresti = 1 + MOD( jpiglo - nreci -1 , jpni ) 534 irestj = 1 + MOD( jpjglo - nrecj -1 , jpnj ) 535 536 ilci(1:iresti ,:) = jpi 537 ilci(iresti+1:jpni ,:) = jpi-1 538 539 ilcj(:, 1:irestj) = jpj 540 ilcj(:, irestj+1:jpnj) = jpj-1 541 542 nfilcit(:,:) = ilci(:,:) 543 544 IF(lwp) WRITE(numout,*) 545 IF(lwp) WRITE(numout,*) ' mpp_init2: defines mpp subdomains' 546 IF(lwp) WRITE(numout,*) ' ~~~~~~ ----------------------' 547 IF(lwp) WRITE(numout,*) 548 IF(lwp) WRITE(numout,*) 'iresti=',iresti,' irestj=',irestj 549 IF(lwp) WRITE(numout,*) 550 IF(lwp) WRITE(numout,*) 'jpni=',jpni,' jpnj=',jpnj 551 552 zidom = nreci + sum(ilci(:,1) - nreci ) 553 IF(lwp) WRITE(numout,*) 554 IF(lwp) WRITE(numout,*)' sum ilci(i,1)=',zidom,' jpiglo=',jpiglo 555 556 zjdom = nrecj + sum(ilcj(1,:) - nrecj ) 557 IF(lwp) WRITE(numout,*) ' sum ilcj(1,j)=',zjdom,' jpjglo=',jpjglo 558 IF(lwp) WRITE(numout,*) 559 560 561 ! 2. Index arrays for subdomains 562 ! 
------------------------------- 563 564 iimppt(:,:) = 1 565 ijmppt(:,:) = 1 566 ipproc(:,:) = -1 567 568 IF( jpni > 1 )THEN 569 DO jj = 1, jpnj 570 DO ji = 2, jpni 571 iimppt(ji,jj) = iimppt(ji-1,jj) + ilci(ji-1,jj) - nreci 572 END DO 573 END DO 574 ENDIF 575 nfiimpp(:,:) = iimppt(:,:) 576 577 IF( jpnj > 1 )THEN 578 DO jj = 2, jpnj 579 DO ji = 1, jpni 580 ijmppt(ji,jj) = ijmppt(ji,jj-1) + ilcj(ji,jj-1) - nrecj 581 END DO 582 END DO 583 ENDIF 584 585 302 WRITE(numout,*) 'MPI Message Passing MPI - domain lay out over processors' 303 WRITE(numout,*) 304 WRITE(numout,*) ' defines mpp subdomains' 305 WRITE(numout,*) ' jpni = ', jpni 306 WRITE(numout,*) ' jpnj = ', jpnj 307 WRITE(numout,*) 308 WRITE(numout,*) ' sum ilci(i,1) = ', sum(ilci(:,1)), ' jpiglo = ', jpiglo 309 WRITE(numout,*) ' sum ilcj(1,j) = ', sum(ilcj(1,:)), ' jpjglo = ', jpjglo 310 ENDIF 311 586 312 ! 3. Subdomain description in the Regular Case 587 313 ! -------------------------------------------- 588 589 nperio = 0 590 icont = -1 314 ! specific cases where there is no communication -> must do the periodicity by itself 315 ! Warning: because of potential land-area suppression, do not use nbond[ij] == 2 316 l_Iperio = jpni == 1 .AND. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7) 317 l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7) 318 591 319 DO jarea = 1, jpni*jpnj 592 ii = 1 + MOD(jarea-1,jpni) 593 ij = 1 + (jarea-1)/jpni 320 ! 321 iarea0 = jarea - 1 322 ii = 1 + MOD(iarea0,jpni) 323 ij = 1 + iarea0/jpni 594 324 ili = ilci(ii,ij) 595 325 ilj = ilcj(ii,ij) 596 ibondj(ii,ij) = -1 597 IF( jarea > jpni ) ibondj(ii,ij) = 0 598 IF( jarea > (jpnj-1)*jpni ) ibondj(ii,ij) = 1 599 IF( jpnj == 1 ) ibondj(ii,ij) = 2 600 ibondi(ii,ij) = 0 601 IF( MOD(jarea,jpni) == 1 ) ibondi(ii,ij) = -1 602 IF( MOD(jarea,jpni) == 0 ) ibondi(ii,ij) = 1 603 IF( jpni == 1 ) ibondi(ii,ij) = 2 604 605 ! 2.4 Subdomain neighbors 606 607 iproc = jarea - 1 608 ioso(ii,ij) = iproc - jpni 609 iowe(ii,ij) = iproc - 1 610 ioea(ii,ij) = iproc + 1 611 iono(ii,ij) = iproc + jpni 612 ildi(ii,ij) = 1 + jpreci 613 ilei(ii,ij) = ili -jpreci 614 ionw(ii,ij) = iono(ii,ij) - 1 615 ione(ii,ij) = iono(ii,ij) + 1 616 iosw(ii,ij) = ioso(ii,ij) - 1 617 iose(ii,ij) = ioso(ii,ij) + 1 618 ibsw(ii,ij) = 1 619 ibnw(ii,ij) = 1 620 IF( MOD(iproc,jpni) == 0 ) THEN 621 ibsw(ii,ij) = 0 622 ibnw(ii,ij) = 0 623 ENDIF 624 ibse(ii,ij) = 1 625 ibne(ii,ij) = 1 626 IF( MOD(iproc,jpni) == jpni-1 ) THEN 627 ibse(ii,ij) = 0 628 ibne(ii,ij) = 0 629 ENDIF 630 IF( iproc < jpni ) THEN 631 ibsw(ii,ij) = 0 632 ibse(ii,ij) = 0 633 ENDIF 634 IF( iproc >= (jpnj-1)*jpni ) THEN 635 ibnw(ii,ij) = 0 636 ibne(ii,ij) = 0 637 ENDIF 638 IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 ) ildi(ii,ij) = 1 639 IF( ibondi(ii,ij) == 1 .OR. ibondi(ii,ij) == 2 ) ilei(ii,ij) = ili 640 ildj(ii,ij) = 1 + jprecj 641 ilej(ii,ij) = ilj - jprecj 642 IF( ibondj(ii,ij) == -1 .OR. ibondj(ii,ij) == 2 ) ildj(ii,ij) = 1 643 IF( ibondj(ii,ij) == 1 .OR. ibondj(ii,ij) == 2 ) ilej(ii,ij) = ilj 644 645 ! warning ii*ij (zone) /= nproc (processors)! 646 647 IF( jperio == 1 .OR. jperio == 4 .OR. 
jperio == 6 ) THEN 648 IF( jpni == 1 )THEN 649 ibondi(ii,ij) = 2 650 nperio = 1 651 ELSE 652 ibondi(ii,ij) = 0 653 ENDIF 654 IF( MOD(jarea,jpni) == 0 ) THEN 655 ioea(ii,ij) = iproc - (jpni-1) 656 ione(ii,ij) = ione(ii,ij) - jpni 657 iose(ii,ij) = iose(ii,ij) - jpni 658 ENDIF 659 IF( MOD(jarea,jpni) == 1 ) THEN 660 iowe(ii,ij) = iproc + jpni - 1 661 ionw(ii,ij) = ionw(ii,ij) + jpni 662 iosw(ii,ij) = iosw(ii,ij) + jpni 663 ENDIF 664 ibsw(ii,ij) = 1 665 ibnw(ii,ij) = 1 666 ibse(ii,ij) = 1 667 ibne(ii,ij) = 1 668 IF( iproc < jpni ) THEN 669 ibsw(ii,ij) = 0 670 ibse(ii,ij) = 0 671 ENDIF 672 IF( iproc >= (jpnj-1)*jpni ) THEN 673 ibnw(ii,ij) = 0 674 ibne(ii,ij) = 0 675 ENDIF 676 ENDIF 326 ibondi(ii,ij) = 0 ! default: has e-w neighbours 327 IF( ii == 1 ) ibondi(ii,ij) = -1 ! first column, has only e neighbour 328 IF( ii == jpni ) ibondi(ii,ij) = 1 ! last column, has only w neighbour 329 IF( jpni == 1 ) ibondi(ii,ij) = 2 ! has no e-w neighbour 330 ibondj(ii,ij) = 0 ! default: has n-s neighbours 331 IF( ij == 1 ) ibondj(ii,ij) = -1 ! first row, has only n neighbour 332 IF( ij == jpnj ) ibondj(ii,ij) = 1 ! last row, has only s neighbour 333 IF( jpnj == 1 ) ibondj(ii,ij) = 2 ! has no n-s neighbour 334 335 ! Subdomain neighbors (get their zone number): default definition 336 ioso(ii,ij) = iarea0 - jpni 337 iowe(ii,ij) = iarea0 - 1 338 ioea(ii,ij) = iarea0 + 1 339 iono(ii,ij) = iarea0 + jpni 340 ildi(ii,ij) = 1 + nn_hls 341 ilei(ii,ij) = ili - nn_hls 342 ildj(ii,ij) = 1 + nn_hls 343 ilej(ii,ij) = ilj - nn_hls 344 345 ! East-West periodicity: change ibondi, ioea, iowe 346 IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7 ) THEN 347 IF( jpni /= 1 ) ibondi(ii,ij) = 0 ! redefine: all have e-w neighbours 348 IF( ii == 1 ) iowe(ii,ij) = iarea0 + (jpni-1) ! redefine: first column, address of w neighbour 349 IF( ii == jpni ) ioea(ii,ij) = iarea0 - (jpni-1) ! redefine: last column, address of e neighbour 350 ENDIF 351 352 ! Simple North-South periodicity: change ibondj, ioso, iono 353 IF( jperio == 2 .OR. jperio == 7 ) THEN 354 IF( jpnj /= 1 ) ibondj(ii,ij) = 0 ! redefine: all have n-s neighbours 355 IF( ij == 1 ) ioso(ii,ij) = iarea0 + jpni * (jpnj-1) ! redefine: first row, address of s neighbour 356 IF( ij == jpnj ) iono(ii,ij) = iarea0 - jpni * (jpnj-1) ! redefine: last row, address of n neighbour 357 ENDIF 358 359 ! North fold: define ipolj, change iono. Warning: we do not change ibondj... 677 360 ipolj(ii,ij) = 0 678 361 IF( jperio == 3 .OR. jperio == 4 ) THEN … … 690 373 IF( ipolj(ii,ij) == 5) iono(ii,ij) = jpni*jpnj-jarea+ijm1 ! MPI rank of northern neighbour 691 374 ENDIF 692 693 ! Check wet points over the entire domain to preserve the MPI communication stencil 694 isurf = 0 695 DO jj = 1, ilj 696 DO ji = 1, ili 697 IF( imask(ji+iimppt(ii,ij)-1, jj+ijmppt(ii,ij)-1) == 1) isurf = isurf+1 698 END DO 699 END DO 700 701 IF(isurf /= 0) THEN 375 ! 376 END DO 377 378 ! 4. deal with land subdomains 379 ! ---------------------------- 380 ! 381 ! specify which subdomains are oce subdomains; other are land subdomains 382 ipproc(:,:) = -1 383 icont = -1 384 DO jarea = 1, jpni*jpnj 385 iarea0 = jarea - 1 386 ii = 1 + MOD(iarea0,jpni) 387 ij = 1 + iarea0/jpni 388 IF( llisoce(ii,ij) ) THEN 702 389 icont = icont + 1 703 390 ipproc(ii,ij) = icont … … 706 393 ENDIF 707 394 END DO 708 395 ! if needed add some land subdomains to reach jpnij active subdomains 396 i2add = jpnij - inijmin 397 DO jarea = 1, jpni*jpnj 398 iarea0 = jarea - 1 399 ii = 1 + MOD(iarea0,jpni) 400 ij = 1 + iarea0/jpni 401 IF( .NOT. 
llisoce(ii,ij) .AND. i2add > 0 ) THEN 402 icont = icont + 1 403 ipproc(ii,ij) = icont 404 iin(icont+1) = ii 405 ijn(icont+1) = ij 406 i2add = i2add - 1 407 ENDIF 408 END DO 709 409 nfipproc(:,:) = ipproc(:,:) 710 410 711 ! Control 712 IF(icont+1 /= jpnij) THEN 713 WRITE(ctmp1,*) ' jpni =',jpni,' jpnj =',jpnj 714 WRITE(ctmp2,*) ' jpnij =',jpnij, '< jpni x jpnj' 715 WRITE(ctmp3,*) ' ***********, mpp_init2 finds jpnij=',icont+1 716 CALL ctl_stop( ' Eliminate land processors algorithm', '', ctmp1, ctmp2, '', ctmp3 ) 717 ENDIF 718 719 ! 4. Subdomain print 411 ! neighbour treatment: change ibondi, ibondj if next to a land zone 412 DO jarea = 1, jpni*jpnj 413 ii = 1 + MOD( jarea-1 , jpni ) 414 ij = 1 + (jarea-1) / jpni 415 ! land-only area with an active n neigbour 416 IF ( ipproc(ii,ij) == -1 .AND. 0 <= iono(ii,ij) .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN 417 iino = 1 + MOD( iono(ii,ij) , jpni ) ! ii index of this n neigbour 418 ijno = 1 + iono(ii,ij) / jpni ! ij index of this n neigbour 419 ! In case of north fold exchange: I am the n neigbour of my n neigbour!! (#1057) 420 ! --> for northern neighbours of northern row processors (in case of north-fold) 421 ! need to reverse the LOGICAL direction of communication 422 idir = 1 ! we are indeed the s neigbour of this n neigbour 423 IF( ij == jpnj .AND. ijno == jpnj ) idir = -1 ! both are on the last row, we are in fact the n neigbour 424 IF( ibondj(iino,ijno) == idir ) ibondj(iino,ijno) = 2 ! this n neigbour had only a s/n neigbour -> no more 425 IF( ibondj(iino,ijno) == 0 ) ibondj(iino,ijno) = -idir ! this n neigbour had both, n-s neighbours -> keep 1 426 ENDIF 427 ! land-only area with an active s neigbour 428 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN 429 iiso = 1 + MOD( ioso(ii,ij) , jpni ) ! ii index of this s neigbour 430 ijso = 1 + ioso(ii,ij) / jpni ! ij index of this s neigbour 431 IF( ibondj(iiso,ijso) == -1 ) ibondj(iiso,ijso) = 2 ! this s neigbour had only a n neigbour -> no more neigbour 432 IF( ibondj(iiso,ijso) == 0 ) ibondj(iiso,ijso) = 1 ! this s neigbour had both, n-s neighbours -> keep s neigbour 433 ENDIF 434 ! land-only area with an active e neigbour 435 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= jpni*jpnj-1 ) THEN 436 iiea = 1 + MOD( ioea(ii,ij) , jpni ) ! ii index of this e neigbour 437 ijea = 1 + ioea(ii,ij) / jpni ! ij index of this e neigbour 438 IF( ibondi(iiea,ijea) == 1 ) ibondi(iiea,ijea) = 2 ! this e neigbour had only a w neigbour -> no more neigbour 439 IF( ibondi(iiea,ijea) == 0 ) ibondi(iiea,ijea) = -1 ! this e neigbour had both, e-w neighbours -> keep e neigbour 440 ENDIF 441 ! land-only area with an active w neigbour 442 IF( ipproc(ii,ij) == -1 .AND. 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN 443 iiwe = 1 + MOD( iowe(ii,ij) , jpni ) ! ii index of this w neigbour 444 ijwe = 1 + iowe(ii,ij) / jpni ! ij index of this w neigbour 445 IF( ibondi(iiwe,ijwe) == -1 ) ibondi(iiwe,ijwe) = 2 ! this w neigbour had only a e neigbour -> no more neigbour 446 IF( ibondi(iiwe,ijwe) == 0 ) ibondi(iiwe,ijwe) = 1 ! this w neigbour had both, e-w neighbours -> keep w neigbour 447 ENDIF 448 END DO 449 450 ! Update il[de][ij] according to modified ibond[ij] 451 ! ---------------------- 452 DO jproc = 1, jpnij 453 ii = iin(jproc) 454 ij = ijn(jproc) 455 IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 ) ildi(ii,ij) = 1 456 IF( ibondi(ii,ij) == 1 .OR. ibondi(ii,ij) == 2 ) ilei(ii,ij) = ilci(ii,ij) 457 IF( ibondj(ii,ij) == -1 .OR. 
ibondj(ii,ij) == 2 ) ildj(ii,ij) = 1 458 IF( ibondj(ii,ij) == 1 .OR. ibondj(ii,ij) == 2 ) ilej(ii,ij) = ilcj(ii,ij) 459 END DO 460 461 ! 5. Subdomain print 720 462 ! ------------------ 721 722 463 IF(lwp) THEN 723 464 ifreq = 4 724 465 il1 = 1 725 DO jn = 1, (jpni-1)/ifreq+1466 DO jn = 1, (jpni-1)/ifreq+1 726 467 il2 = MIN(jpni,il1+ifreq-1) 727 468 WRITE(numout,*) … … 737 478 il1 = il1+ifreq 738 479 END DO 739 9400 FORMAT(' ***',20('*************',a3)) 740 9403 FORMAT(' * ',20(' * ',a3)) 741 9401 FORMAT(' ',20(' ',i3,' ')) 742 9402 FORMAT(' ',i3,' * ',20(i3,' x',i3,' * ')) 743 9404 FORMAT(' * ',20(' ',i3,' * ')) 744 ENDIF 745 746 747 ! 5. neighbour treatment 748 ! ---------------------- 749 750 DO jarea = 1, jpni*jpnj 751 iproc = jarea-1 752 ii = 1 + MOD(jarea-1,jpni) 753 ij = 1 + (jarea-1)/jpni 754 IF( ipproc(ii,ij) == -1 .AND. iono(ii,ij) >= 0 & 755 .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN 756 iino = 1 + MOD(iono(ii,ij),jpni) 757 ijno = 1 + (iono(ii,ij))/jpni 758 ! Need to reverse the logical direction of communication 759 ! for northern neighbours of northern row processors (north-fold) 760 ! i.e. need to check that the northern neighbour only communicates 761 ! to the SOUTH (or not at all) if this area is land-only (#1057) 762 idir = 1 763 IF( ij .eq. jpnj .AND. ijno .eq. jpnj ) idir = -1 764 IF( ibondj(iino,ijno) == idir ) ibondj(iino,ijno)=2 765 IF( ibondj(iino,ijno) == 0 ) ibondj(iino,ijno) = -idir 766 ENDIF 767 IF( ipproc(ii,ij) == -1 .AND. ioso(ii,ij) >= 0 & 768 .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN 769 iiso = 1 + MOD(ioso(ii,ij),jpni) 770 ijso = 1 + (ioso(ii,ij))/jpni 771 IF( ibondj(iiso,ijso) == -1 ) ibondj(iiso,ijso) = 2 772 IF( ibondj(iiso,ijso) == 0 ) ibondj(iiso,ijso) = 1 773 ENDIF 774 IF( ipproc(ii,ij) == -1 .AND. ioea(ii,ij) >= 0 & 775 .AND. ioea(ii,ij) <= jpni*jpnj-1) THEN 776 iiea = 1 + MOD(ioea(ii,ij),jpni) 777 ijea = 1 + (ioea(ii,ij))/jpni 778 IF( ibondi(iiea,ijea) == 1 ) ibondi(iiea,ijea) = 2 779 IF( ibondi(iiea,ijea) == 0 ) ibondi(iiea,ijea) = -1 780 ENDIF 781 IF( ipproc(ii,ij) == -1 .AND. iowe(ii,ij) >= 0 & 782 .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN 783 iiwe = 1 + MOD(iowe(ii,ij),jpni) 784 ijwe = 1 + (iowe(ii,ij))/jpni 785 IF( ibondi(iiwe,ijwe) == -1 ) ibondi(iiwe,ijwe) = 2 786 IF( ibondi(iiwe,ijwe) == 0 ) ibondi(iiwe,ijwe) = 1 787 ENDIF 788 IF( ipproc(ii,ij) == -1 .AND. ibne(ii,ij) == 1 ) THEN 789 iine = 1 + MOD(ione(ii,ij),jpni) 790 ijne = 1 + (ione(ii,ij))/jpni 791 IF( ibsw(iine,ijne) == 1 ) ibsw(iine,ijne) = 0 792 ENDIF 793 IF( ipproc(ii,ij) == -1 .AND. ibsw(ii,ij) == 1 ) THEN 794 iisw = 1 + MOD(iosw(ii,ij),jpni) 795 ijsw = 1 + (iosw(ii,ij))/jpni 796 IF( ibne(iisw,ijsw) == 1 ) ibne(iisw,ijsw) = 0 797 ENDIF 798 IF( ipproc(ii,ij) == -1 .AND. ibnw(ii,ij) == 1 ) THEN 799 iinw = 1 + MOD(ionw(ii,ij),jpni) 800 ijnw = 1 + (ionw(ii,ij))/jpni 801 IF( ibse(iinw,ijnw) == 1 ) ibse(iinw,ijnw)=0 802 ENDIF 803 IF( ipproc(ii,ij) == -1 .AND. ibse(ii,ij) == 1 ) THEN 804 iise = 1 + MOD(iose(ii,ij),jpni) 805 ijse = 1 + (iose(ii,ij))/jpni 806 IF( ibnw(iise,ijse) == 1 ) ibnw(iise,ijse) = 0 807 ENDIF 808 END DO 809 810 480 9400 FORMAT(' ***' ,20('*************',a3) ) 481 9403 FORMAT(' * ',20(' * ',a3) ) 482 9401 FORMAT(' ' ,20(' ',i3,' ') ) 483 9402 FORMAT(' ',i3,' * ',20(i3,' x',i3,' * ') ) 484 9404 FORMAT(' * ' ,20(' ',i3,' * ') ) 485 ENDIF 486 487 ! just to save nono etc for all proc 488 ! warning ii*ij (zone) /= nproc (processors)! 489 ! 
ioso = zone number, ii_noso = proc number 490 ii_noso(:) = -1 491 ii_nono(:) = -1 492 ii_noea(:) = -1 493 ii_nowe(:) = -1 494 DO jproc = 1, jpnij 495 ii = iin(jproc) 496 ij = ijn(jproc) 497 IF( 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= (jpni*jpnj-1) ) THEN 498 iiso = 1 + MOD( ioso(ii,ij) , jpni ) 499 ijso = 1 + ioso(ii,ij) / jpni 500 ii_noso(jproc) = ipproc(iiso,ijso) 501 ENDIF 502 IF( 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= (jpni*jpnj-1) ) THEN 503 iiwe = 1 + MOD( iowe(ii,ij) , jpni ) 504 ijwe = 1 + iowe(ii,ij) / jpni 505 ii_nowe(jproc) = ipproc(iiwe,ijwe) 506 ENDIF 507 IF( 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= (jpni*jpnj-1) ) THEN 508 iiea = 1 + MOD( ioea(ii,ij) , jpni ) 509 ijea = 1 + ioea(ii,ij) / jpni 510 ii_noea(jproc)= ipproc(iiea,ijea) 511 ENDIF 512 IF( 0 <= iono(ii,ij) .AND. iono(ii,ij) <= (jpni*jpnj-1) ) THEN 513 iino = 1 + MOD( iono(ii,ij) , jpni ) 514 ijno = 1 + iono(ii,ij) / jpni 515 ii_nono(jproc)= ipproc(iino,ijno) 516 ENDIF 517 END DO 518 811 519 ! 6. Change processor name 812 520 ! ------------------------ 813 814 nproc = narea-1815 521 ii = iin(narea) 816 522 ij = ijn(narea) 817 523 ! 818 524 ! set default neighbours 819 noso = ioso(ii,ij) 820 nowe = iowe(ii,ij) 821 noea = ioea(ii,ij) 822 nono = iono(ii,ij) 823 npse = iose(ii,ij) 824 npsw = iosw(ii,ij) 825 npne = ione(ii,ij) 826 npnw = ionw(ii,ij) 827 828 ! check neighbours location 829 IF( ioso(ii,ij) >= 0 .AND. ioso(ii,ij) <= (jpni*jpnj-1) ) THEN 830 iiso = 1 + MOD(ioso(ii,ij),jpni) 831 ijso = 1 + (ioso(ii,ij))/jpni 832 noso = ipproc(iiso,ijso) 833 ENDIF 834 IF( iowe(ii,ij) >= 0 .AND. iowe(ii,ij) <= (jpni*jpnj-1) ) THEN 835 iiwe = 1 + MOD(iowe(ii,ij),jpni) 836 ijwe = 1 + (iowe(ii,ij))/jpni 837 nowe = ipproc(iiwe,ijwe) 838 ENDIF 839 IF( ioea(ii,ij) >= 0 .AND. ioea(ii,ij) <= (jpni*jpnj-1) ) THEN 840 iiea = 1 + MOD(ioea(ii,ij),jpni) 841 ijea = 1 + (ioea(ii,ij))/jpni 842 noea = ipproc(iiea,ijea) 843 ENDIF 844 IF( iono(ii,ij) >= 0 .AND. iono(ii,ij) <= (jpni*jpnj-1) ) THEN 845 iino = 1 + MOD(iono(ii,ij),jpni) 846 ijno = 1 + (iono(ii,ij))/jpni 847 nono = ipproc(iino,ijno) 848 ENDIF 849 IF( iose(ii,ij) >= 0 .AND. iose(ii,ij) <= (jpni*jpnj-1) ) THEN 850 iise = 1 + MOD(iose(ii,ij),jpni) 851 ijse = 1 + (iose(ii,ij))/jpni 852 npse = ipproc(iise,ijse) 853 ENDIF 854 IF( iosw(ii,ij) >= 0 .AND. iosw(ii,ij) <= (jpni*jpnj-1) ) THEN 855 iisw = 1 + MOD(iosw(ii,ij),jpni) 856 ijsw = 1 + (iosw(ii,ij))/jpni 857 npsw = ipproc(iisw,ijsw) 858 ENDIF 859 IF( ione(ii,ij) >= 0 .AND. ione(ii,ij) <= (jpni*jpnj-1) ) THEN 860 iine = 1 + MOD(ione(ii,ij),jpni) 861 ijne = 1 + (ione(ii,ij))/jpni 862 npne = ipproc(iine,ijne) 863 ENDIF 864 IF( ionw(ii,ij) >= 0 .AND. ionw(ii,ij) <= (jpni*jpnj-1) ) THEN 865 iinw = 1 + MOD(ionw(ii,ij),jpni) 866 ijnw = 1 + (ionw(ii,ij))/jpni 867 npnw = ipproc(iinw,ijnw) 868 ENDIF 869 nbnw = ibnw(ii,ij) 870 nbne = ibne(ii,ij) 871 nbsw = ibsw(ii,ij) 872 nbse = ibse(ii,ij) 873 nlcj = ilcj(ii,ij) 525 noso = ii_noso(narea) 526 nowe = ii_nowe(narea) 527 noea = ii_noea(narea) 528 nono = ii_nono(narea) 874 529 nlci = ilci(ii,ij) 875 530 nldi = ildi(ii,ij) 876 531 nlei = ilei(ii,ij) 532 nlcj = ilcj(ii,ij) 877 533 nldj = ildj(ii,ij) 878 534 nlej = ilej(ii,ij) … … 880 536 nbondj = ibondj(ii,ij) 881 537 nimpp = iimppt(ii,ij) 882 njmpp = ijmppt(ii,ij) 538 njmpp = ijmppt(ii,ij) 539 jpi = nlci 540 jpj = nlcj 541 jpk = jpkglo ! third dim 542 #if defined key_agrif 543 ! simple trick to use same vertical grid as parent but different number of levels: 544 ! Save maximum number of levels in jpkglo, then define all vertical grids with this number. 545 ! 
Suppress once vertical online interpolation is ok 546 !!$ IF(.NOT.Agrif_Root()) jpkglo = Agrif_Parent( jpkglo ) 547 #endif 548 jpim1 = jpi-1 ! inner domain indices 549 jpjm1 = jpj-1 ! " " 550 jpkm1 = MAX( 1, jpk-1 ) ! " " 551 jpij = jpi*jpj ! jpi x j 883 552 DO jproc = 1, jpnij 884 553 ii = iin(jproc) 885 554 ij = ijn(jproc) 886 nimppt(jproc) = iimppt(ii,ij)887 njmppt(jproc) = ijmppt(ii,ij)888 nlcjt(jproc) = ilcj(ii,ij)889 555 nlcit(jproc) = ilci(ii,ij) 890 556 nldit(jproc) = ildi(ii,ij) 891 557 nleit(jproc) = ilei(ii,ij) 558 nlcjt(jproc) = ilcj(ii,ij) 892 559 nldjt(jproc) = ildj(ii,ij) 893 560 nlejt(jproc) = ilej(ii,ij) 561 ibonit(jproc) = ibondi(ii,ij) 562 ibonjt(jproc) = ibondj(ii,ij) 563 nimppt(jproc) = iimppt(ii,ij) 564 njmppt(jproc) = ijmppt(ii,ij) 894 565 END DO 895 566 896 567 ! Save processor layout in ascii file 897 IF (l wp) THEN568 IF (llwrtlay) THEN 898 569 CALL ctl_opn( inum, 'layout.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea ) 899 WRITE(inum,'(a)') ' jpnij jpi jpj jpk jpiglo jpjglo' 900 WRITE(inum,'(6i8)') jpnij,jpi,jpj,jpk,jpiglo,jpjglo 901 WRITE(inum,'(a)') 'NAREA nlci nlcj nldi nldj nlei nlej nimpp njmpp' 902 903 DO jproc = 1, jpnij 904 WRITE(inum,'(9i5)') jproc, nlcit(jproc), nlcjt(jproc), & 905 nldit(jproc), nldjt(jproc), & 906 nleit(jproc), nlejt(jproc), & 907 nimppt(jproc), njmppt(jproc) 908 END DO 909 CLOSE(inum) 570 WRITE(inum,'(a)') ' jpnij jpimax jpjmax jpk jpiglo jpjglo'//& 571 & ' ( local: narea jpi jpj )' 572 WRITE(inum,'(6i8,a,3i8,a)') jpnij,jpimax,jpjmax,jpk,jpiglo,jpjglo,& 573 & ' ( local: ',narea,jpi,jpj,' )' 574 WRITE(inum,'(a)') 'nproc nlci nlcj nldi nldj nlei nlej nimp njmp nono noso nowe noea nbondi nbondj ' 575 576 DO jproc = 1, jpnij 577 WRITE(inum,'(13i5,2i7)') jproc-1, nlcit (jproc), nlcjt (jproc), & 578 & nldit (jproc), nldjt (jproc), & 579 & nleit (jproc), nlejt (jproc), & 580 & nimppt (jproc), njmppt (jproc), & 581 & ii_nono(jproc), ii_noso(jproc), & 582 & ii_nowe(jproc), ii_noea(jproc), & 583 & ibonit (jproc), ibonjt (jproc) 584 END DO 910 585 END IF 911 586 587 ! ! north fold parameter 912 588 ! Defined npolj, either 0, 3 , 4 , 5 , 6 913 589 ! In this case the important thing is that npolj /= 0 914 590 ! Because if we go through these line it is because jpni >1 and thus 915 591 ! we must use lbcnorthmpp, which tests only npolj =0 or npolj /= 0 916 917 592 npolj = 0 918 593 ij = ijn(narea) 919 920 594 IF( jperio == 3 .OR. jperio == 4 ) THEN 921 IF( ij == jpnj ) npolj = 3 922 ENDIF 923 595 IF( ij == jpnj ) npolj = 3 596 ENDIF 924 597 IF( jperio == 5 .OR. jperio == 6 ) THEN 925 IF( ij == jpnj ) npolj = 5 926 ENDIF 927 928 ! Periodicity : no corner if nbondi = 2 and nperio != 1 929 598 IF( ij == jpnj ) npolj = 5 599 ENDIF 600 ! 601 nproc = narea-1 930 602 IF(lwp) THEN 931 WRITE(numout,*) ' nproc = ', nproc932 WRITE(numout,*) ' nowe = ', nowe , ' noea = ', noea933 WRITE(numout,*) ' nono = ', nono , ' noso = ', noso934 WRITE(numout,*) ' nbondi = ', nbondi935 WRITE(numout,*) ' nbondj = ', nbondj936 WRITE(numout,*) ' npolj = ', npolj937 WRITE(numout,*) ' nperio = ', nperio938 WRITE(numout,*) ' nlci = ', nlci939 WRITE(numout,*) ' nlcj = ', nlcj940 WRITE(numout,*) ' nimpp = ', nimpp941 WRITE(numout,*) ' njmpp = ', njmpp942 WRITE(numout,*) ' nreci = ', nreci , ' npse = ', npse943 WRITE(numout,*) ' nrecj = ', nrecj , ' npsw = ', npsw944 WRITE(numout,*) ' jpreci = ', jpreci , ' npne = ', npne945 WRITE(numout,*) ' jprecj = ', jprecj , ' npnw = ', npnw946 603 WRITE(numout,*) 947 ENDIF 948 949 IF( nperio == 1 .AND. 
jpni /= 1 ) CALL ctl_stop( ' mpp_init2: error on cyclicity' ) 950 951 ! Prepare mpp north fold 952 604 WRITE(numout,*) ' resulting internal parameters : ' 605 WRITE(numout,*) ' nproc = ', nproc 606 WRITE(numout,*) ' nowe = ', nowe , ' noea = ', noea 607 WRITE(numout,*) ' nono = ', nono , ' noso = ', noso 608 WRITE(numout,*) ' nbondi = ', nbondi 609 WRITE(numout,*) ' nbondj = ', nbondj 610 WRITE(numout,*) ' npolj = ', npolj 611 WRITE(numout,*) ' l_Iperio = ', l_Iperio 612 WRITE(numout,*) ' l_Jperio = ', l_Jperio 613 WRITE(numout,*) ' nlci = ', nlci 614 WRITE(numout,*) ' nlcj = ', nlcj 615 WRITE(numout,*) ' nimpp = ', nimpp 616 WRITE(numout,*) ' njmpp = ', njmpp 617 WRITE(numout,*) ' nreci = ', nreci 618 WRITE(numout,*) ' nrecj = ', nrecj 619 WRITE(numout,*) ' nn_hls = ', nn_hls 620 ENDIF 621 622 ! ! Prepare mpp north fold 953 623 IF( jperio >= 3 .AND. jperio <= 6 .AND. jpni > 1 ) THEN 954 624 CALL mpp_ini_north 955 IF(lwp) WRITE(numout,*) ' mpp_init2 : North fold boundary prepared for jpni >1' 956 ENDIF 957 958 ! Prepare NetCDF output file (if necessary) 959 CALL mpp_init_ioipsl 960 961 962 END SUBROUTINE mpp_init2 625 IF (lwp) THEN 626 WRITE(numout,*) 627 WRITE(numout,*) ' ==>>> North fold boundary prepared for jpni >1' 628 ! additional prints in layout.dat 629 ENDIF 630 IF (llwrtlay) THEN 631 WRITE(inum,*) 632 WRITE(inum,*) 633 WRITE(inum,*) 'number of subdomains located along the north fold : ', ndim_rank_north 634 WRITE(inum,*) 'Rank of the subdomains located along the north fold : ', ndim_rank_north 635 DO jproc = 1, ndim_rank_north, 5 636 WRITE(inum,*) nrank_north( jproc:MINVAL( (/jproc+4,ndim_rank_north/) ) ) 637 END DO 638 ENDIF 639 ENDIF 640 ! 641 CALL mpp_init_ioipsl ! Prepare NetCDF output file (if necessary) 642 ! 643 IF( ln_nnogather ) THEN 644 CALL mpp_init_nfdcom ! northfold neighbour lists 645 IF (llwrtlay) THEN 646 WRITE(inum,*) 647 WRITE(inum,*) 648 WRITE(inum,*) 'north fold exchanges with explicit point-to-point messaging :' 649 WRITE(inum,*) 'nfsloop : ', nfsloop 650 WRITE(inum,*) 'nfeloop : ', nfeloop 651 WRITE(inum,*) 'nsndto : ', nsndto 652 WRITE(inum,*) 'isendto : ', isendto 653 ENDIF 654 ENDIF 655 ! 656 IF (llwrtlay) CLOSE(inum) 657 ! 658 DEALLOCATE(iin, ijn, ii_nono, ii_noea, ii_noso, ii_nowe, & 659 & iimppt, ijmppt, ibondi, ibondj, ipproc, ipolj, & 660 & ilci, ilcj, ilei, ilej, ildi, ildj, & 661 & iono, ioea, ioso, iowe, llisoce) 662 ! 663 END SUBROUTINE mpp_init 664 665 666 SUBROUTINE mpp_basic_decomposition( knbi, knbj, kimax, kjmax, kimppt, kjmppt, klci, klcj) 667 !!---------------------------------------------------------------------- 668 !! *** ROUTINE mpp_basic_decomposition *** 669 !! 670 !! ** Purpose : Lay out the global domain over processors. 671 !! 672 !! ** Method : Global domain is distributed in smaller local domains. 673 !! 674 !! ** Action : - set for all knbi*knbj domains: 675 !! kimppt : longitudinal index 676 !! kjmppt : latitudinal index 677 !! klci : first dimension 678 !! klcj : second dimension 679 !!---------------------------------------------------------------------- 680 INTEGER, INTENT(in ) :: knbi, knbj 681 INTEGER, INTENT( out) :: kimax, kjmax 682 INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT( out) :: kimppt, kjmppt 683 INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT( out) :: klci, klcj 684 ! 685 INTEGER :: ji, jj 686 INTEGER :: iresti, irestj, irm, ijpjmin 687 INTEGER :: ireci, irecj 688 !!---------------------------------------------------------------------- 689 ! 
690 #if defined key_nemocice_decomp 691 kimax = ( nx_global+2-2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls ! first dim. 692 kjmax = ( ny_global+2-2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls ! second dim. 693 #else 694 kimax = ( jpiglo - 2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls ! first dim. 695 kjmax = ( jpjglo - 2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls ! second dim. 696 #endif 697 IF( .NOT. PRESENT(kimppt) ) RETURN 698 ! 699 ! 1. Dimension arrays for subdomains 700 ! ----------------------------------- 701 ! Computation of local domain sizes klci() klcj() 702 ! These dimensions depend on global sizes knbi,knbj and jpiglo,jpjglo 703 ! The subdomains are squares lesser than or equal to the global 704 ! dimensions divided by the number of processors minus the overlap array. 705 ! 706 ireci = 2 * nn_hls 707 irecj = 2 * nn_hls 708 iresti = 1 + MOD( jpiglo - ireci -1 , knbi ) 709 irestj = 1 + MOD( jpjglo - irecj -1 , knbj ) 710 ! 711 ! Need to use kimax and kjmax here since jpi and jpj not yet defined 712 #if defined key_nemocice_decomp 713 ! Change padding to be consistent with CICE 714 klci(1:knbi-1 ,:) = kimax 715 klci(knbi ,:) = jpiglo - (knbi - 1) * (kimax - nreci) 716 klcj(:, 1:knbj-1) = kjmax 717 klcj(:, knbj) = jpjglo - (knbj - 1) * (kjmax - nrecj) 718 #else 719 klci(1:iresti ,:) = kimax 720 klci(iresti+1:knbi ,:) = kimax-1 721 IF( MINVAL(klci) < 3 ) THEN 722 WRITE(ctmp1,*) ' mpp_basic_decomposition: minimum value of jpi must be >= 3' 723 WRITE(ctmp2,*) ' We have ', MINVAL(klci) 724 CALL ctl_stop( 'STOP', ctmp1, ctmp2 ) 725 ENDIF 726 IF( jperio == 3 .OR. jperio == 4 .OR. jperio == 5 .OR. jperio == 6 ) THEN 727 ! minimize the size of the last row to compensate for the north pole folding coast 728 IF( jperio == 3 .OR. jperio == 4 ) ijpjmin = 5 ! V and F folding involves line jpj-3 that must not be south boundary 729 IF( jperio == 5 .OR. jperio == 6 ) ijpjmin = 4 ! V and F folding involves line jpj-2 that must not be south boundary 730 irm = knbj - irestj ! total number of lines to be removed 731 klcj(:, knbj) = MAX( ijpjmin, kjmax-irm ) ! we must have jpj >= ijpjmin in the last row 732 irm = irm - ( kjmax - klcj(1,knbj) ) ! remaining number of lines to remove 733 irestj = knbj - 1 - irm 734 klcj(:, 1:irestj) = kjmax 735 klcj(:, irestj+1:knbj-1) = kjmax-1 736 ELSE 737 ijpjmin = 3 738 klcj(:, 1:irestj) = kjmax 739 klcj(:, irestj+1:knbj) = kjmax-1 740 ENDIF 741 IF( MINVAL(klcj) < ijpjmin ) THEN 742 WRITE(ctmp1,*) ' mpp_basic_decomposition: minimum value of jpj must be >= ', ijpjmin 743 WRITE(ctmp2,*) ' We have ', MINVAL(klcj) 744 CALL ctl_stop( 'STOP', ctmp1, ctmp2 ) 745 ENDIF 746 #endif 747 748 ! 2. Index arrays for subdomains 749 ! ------------------------------- 750 kimppt(:,:) = 1 751 kjmppt(:,:) = 1 752 ! 753 IF( knbi > 1 ) THEN 754 DO jj = 1, knbj 755 DO ji = 2, knbi 756 kimppt(ji,jj) = kimppt(ji-1,jj) + klci(ji-1,jj) - ireci 757 END DO 758 END DO 759 ENDIF 760 ! 761 IF( knbj > 1 )THEN 762 DO jj = 2, knbj 763 DO ji = 1, knbi 764 kjmppt(ji,jj) = kjmppt(ji,jj-1) + klcj(ji,jj-1) - irecj 765 END DO 766 END DO 767 ENDIF 768 769 END SUBROUTINE mpp_basic_decomposition 770 771 772 SUBROUTINE mpp_init_bestpartition( knbij, knbi, knbj, knbcnt, ldlist ) 773 !!---------------------------------------------------------------------- 774 !! *** ROUTINE mpp_init_bestpartition *** 775 !! 776 !! ** Purpose : 777 !! 778 !! ** Method : 779 !!---------------------------------------------------------------------- 780 INTEGER, INTENT(in ) :: knbij ! 
total number if subdomains (knbi*knbj) 781 INTEGER, OPTIONAL, INTENT( out) :: knbi, knbj ! number if subdomains along i and j (knbi and knbj) 782 INTEGER, OPTIONAL, INTENT( out) :: knbcnt ! number of land subdomains 783 LOGICAL, OPTIONAL, INTENT(in ) :: ldlist ! .true.: print the list the best domain decompositions (with land) 784 ! 785 INTEGER :: ji, jj, ii, iitarget 786 INTEGER :: iszitst, iszjtst 787 INTEGER :: isziref, iszjref 788 INTEGER :: inbij, iszij 789 INTEGER :: inbimax, inbjmax, inbijmax 790 INTEGER :: isz0, isz1 791 INTEGER, DIMENSION( :), ALLOCATABLE :: indexok 792 INTEGER, DIMENSION( :), ALLOCATABLE :: inbi0, inbj0, inbij0 ! number of subdomains along i,j 793 INTEGER, DIMENSION( :), ALLOCATABLE :: iszi0, iszj0, iszij0 ! max size of the subdomains along i,j 794 INTEGER, DIMENSION( :), ALLOCATABLE :: inbi1, inbj1, inbij1 ! number of subdomains along i,j 795 INTEGER, DIMENSION( :), ALLOCATABLE :: iszi1, iszj1, iszij1 ! max size of the subdomains along i,j 796 LOGICAL :: llist 797 LOGICAL, DIMENSION(:,:), ALLOCATABLE :: llmsk2d ! max size of the subdomains along i,j 798 LOGICAL, DIMENSION(:,:), ALLOCATABLE :: llisoce ! - - 799 REAL(wp):: zpropland 800 !!---------------------------------------------------------------------- 801 ! 802 llist = .FALSE. 803 IF( PRESENT(ldlist) ) llist = ldlist 804 805 CALL mpp_init_landprop( zpropland ) ! get the proportion of land point over the gloal domain 806 inbij = NINT( REAL(knbij, wp) / ( 1.0 - zpropland ) ) ! define the largest possible value for jpni*jpnj 807 ! 808 IF( llist ) THEN ; inbijmax = inbij*2 809 ELSE ; inbijmax = inbij 810 ENDIF 811 ! 812 ALLOCATE(inbi0(inbijmax),inbj0(inbijmax),iszi0(inbijmax),iszj0(inbijmax)) 813 ! 814 inbimax = 0 815 inbjmax = 0 816 isziref = jpiglo*jpjglo+1 817 iszjref = jpiglo*jpjglo+1 818 ! 819 ! get the list of knbi that gives a smaller jpimax than knbi-1 820 ! get the list of knbj that gives a smaller jpjmax than knbj-1 821 DO ji = 1, inbijmax 822 #if defined key_nemocice_decomp 823 iszitst = ( nx_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls ! first dim. 824 #else 825 iszitst = ( jpiglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls 826 #endif 827 IF( iszitst < isziref ) THEN 828 isziref = iszitst 829 inbimax = inbimax + 1 830 inbi0(inbimax) = ji 831 iszi0(inbimax) = isziref 832 ENDIF 833 #if defined key_nemocice_decomp 834 iszjtst = ( ny_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls ! first dim. 835 #else 836 iszjtst = ( jpjglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls 837 #endif 838 IF( iszjtst < iszjref ) THEN 839 iszjref = iszjtst 840 inbjmax = inbjmax + 1 841 inbj0(inbjmax) = ji 842 iszj0(inbjmax) = iszjref 843 ENDIF 844 END DO 845 846 ! combine these 2 lists to get all possible knbi*knbj < inbijmax 847 ALLOCATE( llmsk2d(inbimax,inbjmax) ) 848 DO jj = 1, inbjmax 849 DO ji = 1, inbimax 850 IF ( inbi0(ji) * inbj0(jj) <= inbijmax ) THEN ; llmsk2d(ji,jj) = .TRUE. 851 ELSE ; llmsk2d(ji,jj) = .FALSE. 852 ENDIF 853 END DO 854 END DO 855 isz1 = COUNT(llmsk2d) 856 ALLOCATE( inbi1(isz1), inbj1(isz1), iszi1(isz1), iszj1(isz1) ) 857 ii = 0 858 DO jj = 1, inbjmax 859 DO ji = 1, inbimax 860 IF( llmsk2d(ji,jj) .EQV. .TRUE. ) THEN 861 ii = ii + 1 862 inbi1(ii) = inbi0(ji) 863 inbj1(ii) = inbj0(jj) 864 iszi1(ii) = iszi0(ji) 865 iszj1(ii) = iszj0(jj) 866 END IF 867 END DO 868 END DO 869 DEALLOCATE( inbi0, inbj0, iszi0, iszj0 ) 870 DEALLOCATE( llmsk2d ) 871 872 ALLOCATE( inbij1(isz1), iszij1(isz1) ) 873 inbij1(:) = inbi1(:) * inbj1(:) 874 iszij1(:) = iszi1(:) * iszj1(:) 875 876 ! if therr is no land and no print 877 IF( .NOT. 
llist .AND. numbot == -1 .AND. numbdy == -1 ) THEN 878 ! get the smaller partition which gives the smallest subdomain size 879 ii = MINLOC(inbij1, mask = iszij1 == MINVAL(iszij1), dim = 1) 880 knbi = inbi1(ii) 881 knbj = inbj1(ii) 882 IF(PRESENT(knbcnt)) knbcnt = 0 883 DEALLOCATE( inbi1, inbj1, inbij1, iszi1, iszj1, iszij1 ) 884 RETURN 885 ENDIF 886 887 ! extract only the partitions which reduce the subdomain size in comparison with smaller partitions 888 ALLOCATE( indexok(isz1) ) ! to store indices of the best partitions 889 isz0 = 0 ! number of best partitions 890 inbij = 1 ! start with the min value of inbij1 => 1 891 iszij = jpiglo*jpjglo+1 ! default: larger than global domain 892 DO WHILE( inbij <= inbijmax ) ! if we did not reach the max of inbij1 893 ii = MINLOC(iszij1, mask = inbij1 == inbij, dim = 1) ! warning: send back the first occurence if multiple results 894 IF ( iszij1(ii) < iszij ) THEN 895 isz0 = isz0 + 1 896 indexok(isz0) = ii 897 iszij = iszij1(ii) 898 ENDIF 899 inbij = MINVAL(inbij1, mask = inbij1 > inbij) ! warning: return largest integer value if mask = .false. everywhere 900 END DO 901 DEALLOCATE( inbij1, iszij1 ) 902 903 ! keep only the best partitions (sorted by increasing order of subdomains number and decreassing subdomain size) 904 ALLOCATE( inbi0(isz0), inbj0(isz0), iszi0(isz0), iszj0(isz0) ) 905 DO ji = 1, isz0 906 ii = indexok(ji) 907 inbi0(ji) = inbi1(ii) 908 inbj0(ji) = inbj1(ii) 909 iszi0(ji) = iszi1(ii) 910 iszj0(ji) = iszj1(ii) 911 END DO 912 DEALLOCATE( indexok, inbi1, inbj1, iszi1, iszj1 ) 913 914 IF( llist ) THEN ! we print about 21 best partitions 915 IF(lwp) THEN 916 WRITE(numout,*) 917 WRITE(numout, *) ' For your information:' 918 WRITE(numout,'(a,i5,a)') ' list of the best partitions around ', knbij, ' mpi processes' 919 WRITE(numout, *) ' --------------------------------------', '-----', '--------------' 920 WRITE(numout,*) 921 END IF 922 iitarget = MINLOC( inbi0(:)*inbj0(:), mask = inbi0(:)*inbj0(:) >= knbij, dim = 1 ) 923 DO ji = MAX(1,iitarget-10), MIN(isz0,iitarget+10) 924 ALLOCATE( llisoce(inbi0(ji), inbj0(ji)) ) 925 CALL mpp_init_isoce( inbi0(ji), inbj0(ji), llisoce ) ! Warning: must be call by all cores (call mpp_sum) 926 inbij = COUNT(llisoce) 927 DEALLOCATE( llisoce ) 928 IF(lwp) WRITE(numout,'(a, i5, a, i5, a, i4, a, i4, a, i9, a, i5, a, i5, a)') & 929 & 'nb_cores ' , inbij,' oce + ', inbi0(ji)*inbj0(ji) - inbij & 930 & , ' land ( ', inbi0(ji),' x ', inbj0(ji), & 931 & ' ), nb_points ', iszi0(ji)*iszj0(ji),' ( ', iszi0(ji),' x ', iszj0(ji),' )' 932 END DO 933 DEALLOCATE( inbi0, inbj0, iszi0, iszj0 ) 934 RETURN 935 ENDIF 936 937 DEALLOCATE( iszi0, iszj0 ) 938 inbij = inbijmax + 1 ! default: larger than possible 939 ii = isz0+1 ! start from the end of the list (smaller subdomains) 940 DO WHILE( inbij > knbij ) ! while the number of ocean subdomains exceed the number of procs 941 ii = ii -1 942 ALLOCATE( llisoce(inbi0(ii), inbj0(ii)) ) 943 CALL mpp_init_isoce( inbi0(ii), inbj0(ii), llisoce ) ! must be done by all core 944 inbij = COUNT(llisoce) 945 DEALLOCATE( llisoce ) 946 END DO 947 knbi = inbi0(ii) 948 knbj = inbj0(ii) 949 IF(PRESENT(knbcnt)) knbcnt = knbi * knbj - inbij 950 DEALLOCATE( inbi0, inbj0 ) 951 ! 952 END SUBROUTINE mpp_init_bestpartition 953 954 955 SUBROUTINE mpp_init_landprop( propland ) 956 !!---------------------------------------------------------------------- 957 !! *** ROUTINE mpp_init_landprop *** 958 !! 959 !! ** Purpose : the the proportion of land points in the surface land-sea mask 960 !! 961 !! 
** Method : read iproc strips (of length jpiglo) of the land-sea mask 962 !!---------------------------------------------------------------------- 963 REAL(wp), INTENT( out) :: propland ! proportion of land points in the global domain (between 0 and 1) 964 ! 965 INTEGER, DIMENSION(jpni*jpnj) :: kusedom_1d 966 INTEGER :: inboce, iarea 967 INTEGER :: iproc, idiv, ijsz 968 INTEGER :: ijstr 969 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: lloce 970 !!---------------------------------------------------------------------- 971 ! do nothing if there is no land-sea mask 972 IF( numbot == -1 .and. numbdy == -1 ) THEN 973 propland = 0. 974 RETURN 975 ENDIF 976 977 ! number of processes reading the bathymetry file 978 iproc = MINVAL( (/mppsize, jpjglo/2, 100/) ) ! read a least 2 lines, no more that 100 processes reading at the same time 979 980 ! we want to read iproc strips of the land-sea mask. -> pick up iproc processes every idiv processes starting at 1 981 IF( iproc == 1 ) THEN ; idiv = mppsize 982 ELSE ; idiv = ( mppsize - 1 ) / ( iproc - 1 ) 983 ENDIF 984 985 iarea = (narea-1)/idiv ! involed process number (starting counting at 0) 986 IF( MOD( narea-1, idiv ) == 0 .AND. iarea < iproc ) THEN ! beware idiv can be = to 1 987 ! 988 ijsz = jpjglo / iproc ! width of the stripe to read 989 IF( iarea < MOD(jpjglo,iproc) ) ijsz = ijsz + 1 990 ijstr = iarea*(jpjglo/iproc) + MIN(iarea, MOD(jpjglo,iproc)) + 1 ! starting j position of the reading 991 ! 992 ALLOCATE( lloce(jpiglo, ijsz) ) ! allocate the strip 993 CALL mpp_init_readbot_strip( ijstr, ijsz, lloce ) 994 inboce = COUNT(lloce) ! number of ocean point in the stripe 995 DEALLOCATE(lloce) 996 ! 997 ELSE 998 inboce = 0 999 ENDIF 1000 CALL mpp_sum( 'mppini', inboce ) ! total number of ocean points over the global domain 1001 ! 1002 propland = REAL( jpiglo*jpjglo - inboce, wp ) / REAL( jpiglo*jpjglo, wp ) 1003 ! 1004 END SUBROUTINE mpp_init_landprop 1005 1006 1007 SUBROUTINE mpp_init_isoce( knbi, knbj, ldisoce ) 1008 !!---------------------------------------------------------------------- 1009 !! *** ROUTINE mpp_init_nboce *** 1010 !! 1011 !! ** Purpose : check for a mpi domain decomposition knbi x knbj which 1012 !! subdomains contain at least 1 ocean point 1013 !! 1014 !! ** Method : read knbj strips (of length jpiglo) of the land-sea mask 1015 !!---------------------------------------------------------------------- 1016 INTEGER, INTENT(in ) :: knbi, knbj ! domain decomposition 1017 LOGICAL, DIMENSION(knbi,knbj), INTENT( out) :: ldisoce ! .true. if a sub domain constains 1 ocean point 1018 ! 1019 INTEGER, DIMENSION(knbi,knbj) :: inboce ! number oce oce pint in each mpi subdomain 1020 INTEGER, DIMENSION(knbi*knbj) :: inboce_1d 1021 INTEGER :: idiv, iimax, ijmax, iarea 1022 INTEGER :: ji, jn 1023 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: lloce ! lloce(i,j) = .true. if the point (i,j) is ocean 1024 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: iimppt, ilci 1025 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ijmppt, ilcj 1026 !!---------------------------------------------------------------------- 1027 ! do nothing if there is no land-sea mask 1028 IF( numbot == -1 .AND. numbdy == -1 ) THEN 1029 ldisoce(:,:) = .TRUE. 1030 RETURN 1031 ENDIF 1032 1033 ! we want to read knbj strips of the land-sea mask. -> pick up knbj processes every idiv processes starting at 1 1034 IF ( knbj == 1 ) THEN ; idiv = mppsize 1035 ELSE IF ( mppsize < knbj ) THEN ; idiv = 1 1036 ELSE ; idiv = ( mppsize - 1 ) / ( knbj - 1 ) 1037 ENDIF 1038 inboce(:,:) = 0 ! 
default no ocean point found 1039 1040 DO jn = 0, (knbj-1)/mppsize ! if mppsize < knbj : more strips than mpi processes (because of potential land domains) 1041 ! 1042 iarea = (narea-1)/idiv + jn * mppsize ! involed process number (starting counting at 0) 1043 IF( MOD( narea-1, idiv ) == 0 .AND. iarea < knbj ) THEN ! beware idiv can be = to 1 1044 ! 1045 ALLOCATE( iimppt(knbi,knbj), ijmppt(knbi,knbj), ilci(knbi,knbj), ilcj(knbi,knbj) ) 1046 CALL mpp_basic_decomposition( knbi, knbj, iimax, ijmax, iimppt, ijmppt, ilci, ilcj ) 1047 ! 1048 ALLOCATE( lloce(jpiglo, ilcj(1,iarea+1)) ) ! allocate the strip 1049 CALL mpp_init_readbot_strip( ijmppt(1,iarea+1), ilcj(1,iarea+1), lloce ) ! read the strip 1050 DO ji = 1, knbi 1051 inboce(ji,iarea+1) = COUNT( lloce(iimppt(ji,1):iimppt(ji,1)+ilci(ji,1)-1,:) ) ! number of ocean point in subdomain 1052 END DO 1053 ! 1054 DEALLOCATE(lloce) 1055 DEALLOCATE(iimppt, ijmppt, ilci, ilcj) 1056 ! 1057 ENDIF 1058 END DO 1059 1060 inboce_1d = RESHAPE(inboce, (/ knbi*knbj /)) 1061 CALL mpp_sum( 'mppini', inboce_1d ) 1062 inboce = RESHAPE(inboce_1d, (/knbi, knbj/)) 1063 ldisoce(:,:) = inboce(:,:) /= 0 1064 ! 1065 END SUBROUTINE mpp_init_isoce 1066 1067 1068 SUBROUTINE mpp_init_readbot_strip( kjstr, kjcnt, ldoce ) 1069 !!---------------------------------------------------------------------- 1070 !! *** ROUTINE mpp_init_readbot_strip *** 1071 !! 1072 !! ** Purpose : Read relevant bathymetric information in order to 1073 !! provide a land/sea mask used for the elimination 1074 !! of land domains, in an mpp computation. 1075 !! 1076 !! ** Method : read stipe of size (jpiglo,...) 1077 !!---------------------------------------------------------------------- 1078 INTEGER , INTENT(in ) :: kjstr ! starting j position of the reading 1079 INTEGER , INTENT(in ) :: kjcnt ! number of lines to read 1080 LOGICAL, DIMENSION(jpiglo,kjcnt), INTENT( out) :: ldoce ! ldoce(i,j) = .true. if the point (i,j) is ocean 1081 ! 1082 INTEGER :: inumsave ! local logical unit 1083 REAL(wp), DIMENSION(jpiglo,kjcnt) :: zbot, zbdy 1084 !!---------------------------------------------------------------------- 1085 ! 1086 inumsave = numout ; numout = numnul ! redirect all print to /dev/null 1087 ! 1088 IF( numbot /= -1 ) THEN 1089 CALL iom_get( numbot, jpdom_unknown, 'bottom_level', zbot, kstart = (/1,kjstr/), kcount = (/jpiglo, kjcnt/) ) 1090 ELSE 1091 zbot(:,:) = 1. ! put a non-null value 1092 ENDIF 1093 1094 IF( numbdy /= -1 ) THEN ! Adjust with bdy_msk if it exists 1095 CALL iom_get ( numbdy, jpdom_unknown, 'bdy_msk', zbdy, kstart = (/1,kjstr/), kcount = (/jpiglo, kjcnt/) ) 1096 zbot(:,:) = zbot(:,:) * zbdy(:,:) 1097 ENDIF 1098 ! 1099 ldoce(:,:) = zbot(:,:) > 0. 1100 numout = inumsave 1101 ! 1102 END SUBROUTINE mpp_init_readbot_strip 1103 963 1104 964 1105 SUBROUTINE mpp_init_ioipsl … … 1008 1149 1009 1150 1151 SUBROUTINE mpp_init_nfdcom 1152 !!---------------------------------------------------------------------- 1153 !! *** ROUTINE mpp_init_nfdcom *** 1154 !! ** Purpose : Setup for north fold exchanges with explicit 1155 !! point-to-point messaging 1156 !! 1157 !! ** Method : Initialization of the northern neighbours lists. 1158 !!---------------------------------------------------------------------- 1159 !! 1.0 ! 2011-10 (A. C. Coward, NOCS & J. Donners, PRACE) 1160 !! 2.0 ! 2013-06 Setup avoiding MPI communication (I. Epicoco, S. 
Mocavero, CMCC) 1161 !!---------------------------------------------------------------------- 1162 INTEGER :: sxM, dxM, sxT, dxT, jn 1163 INTEGER :: njmppmax 1164 !!---------------------------------------------------------------------- 1165 ! 1166 njmppmax = MAXVAL( njmppt ) 1167 ! 1168 !initializes the north-fold communication variables 1169 isendto(:) = 0 1170 nsndto = 0 1171 ! 1172 IF ( njmpp == njmppmax ) THEN ! if I am a process in the north 1173 ! 1174 !sxM is the first point (in the global domain) needed to compute the north-fold for the current process 1175 sxM = jpiglo - nimppt(narea) - nlcit(narea) + 1 1176 !dxM is the last point (in the global domain) needed to compute the north-fold for the current process 1177 dxM = jpiglo - nimppt(narea) + 2 1178 ! 1179 ! loop over the other north-fold processes to find the processes 1180 ! managing the points belonging to the sxT-dxT range 1181 ! 1182 DO jn = 1, jpni 1183 ! 1184 sxT = nfiimpp(jn, jpnj) ! sxT = 1st point (in the global domain) of the jn process 1185 dxT = nfiimpp(jn, jpnj) + nfilcit(jn, jpnj) - 1 ! dxT = last point (in the global domain) of the jn process 1186 ! 1187 IF ( sxT < sxM .AND. sxM < dxT ) THEN 1188 nsndto = nsndto + 1 1189 isendto(nsndto) = jn 1190 ELSEIF( sxM <= sxT .AND. dxM >= dxT ) THEN 1191 nsndto = nsndto + 1 1192 isendto(nsndto) = jn 1193 ELSEIF( dxM < dxT .AND. sxT < dxM ) THEN 1194 nsndto = nsndto + 1 1195 isendto(nsndto) = jn 1196 ENDIF 1197 ! 1198 END DO 1199 nfsloop = 1 1200 nfeloop = nlci 1201 DO jn = 2,jpni-1 1202 IF( nfipproc(jn,jpnj) == (narea - 1) ) THEN 1203 IF( nfipproc(jn-1,jpnj) == -1 ) nfsloop = nldi 1204 IF( nfipproc(jn+1,jpnj) == -1 ) nfeloop = nlei 1205 ENDIF 1206 END DO 1207 ! 1208 ENDIF 1209 l_north_nogather = .TRUE. 1210 ! 1211 END SUBROUTINE mpp_init_nfdcom 1212 1213 1214 #endif 1215 1010 1216 !!====================================================================== 1011 1217 END MODULE mppini -
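
The sizing logic in mpp_basic_decomposition above is a halo-aware ceiling division: the interior width (jpiglo - 2*nn_hls) is shared among knbi subdomains, the 2*nn_hls halo columns are added back to each, and the first iresti subdomains keep the maximum width while the rest are one point narrower; the starting indices then overlap neighbouring subdomains by 2*nn_hls columns. The stand-alone sketch below reproduces that arithmetic for the i-direction only; the global size and subdomain count are invented for illustration and are not values from this changeset.

    PROGRAM decomp_sizing_sketch
       !! Sketch of the i-direction sizing used in mpp_basic_decomposition:
       !! halo-aware ceiling division, remainder distribution, overlapping offsets.
       IMPLICIT NONE
       INTEGER, PARAMETER :: nn_hls = 1          ! halo width, as in par_oce.f90
       INTEGER, PARAMETER :: jpiglo = 182        ! illustrative global i-dimension
       INTEGER, PARAMETER :: knbi   = 4          ! illustrative nb of subdomains in i
       INTEGER :: kimax, ireci, iresti, ji
       INTEGER :: klci(knbi), kimppt(knbi)
       !
       ireci  = 2 * nn_hls
       kimax  = ( jpiglo - ireci + (knbi-1) ) / knbi + ireci   ! maximum subdomain width
       iresti = 1 + MOD( jpiglo - ireci - 1 , knbi )           ! nb of subdomains kept at kimax
       klci(1:iresti)      = kimax
       klci(iresti+1:knbi) = kimax - 1
       !
       kimppt(1) = 1                                           ! global i-index of each subdomain start
       DO ji = 2, knbi
          kimppt(ji) = kimppt(ji-1) + klci(ji-1) - ireci       ! neighbours overlap by 2*nn_hls columns
       END DO
       !
       WRITE(*,*) 'kimax  = ', kimax
       WRITE(*,*) 'klci   = ', klci
       WRITE(*,*) 'kimppt = ', kimppt
       WRITE(*,*) 'last owned column = ', kimppt(knbi) + klci(knbi) - 1, '  (should equal jpiglo = ', jpiglo, ')'
    END PROGRAM decomp_sizing_sketch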
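
mpp_init_landprop and mpp_init_isoce read the land-sea mask in horizontal strips: the jpjglo rows are shared among the readers, the first MOD(jpjglo,iproc) of them taking one extra row, and each reader derives its starting row from its own number. That strip arithmetic can be checked on its own; the sizes below are again invented for the example.

    PROGRAM strip_partition_sketch
       !! Sketch of the strip layout used when reading the land-sea mask:
       !! each of iproc readers gets jpjglo/iproc rows, plus one extra row for the
       !! first MOD(jpjglo,iproc) readers; ijstr is the first global row it reads.
       IMPLICIT NONE
       INTEGER, PARAMETER :: jpjglo = 149     ! illustrative global j-dimension
       INTEGER, PARAMETER :: iproc  = 4       ! illustrative number of readers
       INTEGER :: iarea, ijsz, ijstr
       !
       DO iarea = 0, iproc-1                  ! reader number, counted from 0 as above
          ijsz = jpjglo / iproc
          IF( iarea < MOD(jpjglo,iproc) )   ijsz = ijsz + 1
          ijstr = iarea*(jpjglo/iproc) + MIN(iarea, MOD(jpjglo,iproc)) + 1
          WRITE(*,'(a,i2,a,i4,a,i4)') ' reader ', iarea, ' : rows ', ijstr, ' to ', ijstr+ijsz-1
       END DO
    END PROGRAM strip_partition_sketch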
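
The neighbour selection in mpp_init_nfdcom is, in effect, a one-dimensional interval test: a northern process that needs global columns sxM..dxM for the fold sends to every process jn whose owned range sxT..dxT satisfies one of the three IF branches above. The sketch below applies exactly those branches; for concreteness the owned ranges reuse the 4-subdomain layout produced by the sizing sketch earlier, and sxM/dxM are invented.

    PROGRAM nfd_overlap_sketch
       !! Sketch of the neighbour test in mpp_init_nfdcom: the local process needs
       !! columns sxM..dxM and sends to any process whose owned range sxT..dxT
       !! overlaps it, using the same three conditions as the routine above.
       IMPLICIT NONE
       INTEGER, PARAMETER :: jpni = 4
       INTEGER :: isxT(jpni), idxT(jpni)       ! owned i-ranges of the northern row (illustrative)
       INTEGER :: sxM, dxM, jn, nsndto
       INTEGER :: isendto(jpni)
       !
       isxT = (/  1, 46,  91, 136 /)           ! starts of the owned ranges
       idxT = (/ 47, 92, 137, 182 /)           ! ends of the owned ranges
       sxM  = 60   ;   dxM = 120               ! range needed by the local process
       !
       nsndto = 0   ;   isendto(:) = 0
       DO jn = 1, jpni
          IF(     isxT(jn) <  sxM      .AND. sxM >  isxT(jn) - isxT(jn) + isxT(jn) .AND. sxM < idxT(jn) ) THEN   ! left end of M strictly inside T
             nsndto = nsndto + 1   ;   isendto(nsndto) = jn
          ELSEIF( sxM      <= isxT(jn) .AND. dxM >= idxT(jn) ) THEN                                              ! T fully contained in M
             nsndto = nsndto + 1   ;   isendto(nsndto) = jn
          ELSEIF( dxM      <  idxT(jn) .AND. isxT(jn) < dxM  ) THEN                                              ! right end of M strictly inside T
             nsndto = nsndto + 1   ;   isendto(nsndto) = jn
          ENDIF
       END DO
       WRITE(*,*) 'nsndto  = ', nsndto
       WRITE(*,*) 'isendto = ', isendto(1:nsndto)
    END PROGRAM nfd_overlap_sketch

With the values above, the local process ends up sending to processes 2 and 3, whose ranges 46..92 and 91..137 intersect 60..120.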
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/nemogcm.F90
r10725 r10727 62 62 63 63 CHARACTER(lc) :: cform_aaa="( /, 'AAAAAAAA', / ) " ! flag for output listing 64 65 #if defined key_agrif 66 external agrif_boundary_connections, agrif_update_all, agrif_recompute_scalefactors 67 #endif 64 68 65 69 !!---------------------------------------------------------------------- … … 87 91 !!---------------------------------------------------------------------- 88 92 ! 93 #if defined key_agrif 94 CALL Agrif_Init_Grids() ! AGRIF: set the meshes 95 #endif 89 96 ! !-----------------------! 90 97 CALL nemo_init !== Initialisations ==! 91 98 ! !-----------------------! 92 99 100 #if defined key_agrif 101 CALL Agrif_Regrid() 102 103 CALL Agrif_Step_Child(agrif_boundary_connections) 104 105 CALL Agrif_Step_Child_adj(agrif_update_all) 106 107 CALL Agrif_Step_Child(agrif_recompute_scalefactors) 108 109 CALL Agrif_Step_Child(cfg_write) 110 #endif 111 93 112 ! check that all process are still there... If some process have an error, 94 113 ! they will never enter in step and other processes will wait until the end of the cpu time! 95 IF( lk_mpp ) CALL mpp_max( nstop )114 IF( lk_mpp ) CALL mpp_max( 'nemogcm',nstop ) 96 115 97 116 IF(lwp) WRITE(numout,cform_aaa) ! Flag AAAAAAA … … 106 125 ENDIF 107 126 ! 108 IF( nn_timing == 1 ) CALL timing_finalize109 127 ! 110 128 CALL nemo_closefile … … 120 138 !! ** Purpose : initialization of the NEMO GCM 121 139 !!---------------------------------------------------------------------- 122 INTEGER :: ji ! dummy loop indices 123 INTEGER :: ilocal_comm ! local integer 124 INTEGER :: ios 125 CHARACTER(len=80), DIMENSION(16) :: cltxt 126 ! 127 NAMELIST/namctl/ ln_ctl , nn_print, nn_ictls, nn_ictle, & 128 & nn_isplt, nn_jsplt, nn_jctls, nn_jctle, & 129 & nn_bench, nn_timing, nn_diacfl 140 INTEGER :: ji ! dummy loop indices 141 INTEGER :: ios, ilocal_comm ! local integers 142 CHARACTER(len=120), DIMENSION(60) :: cltxt, cltxt2, clnam 143 ! 144 NAMELIST/namctl/ ln_ctl , sn_cfctl, nn_print,ln_timing 130 145 NAMELIST/namcfg/ ln_e3_dep, & 131 146 & cp_cfg, cp_cfz, jp_cfg, jpidta, jpjdta, jpkdta, jpiglo, jpjglo, & … … 145 160 REWIND( numnam_cfg ) ! Namelist namctl in confguration namelist : Control prints & Benchmark 146 161 READ ( numnam_cfg, namctl, IOSTAT = ios, ERR = 902 ) 147 902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namctl in configuration namelist', .TRUE. )162 902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'namctl in configuration namelist', .TRUE. ) 148 163 149 164 ! … … 164 179 ! !--------------------------------------------! 165 180 ! Nodes selection (control print return in cltxt) 166 ilocal_comm = 0167 181 narea = mynode( cltxt, 'output.namelist.dyn', numnam_ref, numnam_cfg, numond , nstop ) 168 182 narea = narea + 1 ! mynode return the rank of proc (0 --> jpnij -1 ) … … 179 193 ENDIF 180 194 181 ! If dimensions of processor grid weren't specified in the namelist file 182 ! then we calculate them here now that we have our communicator size 183 IF( jpni < 1 .OR. jpnj < 1 ) THEN 184 IF( Agrif_Root() ) CALL nemo_partition( mppsize ) 195 IF(lwp) THEN ! open listing units 196 ! 197 CALL ctl_opn( numout, 'ocean.output', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., narea ) 198 ! 
199 WRITE(numout,*) 200 WRITE(numout,*) ' CNRS - NERC - Met OFFICE - MERCATOR-ocean - INGV - CMCC' 201 WRITE(numout,*) ' NEMO team' 202 WRITE(numout,*) ' Ocean General Circulation Model' 203 WRITE(numout,*) ' NEMO version 4.0 (2019) ' 204 WRITE(numout,*) 205 WRITE(numout,*) " ._ ._ ._ ._ ._ " 206 WRITE(numout,*) " _.-._)`\_.-._)`\_.-._)`\_.-._)`\_.-._)`\_ " 207 WRITE(numout,*) 208 WRITE(numout,*) " o _, _, " 209 WRITE(numout,*) " o .' ( .-' / " 210 WRITE(numout,*) " o _/..._'. .' / " 211 WRITE(numout,*) " ( o .-'` ` '-./ _.' " 212 WRITE(numout,*) " ) ( o) ;= <_ ( " 213 WRITE(numout,*) " ( '-.,\\__ __.-;`\ '. ) " 214 WRITE(numout,*) " ) ) \) |`\ \) '. \ ( ( " 215 WRITE(numout,*) " ( ( \_/ '-._\ ) ) " 216 WRITE(numout,*) " ) ) ` ( ( " 217 WRITE(numout,*) " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ " 218 WRITE(numout,*) 219 220 DO ji = 1, SIZE(cltxt) 221 IF( TRIM(cltxt (ji)) /= '' ) WRITE(numout,*) TRIM(cltxt(ji)) ! control print of mynode 222 END DO 223 WRITE(numout,*) 224 WRITE(numout,*) 225 ! DO ji = 1, SIZE(cltxt2) 226 ! IF( TRIM(cltxt2(ji)) /= '' ) WRITE(numout,*) TRIM(cltxt2(ji)) ! control print of domain size 227 ! END DO 228 ! 229 WRITE(numout,cform_aaa) ! Flag AAAAAAA 230 ! 185 231 ENDIF 186 187 ! Calculate domain dimensions given calculated jpni and jpnj 188 ! This used to be done in par_oce.F90 when they were parameters rather than variables 189 IF( Agrif_Root() ) THEN 190 jpi = ( jpiglo -2*jpreci + (jpni-1) ) / jpni + 2*jpreci ! first dim. 191 jpj = ( jpjglo -2*jprecj + (jpnj-1) ) / jpnj + 2*jprecj ! second dim. 192 ENDIF 232 ! open /dev/null file to be able to supress output write easily 233 ! CALL ctl_opn( numnul, '/dev/null', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. ) 234 ! 235 ! ! Domain decomposition 236 CALL mpp_init ! MPP 237 238 ! IF( Agrif_Root() ) THEN 239 ! jpi = ( jpiglo -2*jpreci + (jpni-1) ) / jpni + 2*jpreci ! first dim. 240 ! jpj = ( jpjglo -2*jprecj + (jpnj-1) ) / jpnj + 2*jprecj ! second dim. 241 ! ENDIF 193 242 jpk = jpkdta ! third dim 194 243 jpim1 = jpi-1 ! inner domain indices … … 197 246 jpij = jpi*jpj ! jpi x j 198 247 199 IF(lwp) THEN ! open listing units 200 ! 201 CALL ctl_opn( numout, 'ocean.output', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., narea ) 202 ! 203 WRITE(numout,*) 204 WRITE(numout,*) ' CNRS - NERC - Met OFFICE - MERCATOR-ocean - INGV - CMCC' 205 WRITE(numout,*) ' NEMO team' 206 WRITE(numout,*) ' Ocean General Circulation Model' 207 WRITE(numout,*) ' version 3.7 (2015) ' 208 WRITE(numout,*) 209 WRITE(numout,*) 210 DO ji = 1, SIZE(cltxt) 211 IF( TRIM(cltxt(ji)) /= '' ) WRITE(numout,*) cltxt(ji) ! control print of mynode 212 END DO 213 WRITE(numout,cform_aaa) ! Flag AAAAAAA 214 ! 215 ENDIF 216 217 ! Now we know the dimensions of the grid and numout has been set we can 218 ! allocate arrays 248 #if defined key_agrif 249 CALL Agrif_Declare_Var 250 #endif 251 252 ! Now we know the dimensions of the grid and numout has been set: we can allocate arrays 219 253 CALL nemo_alloc() 220 254 … … 226 260 227 261 ! ! Domain decomposition 228 IF( jpni*jpnj == jpnij ) THEN ; CALL mpp_init ! standard cutting out 229 ELSE ; CALL mpp_init2 ! eliminate land processors 230 ENDIF 231 ! 232 IF( nn_timing == 1 ) CALL timing_init 233 ! 262 ! 234 263 ! ! General initialization 235 264 CALL phy_cst ! Physical constants 236 CALL eos_init ! Equation of state237 265 CALL dom_cfg ! Domain configuration 238 266 CALL dom_init ! Domain … … 253 281 IF(lwp) THEN ! 
control print 254 282 WRITE(numout,*) 255 WRITE(numout,*) 'nemo_ctl: Control prints & Benchmark'256 WRITE(numout,*) '~~~~~~~ 283 WRITE(numout,*) 'nemo_ctl: Control prints' 284 WRITE(numout,*) '~~~~~~~~' 257 285 WRITE(numout,*) ' Namelist namctl' 258 286 WRITE(numout,*) ' run control (for debugging) ln_ctl = ', ln_ctl 287 WRITE(numout,*) ' finer control over o/p sn_cfctl%l_config = ', sn_cfctl%l_config 288 WRITE(numout,*) ' sn_cfctl%l_runstat = ', sn_cfctl%l_runstat 289 WRITE(numout,*) ' sn_cfctl%l_trcstat = ', sn_cfctl%l_trcstat 290 WRITE(numout,*) ' sn_cfctl%l_oceout = ', sn_cfctl%l_oceout 291 WRITE(numout,*) ' sn_cfctl%l_layout = ', sn_cfctl%l_layout 292 WRITE(numout,*) ' sn_cfctl%l_mppout = ', sn_cfctl%l_mppout 293 WRITE(numout,*) ' sn_cfctl%l_mpptop = ', sn_cfctl%l_mpptop 294 WRITE(numout,*) ' sn_cfctl%procmin = ', sn_cfctl%procmin 295 WRITE(numout,*) ' sn_cfctl%procmax = ', sn_cfctl%procmax 296 WRITE(numout,*) ' sn_cfctl%procincr = ', sn_cfctl%procincr 297 WRITE(numout,*) ' sn_cfctl%ptimincr = ', sn_cfctl%ptimincr 259 298 WRITE(numout,*) ' level of print nn_print = ', nn_print 260 299 WRITE(numout,*) ' Start i indice for SUM control nn_ictls = ', nn_ictls … … 264 303 WRITE(numout,*) ' number of proc. following i nn_isplt = ', nn_isplt 265 304 WRITE(numout,*) ' number of proc. following j nn_jsplt = ', nn_jsplt 266 WRITE(numout,*) ' benchmark parameter (0/1) nn_bench = ', nn_bench267 WRITE(numout,*) ' timing activated (0/1) nn_timing = ', nn_timing305 WRITE(numout,*) ' timing by routine ln_timing = ', ln_timing 306 WRITE(numout,*) ' CFL diagnostics ln_diacfl = ', ln_diacfl 268 307 ENDIF 269 308 ! … … 275 314 isplt = nn_isplt 276 315 jsplt = nn_jsplt 277 nbench = nn_bench 278 279 IF(lwp) THEN ! control print 280 WRITE(numout,*) 281 WRITE(numout,*) 'namcfg : configuration initialization through namelist read' 282 WRITE(numout,*) '~~~~~~~ ' 283 WRITE(numout,*) ' Namelist namcfg' 284 WRITE(numout,*) ' vertical scale factors =T: e3.=dk[depth] ln_e3_dep = ', ln_e3_dep 285 WRITE(numout,*) ' =F: old definition ' 286 WRITE(numout,*) ' configuration name cp_cfg = ', TRIM(cp_cfg) 287 WRITE(numout,*) ' configuration zoom name cp_cfz = ', TRIM(cp_cfz) 288 WRITE(numout,*) ' configuration resolution jp_cfg = ', jp_cfg 289 WRITE(numout,*) ' 1st lateral dimension ( >= jpiglo ) jpidta = ', jpidta 290 WRITE(numout,*) ' 2nd " " ( >= jpjglo ) jpjdta = ', jpjdta 291 WRITE(numout,*) ' 3nd " " jpkdta = ', jpkdta 292 WRITE(numout,*) ' 1st dimension of global domain in i jpiglo = ', jpiglo 293 WRITE(numout,*) ' 2nd - - in j jpjglo = ', jpjglo 294 WRITE(numout,*) ' left bottom i index of the zoom (in data domain) jpizoom = ', jpizoom 295 WRITE(numout,*) ' left bottom j index of the zoom (in data domain) jpizoom = ', jpjzoom 296 WRITE(numout,*) ' lateral cond. type (between 0 and 6) jperio = ', jperio 297 WRITE(numout,*) ' use file attribute if exists as i/p j-start ln_use_jattr = ', ln_use_jattr 298 ! 299 IF(.NOT.ln_e3_dep ) THEN 300 WRITE(numout,cform_war) 301 WRITE(numout,*) 302 WRITE(numout,*) ' ===>>>> Obsolescent definition of e3 scale factors is used' 303 WRITE(numout,*) 304 ENDIF 305 ENDIF 316 317 ! IF( .NOT.ln_read_cfg ) ln_closea = .false. ! dealing possible only with a domcfg file 318 ! 306 319 ! ! Parameter control 307 320 ! … … 343 356 ENDIF 344 357 ! 345 IF( 1_wp /= SIGN(1._wp,-0._wp) ) CALL ctl_stop( 'nemo_ctl: The intrinsec SIGN function follows ', & 346 & 'f2003 standard. ' , & 347 & 'Compile with key_nosignedzero enabled' ) 358 ! 
IF( 1._wp /= SIGN(1._wp,-0._wp) ) CALL ctl_stop( 'nemo_ctl: The intrinsec SIGN function follows f2003 standard.', & 359 ! & 'Compile with key_nosignedzero enabled:', & 360 ! & '--> add -Dkey_nosignedzero to the definition of %CPP in your arch file' ) 361 ! 362 #if defined key_agrif 363 IF( ln_timing ) CALL ctl_stop( 'AGRIF not implemented with ln_timing = true') 364 #endif 348 365 ! 349 366 END SUBROUTINE nemo_ctl … … 362 379 ! 363 380 IF( numstp /= -1 ) CLOSE( numstp ) ! time-step file 364 IF( numsol /= -1 ) CLOSE( numsol ) ! solver file365 381 IF( numnam_ref /= -1 ) CLOSE( numnam_ref ) ! oce reference namelist 366 382 IF( numnam_cfg /= -1 ) CLOSE( numnam_cfg ) ! oce configuration namelist … … 396 412 ierr = ierr + dom_oce_alloc () ! ocean domain 397 413 ! 398 IF( lk_mpp ) CALL mpp_sum(ierr )414 CALL mpp_sum( 'nemogcm', ierr ) 399 415 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc : unable to allocate standard ocean arrays' ) 400 416 ! 401 417 END SUBROUTINE nemo_alloc 402 418 403 404 SUBROUTINE nemo_partition( num_pes )405 !!----------------------------------------------------------------------406 !! *** ROUTINE nemo_partition ***407 !!408 !! ** Purpose :409 !!410 !! ** Method :411 !!----------------------------------------------------------------------412 INTEGER, INTENT(in) :: num_pes ! The number of MPI processes we have413 !414 INTEGER, PARAMETER :: nfactmax = 20415 INTEGER :: nfact ! The no. of factors returned416 INTEGER :: ierr ! Error flag417 INTEGER :: ji418 INTEGER :: idiff, mindiff, imin ! For choosing pair of factors that are closest in value419 INTEGER, DIMENSION(nfactmax) :: ifact ! Array of factors420 !!----------------------------------------------------------------------421 !422 ierr = 0423 !424 CALL factorise( ifact, nfactmax, nfact, num_pes, ierr )425 !426 IF( nfact <= 1 ) THEN427 WRITE (numout, *) 'WARNING: factorisation of number of PEs failed'428 WRITE (numout, *) ' : using grid of ',num_pes,' x 1'429 jpnj = 1430 jpni = num_pes431 ELSE432 ! Search through factors for the pair that are closest in value433 mindiff = 1000000434 imin = 1435 DO ji = 1, nfact-1, 2436 idiff = ABS( ifact(ji) - ifact(ji+1) )437 IF( idiff < mindiff ) THEN438 mindiff = idiff439 imin = ji440 ENDIF441 END DO442 jpnj = ifact(imin)443 jpni = ifact(imin + 1)444 ENDIF445 !446 jpnij = jpni*jpnj447 !448 END SUBROUTINE nemo_partition449 450 451 SUBROUTINE factorise( kfax, kmaxfax, knfax, kn, kerr )452 !!----------------------------------------------------------------------453 !! *** ROUTINE factorise ***454 !!455 !! ** Purpose : return the prime factors of n.456 !! knfax factors are returned in array kfax which is of457 !! maximum dimension kmaxfax.458 !! ** Method :459 !!----------------------------------------------------------------------460 INTEGER , INTENT(in ) :: kn, kmaxfax461 INTEGER , INTENT( out) :: kerr, knfax462 INTEGER, DIMENSION(kmaxfax), INTENT( out) :: kfax463 !464 INTEGER :: ifac, jl, inu465 INTEGER, PARAMETER :: ntest = 14466 INTEGER, DIMENSION(ntest) :: ilfax467 !!----------------------------------------------------------------------468 !469 ! lfax contains the set of allowed factors.470 ilfax(:) = (/(2**jl,jl=ntest,1,-1)/)471 !472 ! Clear the error flag and initialise output vars473 kerr = 0474 kfax = 1475 knfax = 0476 !477 ! Find the factors of n.478 IF( kn == 1 ) GOTO 20479 480 ! nu holds the unfactorised part of the number.481 ! knfax holds the number of factors found.482 ! l points to the allowed factor list.483 ! 
ifac holds the current factor.484 !485 inu = kn486 knfax = 0487 !488 DO jl = ntest, 1, -1489 !490 ifac = ilfax(jl)491 IF( ifac > inu ) CYCLE492 493 ! Test whether the factor will divide.494 495 IF( MOD(inu,ifac) == 0 ) THEN496 !497 knfax = knfax + 1 ! Add the factor to the list498 IF( knfax > kmaxfax ) THEN499 kerr = 6500 write (*,*) 'FACTOR: insufficient space in factor array ', knfax501 return502 ENDIF503 kfax(knfax) = ifac504 ! Store the other factor that goes with this one505 knfax = knfax + 1506 kfax(knfax) = inu / ifac507 !WRITE (*,*) 'ARPDBG, factors ',knfax-1,' & ',knfax,' are ', kfax(knfax-1),' and ',kfax(knfax)508 ENDIF509 !510 END DO511 !512 20 CONTINUE ! Label 20 is the exit point from the factor search loop.513 !514 END SUBROUTINE factorise515 516 517 SUBROUTINE nemo_northcomms518 !!----------------------------------------------------------------------519 !! *** ROUTINE nemo_northcomms ***520 !! ** Purpose : Setup for north fold exchanges with explicit521 !! point-to-point messaging522 !!523 !! ** Method : Initialization of the northern neighbours lists.524 !!----------------------------------------------------------------------525 !! 1.0 ! 2011-10 (A. C. Coward, NOCS & J. Donners, PRACE)526 !! 2.0 ! 2013-06 Setup avoiding MPI communication (I. Epicoco, S. Mocavero, CMCC)527 !!----------------------------------------------------------------------528 INTEGER :: sxM, dxM, sxT, dxT, jn529 INTEGER :: njmppmax530 !!----------------------------------------------------------------------531 !532 njmppmax = MAXVAL( njmppt )533 !534 !initializes the north-fold communication variables535 isendto(:) = 0536 nsndto = 0537 !538 !if I am a process in the north539 IF ( njmpp == njmppmax ) THEN540 !sxM is the first point (in the global domain) needed to compute the541 !north-fold for the current process542 sxM = jpiglo - nimppt(narea) - nlcit(narea) + 1543 !dxM is the last point (in the global domain) needed to compute the544 !north-fold for the current process545 dxM = jpiglo - nimppt(narea) + 2546 547 !loop over the other north-fold processes to find the processes548 !managing the points belonging to the sxT-dxT range549 550 DO jn = 1, jpni551 !sxT is the first point (in the global domain) of the jn552 !process553 sxT = nfiimpp(jn, jpnj)554 !dxT is the last point (in the global domain) of the jn555 !process556 dxT = nfiimpp(jn, jpnj) + nfilcit(jn, jpnj) - 1557 IF ((sxM .gt. sxT) .AND. (sxM .lt. dxT)) THEN558 nsndto = nsndto + 1559 isendto(nsndto) = jn560 ELSEIF ((sxM .le. sxT) .AND. (dxM .ge. dxT)) THEN561 nsndto = nsndto + 1562 isendto(nsndto) = jn563 ELSEIF ((dxM .lt. dxT) .AND. (sxT .lt. dxM)) THEN564 nsndto = nsndto + 1565 isendto(nsndto) = jn566 END IF567 END DO568 nfsloop = 1569 nfeloop = nlci570 DO jn = 2,jpni-1571 IF(nfipproc(jn,jpnj) .eq. (narea - 1)) THEN572 IF (nfipproc(jn - 1 ,jpnj) .eq. -1) THEN573 nfsloop = nldi574 ENDIF575 IF (nfipproc(jn + 1,jpnj) .eq. -1) THEN576 nfeloop = nlei577 ENDIF578 ENDIF579 END DO580 581 ENDIF582 l_north_nogather = .TRUE.583 END SUBROUTINE nemo_northcomms584 419 585 420 -
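
The namelist handling near the top of nemo_init follows the pattern visible in the diff above: declare a NAMELIST group, READ it with both IOSTAT and an ERR label, and report any non-zero status. The sketch below shows the same pattern in isolation; the group name, file name and variables are invented, and the ctl_nam error handler is replaced by a plain STOP.

    PROGRAM namelist_read_sketch
       !! Minimal example of the namelist pattern used in nemo_init: a group is
       !! declared, read with IOSTAT/ERR, and a non-zero status is reported.
       !! Group, file and variable names are illustrative only.
       IMPLICIT NONE
       LOGICAL :: ln_demo  = .FALSE.
       INTEGER :: nn_level = 0
       INTEGER :: ios, inum
       NAMELIST/namdemo/ ln_demo, nn_level
       !
       OPEN ( NEWUNIT=inum, FILE='namelist_demo', STATUS='OLD', FORM='FORMATTED', IOSTAT=ios )
       IF( ios /= 0 )   STOP 'namelist_demo file not found'
       !
       READ ( inum, namdemo, IOSTAT = ios, ERR = 902 )
    902 IF( ios /= 0 )   STOP 'error reading namdemo group'
       CLOSE( inum )
       !
       WRITE(*,*) 'ln_demo  = ', ln_demo
       WRITE(*,*) 'nn_level = ', nn_level
    END PROGRAM namelist_read_sketch

A matching namelist_demo file would contain, for example, a single record such as "&namdemo  ln_demo=.true., nn_level=2 /".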
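
The removed nemo_partition/factorise pair chose a default jpni x jpnj by factorising the process count (over a power-of-two factor list) and keeping the pair of factors closest in value; mpp_init_bestpartition now does that job with land-aware subdomain sizes. As a reminder of the older idea only, here is a minimal closest-pair split using plain trial division instead of the power-of-two list:

    PROGRAM closest_factor_pair_sketch
       !! Sketch of the idea behind the removed nemo_partition: split a process
       !! count into jpni x jpnj using the factor pair that is closest in value.
       !! (The removed routine worked from a power-of-two factor list; plain trial
       !! division is used here only to keep the example short.)
       IMPLICIT NONE
       INTEGER :: num_pes, jpni, jpnj, ji
       !
       num_pes = 48                      ! illustrative number of MPI processes
       jpnj = 1
       DO ji = 1, INT( SQRT( REAL(num_pes) ) )
          IF( MOD( num_pes, ji ) == 0 )   jpnj = ji      ! largest divisor <= sqrt(num_pes)
       END DO
       jpni = num_pes / jpnj
       !
       WRITE(*,*) 'num_pes = ', num_pes, '  ->  jpni x jpnj = ', jpni, ' x ', jpnj
    END PROGRAM closest_factor_pair_sketch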
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/par_oce.f90
r9598 r10727 13 13 PUBLIC 14 14 15 ! zoom starting position 16 INTEGER :: jpizoom !: left bottom (i,j) indices of the zoom 17 INTEGER :: jpjzoom !: in data domain indices 18 19 CHARACTER(lc) :: cp_cfg !: name of the configuration 20 CHARACTER(lc) :: cp_cfz !: name of the zoom of configuration 21 INTEGER :: jp_cfg !: resolution of the configuration 22 23 ! data size !!! * size of all input files * 24 INTEGER :: jpidta !: 1st lateral dimension ( >= jpi ) 25 INTEGER :: jpjdta !: 2nd " " ( >= jpj ) 26 INTEGER :: jpkdta !: number of levels ( >= jpk ) 27 LOGICAL :: ln_e3_dep ! e3. definition flag 28 REAL(wp) :: pp_not_used = 999999._wp !: vertical grid parameter 29 REAL(wp) :: pp_to_be_computed = 999999._wp !: - - - 30 !!---------------------------------------------------------------------- 31 !! namcfg namelist parameters 32 !!---------------------------------------------------------------------- 33 LOGICAL :: ln_read_cfg !: (=T) read the domain configuration file or (=F) not 34 CHARACTER(lc) :: cn_domcfg !: filename the configuration file to be read 35 LOGICAL :: ln_write_cfg !: (=T) create the domain configuration file 36 CHARACTER(lc) :: cn_domcfg_out !: filename the configuration file to be read 37 ! 38 LOGICAL :: ln_use_jattr !: input file read offset 39 ! ! Use file global attribute: open_ocean_jstart to determine start j-row 40 ! ! when reading input from those netcdf files that have the 41 ! ! attribute defined. This is designed to enable input files associated 42 ! ! with the extended grids used in the under ice shelf configurations to 43 ! ! be used without redundant rows when the ice shelves are not in use. 44 ! 45 46 !!--------------------------------------------------------------------- 47 !! Domain Matrix size 48 !!--------------------------------------------------------------------- 49 ! configuration name & resolution (required only in ORCA family case) 50 CHARACTER(lc) :: cn_cfg !: name of the configuration 51 INTEGER :: nn_cfg !: resolution of the configuration 52 53 ! global domain size !!! * total computational domain * 54 INTEGER :: jpiglo !: 1st dimension of global domain --> i-direction 55 INTEGER :: jpjglo !: 2nd - - --> j-direction 56 INTEGER :: jpkglo !: 3nd - - --> k levels 57 58 ! global domain size for AGRIF !!! * total AGRIF computational domain * 59 INTEGER, PUBLIC :: nbug_in_agrif_conv_do_not_remove_or_modify = 1 - 1 60 INTEGER, PUBLIC, PARAMETER :: nbghostcells = 3 !: number of ghost cells 61 INTEGER, PUBLIC :: nbcellsx ! = jpiglo - 2 - 2*nbghostcells !: number of cells in i-direction 62 INTEGER, PUBLIC :: nbcellsy ! = jpjglo - 2 - 2*nbghostcells !: number of cells in j-direction 63 64 ! local domain size !!! * local computational domain * 65 INTEGER, PUBLIC :: jpi ! !: first dimension 66 INTEGER, PUBLIC :: jpj ! !: second dimension 67 INTEGER, PUBLIC :: jpk ! = jpkglo !: third dimension 68 INTEGER, PUBLIC :: jpim1 ! = jpi-1 !: inner domain indices 69 INTEGER, PUBLIC :: jpjm1 ! = jpj-1 !: - - - 70 INTEGER, PUBLIC :: jpkm1 ! = jpk-1 !: - - - 71 INTEGER, PUBLIC :: jpij ! = jpi*jpj !: jpi x jpj 72 INTEGER, PUBLIC :: jpimax! = ( jpiglo-2*nn_hls + (jpni-1) ) / jpni + 2*nn_hls !: maximum jpi 73 INTEGER, PUBLIC :: jpjmax! = ( jpjglo-2*nn_hls + (jpnj-1) ) / jpnj + 2*nn_hls !: maximum jpj 74 75 !!--------------------------------------------------------------------- 76 !! Active tracer parameters 77 !!--------------------------------------------------------------------- 78 INTEGER, PUBLIC, PARAMETER :: jpts = 2 !: Number of active tracers (=2, i.e. 
T & S ) 79 INTEGER, PUBLIC, PARAMETER :: jp_tem = 1 !: indice for temperature 80 INTEGER, PUBLIC, PARAMETER :: jp_sal = 2 !: indice for salinity 81 15 82 !!---------------------------------------------------------------------- 16 83 !! Domain decomposition … … 22 89 INTEGER, PUBLIC, PARAMETER :: jpr2di = 0 !: number of columns for extra outer halo 23 90 INTEGER, PUBLIC, PARAMETER :: jpr2dj = 0 !: number of rows for extra outer halo 24 INTEGER, PUBLIC, PARAMETER :: jpreci = 1 !: number of columns for overlap 25 INTEGER, PUBLIC, PARAMETER :: jprecj = 1 !: number of rows for overlap 26 27 !!---------------------------------------------------------------------- 28 !! namcfg namelist parameters 29 !!---------------------------------------------------------------------- 30 ! 31 LOGICAL :: ln_e3_dep ! e3. definition flag 32 ! 33 CHARACTER(lc) :: cp_cfg !: name of the configuration 34 CHARACTER(lc) :: cp_cfz !: name of the zoom of configuration 35 INTEGER :: jp_cfg !: resolution of the configuration 36 37 ! data size !!! * size of all input files * 38 INTEGER :: jpidta !: 1st lateral dimension ( >= jpi ) 39 INTEGER :: jpjdta !: 2nd " " ( >= jpj ) 40 INTEGER :: jpkdta !: number of levels ( >= jpk ) 41 42 ! global or zoom domain size !!! * computational domain * 43 INTEGER :: jpiglo !: 1st dimension of global domain --> i 44 INTEGER :: jpjglo !: 2nd - - --> j 45 46 ! zoom starting position 47 INTEGER :: jpizoom !: left bottom (i,j) indices of the zoom 48 INTEGER :: jpjzoom !: in data domain indices 49 50 ! Domain characteristics 51 INTEGER :: jperio !: lateral cond. type (between 0 and 6) 52 ! ! = 0 closed ; = 1 cyclic East-West 53 ! ! = 2 equatorial symmetric ; = 3 North fold T-point pivot 54 ! ! = 4 cyclic East-West AND North fold T-point pivot 55 ! ! = 5 North fold F-point pivot 56 ! ! = 6 cyclic East-West AND North fold F-point pivot 57 58 ! Input file read offset 59 LOGICAL :: ln_use_jattr !: Use file global attribute: open_ocean_jstart to determine start j-row 60 ! when reading input from those netcdf files that have the 61 ! attribute defined. This is designed to enable input files associated 62 ! with the extended grids used in the under ice shelf configurations to 63 ! be used without redundant rows when the ice shelves are not in use. 64 65 !! Values set to pp_not_used indicates that this parameter is not used in THIS config. 66 !! Values set to pp_to_be_computed indicates that variables will be computed in domzgr 67 REAL(wp) :: pp_not_used = 999999._wp !: vertical grid parameter 68 REAL(wp) :: pp_to_be_computed = 999999._wp !: - - - 69 70 71 72 73 !!--------------------------------------------------------------------- 74 !! Active tracer parameters 75 !!--------------------------------------------------------------------- 76 INTEGER, PUBLIC, PARAMETER :: jpts = 2 !: Number of active tracers (=2, i.e. T & S ) 77 INTEGER, PUBLIC, PARAMETER :: jp_tem = 1 !: indice for temperature 78 INTEGER, PUBLIC, PARAMETER :: jp_sal = 2 !: indice for salinity 79 80 !!--------------------------------------------------------------------- 81 !! Domain Matrix size (if AGRIF, they are not all parameters) 82 !!--------------------------------------------------------------------- 83 84 85 86 87 88 89 INTEGER, PUBLIC :: jpi ! = ( jpiglo-2*jpreci + (jpni-1) ) / jpni + 2*jpreci !: first dimension 90 INTEGER, PUBLIC :: jpj ! = ( jpjglo-2*jprecj + (jpnj-1) ) / jpnj + 2*jprecj !: second dimension 91 INTEGER, PUBLIC :: jpk ! = jpkdta 92 INTEGER, PUBLIC :: jpim1 ! 
= jpi-1 !: inner domain indices 93 INTEGER, PUBLIC :: jpjm1 ! = jpj-1 !: - - - 94 INTEGER, PUBLIC :: jpkm1 ! = jpk-1 !: - - - 95 INTEGER, PUBLIC :: jpij ! = jpi*jpj !: jpi x jpj 91 INTEGER, PUBLIC, PARAMETER :: nn_hls = 1 !: halo width (applies to both rows and columns) 96 92 97 93 !!---------------------------------------------------------------------- 98 94 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 99 !! $Id: par_oce.F90 5836 2015-10-26 14:49:40Z cetlod$100 !! Software governed by the CeCILL licen ce (./LICENSE)95 !! $Id: par_oce.F90 10068 2018-08-28 14:09:04Z nicolasmartin $ 96 !! Software governed by the CeCILL license (see ./LICENSE) 101 97 !!====================================================================== 102 98 END MODULE par_oce -
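
With nn_hls = 1 every local array carries a one-point halo on each side, which is why jpim1 and jpjm1 are described above as inner domain indices: computations are done on 1+nn_hls..jpim1 and 1+nn_hls..jpjm1 and the halo points are refreshed separately by the exchange routines. A toy illustration of that indexing convention, with invented local sizes:

    PROGRAM halo_index_sketch
       !! Toy illustration of the nn_hls = 1 halo convention: the outermost row and
       !! column of a local (jpi,jpj) array are halo points, and interior loops run
       !! over 2..jpim1 and 2..jpjm1.  Local sizes are illustrative only.
       IMPLICIT NONE
       INTEGER, PARAMETER :: nn_hls = 1
       INTEGER, PARAMETER :: jpi = 8, jpj = 6
       INTEGER, PARAMETER :: jpim1 = jpi-1, jpjm1 = jpj-1
       REAL :: zt(jpi,jpj)
       INTEGER :: ji, jj
       !
       zt(:,:) = -1.                          ! halo points keep this flag value
       DO jj = 1+nn_hls, jpjm1                ! interior rows    (2..jpjm1 for nn_hls=1)
          DO ji = 1+nn_hls, jpim1             ! interior columns (2..jpim1 for nn_hls=1)
             zt(ji,jj) = REAL( ji + jj )      ! any interior-only computation
          END DO
       END DO
       !
       WRITE(*,*) 'interior points updated : ', COUNT( zt > 0. ), ' of ', jpi*jpj
       WRITE(*,*) 'halo points left at -1  : ', COUNT( zt < 0. )
    END PROGRAM halo_index_sketch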
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/phycst.F90
r10725 r10727 23 23 PUBLIC phy_cst ! routine called by inipar.F90 24 24 25 REAL(wp), PUBLIC :: rpi = 3.141592653589793_wp !: pi26 REAL(wp), PUBLIC :: rad = 3.141592653589793_wp / 180._wp !: conversion from degre into radian27 REAL(wp), PUBLIC :: rsmall = 0.5 * EPSILON( 1.e0 )!: smallest real computer value25 REAL(wp), PUBLIC :: rpi = 3.141592653589793_wp !: pi 26 REAL(wp), PUBLIC :: rad = 3.141592653589793_wp / 180._wp !: conversion from degre into radian 27 REAL(wp), PUBLIC :: rsmall = 0.5 * EPSILON( 1.e0 ) !: smallest real computer value 28 28 29 REAL(wp), PUBLIC :: rday = 24.*60.*60. !: day [s] 30 REAL(wp), PUBLIC :: rsiyea !: sideral year [s] 31 REAL(wp), PUBLIC :: rsiday !: sideral day [s] 32 REAL(wp), PUBLIC :: raamo = 12._wp !: number of months in one year 33 REAL(wp), PUBLIC :: rjjhh = 24._wp !: number of hours in one day 34 REAL(wp), PUBLIC :: rhhmm = 60._wp !: number of minutes in one hour 35 REAL(wp), PUBLIC :: rmmss = 60._wp !: number of seconds in one minute 36 REAL(wp), PUBLIC :: omega !: earth rotation parameter [s-1] 37 REAL(wp), PUBLIC :: ra = 6371229._wp !: earth radius [m] 38 REAL(wp), PUBLIC :: grav = 9.80665_wp !: gravity [m/s2] 39 40 REAL(wp), PUBLIC :: rtt = 273.16_wp !: triple point of temperature [Kelvin] 29 REAL(wp), PUBLIC :: rday = 24.*60.*60. !: day [s] 30 REAL(wp), PUBLIC :: rsiyea !: sideral year [s] 31 REAL(wp), PUBLIC :: rsiday !: sideral day [s] 32 REAL(wp), PUBLIC :: raamo = 12._wp !: number of months in one year 33 REAL(wp), PUBLIC :: rjjhh = 24._wp !: number of hours in one day 34 REAL(wp), PUBLIC :: rhhmm = 60._wp !: number of minutes in one hour 35 REAL(wp), PUBLIC :: rmmss = 60._wp !: number of seconds in one minute 36 REAL(wp), PUBLIC :: omega !: earth rotation parameter [s-1] 37 REAL(wp), PUBLIC :: ra = 6371229._wp !: earth radius [m] 38 REAL(wp), PUBLIC :: grav = 9.80665_wp !: gravity [m/s2] 41 39 REAL(wp), PUBLIC :: rt0 = 273.15_wp !: freezing point of fresh water [Kelvin] 42 43 44 45 46 REAL(wp), PUBLIC :: rt0_snow = 273.15_wp !: melting point of snow [Kelvin]47 REAL(wp), PUBLIC :: rt0_ice = 273.05_wp !: melting point of ice [Kelvin]48 40 49 41 REAL(wp), PUBLIC :: rau0 !: volumic mass of reference [kg/m3] … … 54 46 REAL(wp), PUBLIC :: r1_rau0_rcp !: = 1. / ( rau0 * rcp ) 55 47 56 REAL(wp), PUBLIC :: rhosn = 330._wp !: volumic mass of snow [kg/m3] 57 REAL(wp), PUBLIC :: emic = 0.97_wp !: emissivity of snow or ice 58 REAL(wp), PUBLIC :: sice = 6.0_wp !: salinity of ice [psu] 59 REAL(wp), PUBLIC :: soce = 34.7_wp !: salinity of sea [psu] 60 REAL(wp), PUBLIC :: cevap = 2.5e+6_wp !: latent heat of evaporation (water) 61 REAL(wp), PUBLIC :: srgamma = 0.9_wp !: correction factor for solar radiation (Oberhuber, 1974) 48 REAL(wp), PUBLIC :: emic = 0.97_wp !: emissivity of snow or ice (not used?) 
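
The sidereal quantities and the rotation rate declared here are not independent numbers: phy_cst further down derives the sidereal year from the calendar year, the sidereal day from the sidereal year, and omega as 2*rpi/rsiday (except under key_cice, where 7.292116e-05 is used directly). The short check below reproduces those expressions; wp is simply taken as double precision for the sketch.

    PROGRAM earth_rotation_sketch
       !! Stand-alone check of the derived calendar/rotation constants set in
       !! phy_cst: sidereal year, sidereal day and the rotation rate omega.
       IMPLICIT NONE
       INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision for this sketch
       REAL(wp) :: rpi, rday, rsiyea, rsiday, omega
       !
       rpi    = 3.141592653589793_wp
       rday   = 24._wp*60._wp*60._wp                           ! length of the day          [s]
       rsiyea = 365.25_wp * rday * 2._wp * rpi / 6.283076_wp   ! sidereal year              [s]
       rsiday = rday / ( 1._wp + rday / rsiyea )               ! sidereal day               [s]
       omega  = 2._wp * rpi / rsiday                           ! earth rotation parameter [s-1]
       !
       WRITE(*,*) 'rsiyea = ', rsiyea, ' s'
       WRITE(*,*) 'rsiday = ', rsiday, ' s'
       WRITE(*,*) 'omega  = ', omega , ' s^-1   (close to the 7.292116e-05 used with key_cice)'
    END PROGRAM earth_rotation_sketch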
49 50 REAL(wp), PUBLIC :: sice = 6.0_wp !: salinity of ice (for pisces) [psu] 51 REAL(wp), PUBLIC :: soce = 34.7_wp !: salinity of sea (for pisces and isf) [psu] 52 REAL(wp), PUBLIC :: rLevap = 2.5e+6_wp !: latent heat of evaporation (water) 62 53 REAL(wp), PUBLIC :: vkarmn = 0.4_wp !: von Karman constant 63 54 REAL(wp), PUBLIC :: stefan = 5.67e-8_wp !: Stefan-Boltzmann constant 64 55 65 REAL(wp), PUBLIC :: rhoic = 900._wp !: volumic mass of sea ice [kg/m3] 66 REAL(wp), PUBLIC :: rcdic = 2.034396_wp !: conductivity of the ice [W/m/K] 67 REAL(wp), PUBLIC :: rcpic = 1.8837e+6_wp !: volumetric specific heat for ice [J/m3/K] 68 REAL(wp), PUBLIC :: cpic !: = rcpic / rhoic (specific heat for ice) [J/Kg/K] 69 REAL(wp), PUBLIC :: rcdsn = 0.22_wp !: conductivity of the snow [W/m/K] 70 REAL(wp), PUBLIC :: rcpsn = 6.9069e+5_wp !: volumetric specific heat for snow [J/m3/K] 71 REAL(wp), PUBLIC :: xlsn = 110.121e+6_wp !: volumetric latent heat fusion of snow [J/m3] 72 REAL(wp), PUBLIC :: lfus !: = xlsn / rhosn (latent heat of fusion of fresh ice) [J/Kg] 73 REAL(wp), PUBLIC :: xlic = 300.33e+6_wp !: volumetric latent heat fusion of ice [J/m3] 74 REAL(wp), PUBLIC :: xsn = 2.8e+6_wp !: volumetric latent heat of sublimation of snow [J/m3] 56 REAL(wp), PUBLIC :: rhos = 330._wp !: volumic mass of snow [kg/m3] 57 REAL(wp), PUBLIC :: rhoi = 917._wp !: volumic mass of sea ice [kg/m3] 58 REAL(wp), PUBLIC :: rhow = 1000._wp !: volumic mass of freshwater in melt ponds [kg/m3] 59 REAL(wp), PUBLIC :: rcnd_i = 2.034396_wp !: thermal conductivity of fresh ice [W/m/K] 60 REAL(wp), PUBLIC :: rcpi = 2067.0_wp !: specific heat of fresh ice [J/kg/K] 61 REAL(wp), PUBLIC :: rLsub = 2.834e+6_wp !: pure ice latent heat of sublimation [J/kg] 62 REAL(wp), PUBLIC :: rLfus = 0.334e+6_wp !: latent heat of fusion of fresh ice [J/kg] 63 REAL(wp), PUBLIC :: rTmlt = 0.054_wp !: decrease of seawater meltpoint with salinity 64 65 REAL(wp), PUBLIC :: r1_rhoi !: 1 / rhoi 66 REAL(wp), PUBLIC :: r1_rhos !: 1 / rhos 67 REAL(wp), PUBLIC :: r1_rcpi !: 1 / rcpi 75 68 !!---------------------------------------------------------------------- 76 69 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 77 !! $Id: phycst.F90 5147 2015-03-13 10:01:32Z cetlod$78 !! Software governed by the CeCILL licen ce (./LICENSE)70 !! $Id: phycst.F90 10068 2018-08-28 14:09:04Z nicolasmartin $ 71 !! Software governed by the CeCILL license (see ./LICENSE) 79 72 !!---------------------------------------------------------------------- 80 73 … … 85 78 !! *** ROUTINE phy_cst *** 86 79 !! 87 !! ** Purpose : Print model parameters andset and print the constants80 !! ** Purpose : set and print the constants 88 81 !!---------------------------------------------------------------------- 89 CHARACTER (len=64) :: cform = "(A12, 3(A13, I7) )"90 !!----------------------------------------------------------------------91 92 IF(lwp) WRITE(numout,*)93 IF(lwp) WRITE(numout,*) ' phy_cst : initialization of ocean parameters and constants'94 IF(lwp) WRITE(numout,*) ' ~~~~~~~'95 96 ! Ocean Parameters97 ! 
----------------98 IF(lwp) THEN99 WRITE(numout,*) ' Domain info'100 WRITE(numout,*) ' dimension of model'101 WRITE(numout,*) ' Local domain Global domain Data domain '102 WRITE(numout,cform) ' ',' jpi : ', jpi, ' jpiglo : ', jpiglo, ' jpidta : ', jpidta103 WRITE(numout,cform) ' ',' jpj : ', jpj, ' jpjglo : ', jpjglo, ' jpjdta : ', jpjdta104 WRITE(numout,cform) ' ',' jpk : ', jpk, ' jpk : ', jpk , ' jpkdta : ', jpkdta105 WRITE(numout,*) ' ',' jpij : ', jpij106 WRITE(numout,*) ' mpp local domain info (mpp)'107 WRITE(numout,*) ' jpni : ', jpni, ' jpreci : ', jpreci108 WRITE(numout,*) ' jpnj : ', jpnj, ' jprecj : ', jprecj109 WRITE(numout,*) ' jpnij : ', jpnij110 WRITE(numout,*) ' lateral domain boundary condition type : jperio = ', jperio111 ENDIF112 113 ! Define constants114 ! ----------------115 IF(lwp) WRITE(numout,*)116 IF(lwp) WRITE(numout,*) ' Constants'117 118 IF(lwp) WRITE(numout,*)119 IF(lwp) WRITE(numout,*) ' mathematical constant rpi = ', rpi120 82 121 83 rsiyea = 365.25_wp * rday * 2._wp * rpi / 6.283076_wp 122 84 rsiday = rday / ( 1._wp + rday / rsiyea ) 85 #if defined key_cice 86 omega = 7.292116e-05 87 #else 123 88 omega = 2._wp * rpi / rsiday 124 IF(lwp) WRITE(numout,*) 125 IF(lwp) WRITE(numout,*) ' day rday = ', rday, ' s' 126 IF(lwp) WRITE(numout,*) ' sideral year rsiyea = ', rsiyea, ' s' 127 IF(lwp) WRITE(numout,*) ' sideral day rsiday = ', rsiday, ' s' 128 IF(lwp) WRITE(numout,*) ' omega omega = ', omega, ' s^-1' 89 #endif 129 90 130 IF(lwp) WRITE(numout,*) 131 IF(lwp) WRITE(numout,*) ' nb of months per year raamo = ', raamo, ' months' 132 IF(lwp) WRITE(numout,*) ' nb of hours per day rjjhh = ', rjjhh, ' hours' 133 IF(lwp) WRITE(numout,*) ' nb of minutes per hour rhhmm = ', rhhmm, ' mn' 134 IF(lwp) WRITE(numout,*) ' nb of seconds per minute rmmss = ', rmmss, ' s' 91 r1_rhoi = 1._wp / rhoi 92 r1_rhos = 1._wp / rhos 93 r1_rcpi = 1._wp / rcpi 135 94 136 IF(lwp) WRITE(numout,*)137 IF(lwp) WRITE(numout,*) ' earth radius ra = ', ra, ' m'138 IF(lwp) WRITE(numout,*) ' gravity grav = ', grav , ' m/s^2'139 140 IF(lwp) WRITE(numout,*)141 IF(lwp) WRITE(numout,*) ' triple point of temperature rtt = ', rtt , ' K'142 IF(lwp) WRITE(numout,*) ' freezing point of water rt0 = ', rt0 , ' K'143 IF(lwp) WRITE(numout,*) ' melting point of snow rt0_snow = ', rt0_snow, ' K'144 IF(lwp) WRITE(numout,*) ' melting point of ice rt0_ice = ', rt0_ice , ' K'145 146 IF(lwp) WRITE(numout,*) ' reference density and heat capacity now defined in eosbn2.f90'147 148 cpic = rcpic / rhoic ! specific heat for ice [J/Kg/K]149 lfus = xlsn / rhosn ! 
latent heat of fusion of fresh ice150 95 IF(lwp) THEN 151 96 WRITE(numout,*) 152 WRITE(numout,*) ' thermal conductivity of the snow = ', rcdsn , ' J/s/m/K' 153 WRITE(numout,*) ' thermal conductivity of the ice = ', rcdic , ' J/s/m/K' 154 WRITE(numout,*) ' fresh ice specific heat = ', cpic , ' J/kg/K' 155 WRITE(numout,*) ' latent heat of fusion of fresh ice / snow = ', lfus , ' J/kg' 156 WRITE(numout,*) ' density times specific heat for snow = ', rcpsn , ' J/m^3/K' 157 WRITE(numout,*) ' density times specific heat for ice = ', rcpic , ' J/m^3/K' 158 WRITE(numout,*) ' volumetric latent heat fusion of sea ice = ', xlic , ' J/m' 159 WRITE(numout,*) ' latent heat of sublimation of snow = ', xsn , ' J/kg' 160 WRITE(numout,*) ' volumetric latent heat fusion of snow = ', xlsn , ' J/m^3' 161 WRITE(numout,*) ' density of sea ice = ', rhoic , ' kg/m^3' 162 WRITE(numout,*) ' density of snow = ', rhosn , ' kg/m^3' 163 WRITE(numout,*) ' emissivity of snow or ice = ', emic 164 WRITE(numout,*) ' salinity of ice = ', sice , ' psu' 165 WRITE(numout,*) ' salinity of sea = ', soce , ' psu' 166 WRITE(numout,*) ' latent heat of evaporation (water) = ', cevap , ' J/m^3' 167 WRITE(numout,*) ' correction factor for solar radiation = ', srgamma 168 WRITE(numout,*) ' von Karman constant = ', vkarmn 169 WRITE(numout,*) ' Stefan-Boltzmann constant = ', stefan , ' J/s/m^2/K^4' 97 WRITE(numout,*) 'phy_cst : initialization of ocean parameters and constants' 98 WRITE(numout,*) '~~~~~~~' 99 WRITE(numout,*) ' mathematical constant rpi = ', rpi 100 WRITE(numout,*) ' day rday = ', rday, ' s' 101 WRITE(numout,*) ' sideral year rsiyea = ', rsiyea, ' s' 102 WRITE(numout,*) ' sideral day rsiday = ', rsiday, ' s' 103 WRITE(numout,*) ' omega omega = ', omega, ' s^-1' 170 104 WRITE(numout,*) 171 WRITE(numout,*) ' conversion: degre ==> radian rad = ', rad 105 WRITE(numout,*) ' nb of months per year raamo = ', raamo, ' months' 106 WRITE(numout,*) ' nb of hours per day rjjhh = ', rjjhh, ' hours' 107 WRITE(numout,*) ' nb of minutes per hour rhhmm = ', rhhmm, ' mn' 108 WRITE(numout,*) ' nb of seconds per minute rmmss = ', rmmss, ' s' 172 109 WRITE(numout,*) 173 WRITE(numout,*) ' smallest real computer value rsmall = ', rsmall 110 WRITE(numout,*) ' earth radius ra = ', ra , ' m' 111 WRITE(numout,*) ' gravity grav = ', grav , ' m/s^2' 112 WRITE(numout,*) 113 WRITE(numout,*) ' freezing point of water rt0 = ', rt0 , ' K' 114 WRITE(numout,*) 115 WRITE(numout,*) ' reference density and heat capacity now defined in eosbn2.f90' 116 WRITE(numout,*) 117 WRITE(numout,*) ' thermal conductivity of pure ice = ', rcnd_i , ' J/s/m/K' 118 WRITE(numout,*) ' thermal conductivity of snow is defined in a namelist ' 119 WRITE(numout,*) ' fresh ice specific heat = ', rcpi , ' J/kg/K' 120 WRITE(numout,*) ' latent heat of fusion of fresh ice / snow = ', rLfus , ' J/kg' 121 WRITE(numout,*) ' latent heat of subl. 
of fresh ice / snow = ', rLsub , ' J/kg' 122 WRITE(numout,*) ' density of sea ice = ', rhoi , ' kg/m^3' 123 WRITE(numout,*) ' density of snow = ', rhos , ' kg/m^3' 124 WRITE(numout,*) ' density of freshwater (in melt ponds) = ', rhow , ' kg/m^3' 125 WRITE(numout,*) ' salinity of ice (for pisces) = ', sice , ' psu' 126 WRITE(numout,*) ' salinity of sea (for pisces and isf) = ', soce , ' psu' 127 WRITE(numout,*) ' latent heat of evaporation (water) = ', rLevap , ' J/m^3' 128 WRITE(numout,*) ' von Karman constant = ', vkarmn 129 WRITE(numout,*) ' Stefan-Boltzmann constant = ', stefan , ' J/s/m^2/K^4' 130 WRITE(numout,*) 131 WRITE(numout,*) ' conversion: degre ==> radian rad = ', rad 132 WRITE(numout,*) 133 WRITE(numout,*) ' smallest real computer value rsmall = ', rsmall 174 134 ENDIF 175 135 -
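As a quick cross-check of the derived constants set in phy_cst above, the following standalone sketch (not part of this changeset; the program name and the locally defined wp kind are placeholders for NEMO's usual par_kind setup) reproduces the sidereal-year, sidereal-day and omega arithmetic:

PROGRAM check_phycst
   IMPLICIT NONE
   INTEGER , PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision, stands in for NEMO's wp
   REAL(wp), PARAMETER :: rpi  = 3.141592653589793_wp       ! pi
   REAL(wp), PARAMETER :: rday = 24._wp*60._wp*60._wp       ! day length [s]
   REAL(wp) :: rsiyea, rsiday, omega
   !
   rsiyea = 365.25_wp * rday * 2._wp * rpi / 6.283076_wp    ! sidereal year [s]
   rsiday = rday / ( 1._wp + rday / rsiyea )                 ! sidereal day  [s]
   omega  = 2._wp * rpi / rsiday                             ! Earth rotation rate [s-1]
   !
   WRITE(*,*) 'sidereal year rsiyea = ', rsiyea, ' s'        ! ~3.156e+07 s
   WRITE(*,*) 'sidereal day  rsiday = ', rsiday, ' s'        ! ~86164 s
   WRITE(*,*) 'rotation rate omega  = ', omega , ' s^-1'     ! ~7.2921e-05 s^-1
END PROGRAM check_phycst

The resulting omega is about 7.2921e-05 s^-1, consistent with the 7.292116e-05 value hard-wired in the key_cice branch of the diff above.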
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/prtctl.F90
r10725 r10727 8 8 !!---------------------------------------------------------------------- 9 9 USE dom_oce ! ocean space and time domain variables 10 11 12 10 #if defined key_nemocice_decomp 11 USE ice_domain_size, only: nx_global, ny_global 12 #endif 13 13 USE in_out_manager ! I/O manager 14 14 USE lib_mpp ! distributed memory computing 15 USE wrk_nemo ! work arrays16 15 17 16 IMPLICIT NONE … … 37 36 !!---------------------------------------------------------------------- 38 37 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 39 !! $Id: prtctl.F90 5025 2015-01-12 15:53:50Z timgraham$40 !! Software governed by the CeCILL licen ce (./LICENSE)38 !! $Id: prtctl.F90 10068 2018-08-28 14:09:04Z nicolasmartin $ 39 !! Software governed by the CeCILL license (see ./LICENSE) 41 40 !!---------------------------------------------------------------------- 42 41 CONTAINS 43 42 44 43 SUBROUTINE prt_ctl (tab2d_1, tab3d_1, mask1, clinfo1, tab2d_2, tab3d_2, & 45 & mask2, clinfo2, ovlap,kdim, clinfo3 )44 & mask2, clinfo2, kdim, clinfo3 ) 46 45 !!---------------------------------------------------------------------- 47 46 !! *** ROUTINE prt_ctl *** … … 75 74 !! mask2 : mask (3D) to apply to the tab[23]d_2 array 76 75 !! clinfo2 : information about the tab[23]d_2 array 77 !! ovlap : overlap value78 76 !! kdim : k- direction for 3D arrays 79 77 !! clinfo3 : additional information … … 87 85 REAL(wp), DIMENSION(:,:,:), INTENT(in), OPTIONAL :: mask2 88 86 CHARACTER (len=*) , INTENT(in), OPTIONAL :: clinfo2 89 INTEGER , INTENT(in), OPTIONAL :: ovlap90 87 INTEGER , INTENT(in), OPTIONAL :: kdim 91 88 CHARACTER (len=*) , INTENT(in), OPTIONAL :: clinfo3 92 89 ! 93 90 CHARACTER (len=15) :: cl2 94 INTEGER :: overlap,jn, sind, eind, kdir,j_id91 INTEGER :: jn, sind, eind, kdir,j_id 95 92 REAL(wp) :: zsum1, zsum2, zvctl1, zvctl2 96 REAL(wp), POINTER, DIMENSION(:,:) :: ztab2d_1, ztab2d_2 97 REAL(wp), POINTER, DIMENSION(:,:,:) :: zmask1, zmask2, ztab3d_1, ztab3d_2 98 !!---------------------------------------------------------------------- 99 100 CALL wrk_alloc( jpi,jpj, ztab2d_1, ztab2d_2 ) 101 CALL wrk_alloc( jpi,jpj,jpk, zmask1, zmask2, ztab3d_1, ztab3d_2 ) 93 REAL(wp), DIMENSION(jpi,jpj) :: ztab2d_1, ztab2d_2 94 REAL(wp), DIMENSION(jpi,jpj,jpk) :: zmask1, zmask2, ztab3d_1, ztab3d_2 95 !!---------------------------------------------------------------------- 102 96 103 97 ! Arrays, scalars initialization 104 overlap = 0105 98 kdir = jpkm1 106 99 cl2 = '' … … 118 111 ! Control of optional arguments 119 112 IF( PRESENT(clinfo2) ) cl2 = clinfo2 120 IF( PRESENT(ovlap) ) overlap = ovlap121 113 IF( PRESENT(kdim) ) kdir = kdim 122 114 IF( PRESENT(tab2d_1) ) ztab2d_1(:,:) = tab2d_1(:,:) … … 142 134 IF( .NOT. lsp_area ) THEN 143 135 IF (lk_mpp .AND. jpnij > 1) THEN 144 nictls = MAX( 1, nlditl(jn) - overlap)145 nictle = nleitl(jn) + overlap * MIN( 1, nlcitl(jn) - nleitl(jn))146 njctls = MAX( 1, nldjtl(jn) - overlap)147 njctle = nlejtl(jn) + overlap * MIN( 1, nlcjtl(jn) - nlejtl(jn))136 nictls = MAX( 1, nlditl(jn) ) 137 nictle = MIN(jpi, nleitl(jn) ) 138 njctls = MAX( 1, nldjtl(jn) ) 139 njctle = MIN(jpj, nlejtl(jn) ) 148 140 ! Do not take into account the bound of the domain 149 141 IF( ibonitl(jn) == -1 .OR. ibonitl(jn) == 2 ) nictls = MAX(2, nictls) … … 152 144 IF( ibonjtl(jn) == 1 .OR. 
ibonjtl(jn) == 2 ) njctle = MIN(njctle, nlejtl(jn) - 1) 153 145 ELSE 154 nictls = MAX( 1, nimpptl(jn) + nlditl(jn) - 1 - overlap)155 nictle = nimpptl(jn) + nleitl(jn) - 1 + overlap * MIN( 1, nlcitl(jn) - nleitl(jn) )156 njctls = MAX( 1, njmpptl(jn) + nldjtl(jn) - 1 - overlap)157 njctle = njmpptl(jn) + nlejtl(jn) - 1 + overlap * MIN( 1, nlcjtl(jn) - nlejtl(jn) )146 nictls = MAX( 1, nimpptl(jn) - 1 + nlditl(jn) ) 147 nictle = MIN(jpi, nimpptl(jn) - 1 + nleitl(jn) ) 148 njctls = MAX( 1, njmpptl(jn) - 1 + nldjtl(jn) ) 149 njctle = MIN(jpj, njmpptl(jn) - 1 + nlejtl(jn) ) 158 150 ! Do not take into account the bound of the domain 159 151 IF( ibonitl(jn) == -1 .OR. ibonitl(jn) == 2 ) nictls = MAX(2, nictls) … … 207 199 208 200 ENDDO 209 210 CALL wrk_dealloc( jpi,jpj, ztab2d_1, ztab2d_2 )211 CALL wrk_dealloc( jpi,jpj,jpk, zmask1, zmask2, ztab3d_1, ztab3d_2 )212 201 ! 213 202 END SUBROUTINE prt_ctl … … 398 387 !! periodic 399 388 !! Type : jperio global periodic condition 400 !! nperio local periodic condition401 389 !! 402 390 !! ** Action : - set domain parameters 403 391 !! nimpp : longitudinal index 404 392 !! njmpp : latitudinal index 405 !! nperio : lateral condition type406 393 !! narea : number for local area 407 394 !! nlcil : first dimension … … 425 412 nrecil, nrecjl, nldil, nleil, nldjl, nlejl 426 413 427 INTEGER, POINTER, DIMENSION(:,:) :: iimpptl, ijmpptl, ilcitl, ilcjtl ! workspace414 INTEGER, DIMENSION(jpi,jpj) :: iimpptl, ijmpptl, ilcitl, ilcjtl ! workspace 428 415 REAL(wp) :: zidom, zjdom ! temporary scalars 429 !!----------------------------------------------------------------------430 431 ! 432 CALL wrk_alloc( isplt, jsplt, ilcitl, ilcjtl, iimpptl, ijmpptl )416 INTEGER :: inum ! local logical unit 417 !!---------------------------------------------------------------------- 418 419 ! 433 420 ! 434 421 ! 1. Dimension arrays for subdomains … … 440 427 ! array (cf. par_oce.F90). 441 428 442 443 444 445 446 ijpi = ( jpiglo-2* jpreci + (isplt-1) ) / isplt + 2*jpreci447 ijpj = ( jpjglo-2* jprecj + (jsplt-1) ) / jsplt + 2*jprecj448 449 450 451 nrecil = 2 * jpreci452 nrecjl = 2 * jprecj429 #if defined key_nemocice_decomp 430 ijpi = ( nx_global+2-2*nn_hls + (isplt-1) ) / isplt + 2*nn_hls 431 ijpj = ( ny_global+2-2*nn_hls + (jsplt-1) ) / jsplt + 2*nn_hls 432 #else 433 ijpi = ( jpiglo-2*nn_hls + (isplt-1) ) / isplt + 2*nn_hls 434 ijpj = ( jpjglo-2*nn_hls + (jsplt-1) ) / jsplt + 2*nn_hls 435 #endif 436 437 438 nrecil = 2 * nn_hls 439 nrecjl = 2 * nn_hls 453 440 irestil = MOD( jpiglo - nrecil , isplt ) 454 441 irestjl = MOD( jpjglo - nrecjl , jsplt ) 455 442 456 443 IF( irestil == 0 ) irestil = isplt 444 #if defined key_nemocice_decomp 445 446 ! In order to match CICE the size of domains in NEMO has to be changed 447 ! The last line of blocks (west) will have fewer points 448 DO jj = 1, jsplt 449 DO ji=1, isplt-1 450 ilcitl(ji,jj) = ijpi 451 END DO 452 ilcitl(isplt,jj) = jpiglo - (isplt - 1) * (ijpi - nrecil) 453 END DO 454 455 #else 457 456 458 457 DO jj = 1, jsplt … … 465 464 END DO 466 465 466 #endif 467 467 468 468 IF( irestjl == 0 ) irestjl = jsplt 469 #if defined key_nemocice_decomp 470 471 ! Same change to domains in North-South direction as in East-West. 
472 DO ji = 1, isplt 473 DO jj=1, jsplt-1 474 ilcjtl(ji,jj) = ijpj 475 END DO 476 ilcjtl(ji,jsplt) = jpjglo - (jsplt - 1) * (ijpj - nrecjl) 477 END DO 478 479 #else 469 480 470 481 DO ji = 1, isplt … … 477 488 END DO 478 489 490 #endif 479 491 zidom = nrecil 480 492 DO ji = 1, isplt … … 538 550 ibonitl(jn) = nbondil 539 551 540 nldil = 1 + jpreci541 nleil = nlcil - jpreci552 nldil = 1 + nn_hls 553 nleil = nlcil - nn_hls 542 554 IF( nbondil == -1 .OR. nbondil == 2 ) nldil = 1 543 555 IF( nbondil == 1 .OR. nbondil == 2 ) nleil = nlcil 544 nldjl = 1 + jprecj545 nlejl = nlcjl - jprecj556 nldjl = 1 + nn_hls 557 nlejl = nlcjl - nn_hls 546 558 IF( nbondjl == -1 .OR. nbondjl == 2 ) nldjl = 1 547 559 IF( nbondjl == 1 .OR. nbondjl == 2 ) nlejl = nlcjl … … 552 564 END DO 553 565 ! 554 ! 555 CALL wrk_dealloc( isplt, jsplt, ilcitl, ilcjtl, iimpptl, ijmpptl ) 566 ! Save processor layout in layout_prtctl.dat file 567 IF(lwp) THEN 568 CALL ctl_opn( inum, 'layout_prtctl.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea ) 569 WRITE(inum,'(a)') 'nproc nlcil nlcjl nldil nldjl nleil nlejl nimpptl njmpptl ibonitl ibonjtl' 570 ! 571 DO jn = 1, ijsplt 572 WRITE(inum,'(i5,6i6,4i8)') jn-1,nlcitl(jn), nlcjtl(jn), & 573 & nlditl(jn), nldjtl(jn), & 574 & nleitl(jn), nlejtl(jn), & 575 & nimpptl(jn), njmpptl(jn), & 576 & ibonitl(jn), ibonjtl(jn) 577 END DO 578 CLOSE(inum) 579 END IF 556 580 ! 557 581 ! -
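The subdomain dimensioning in sub_dom above shares the interior points (global size minus the two nn_hls-wide halos) as evenly as possible among the isplt x jsplt subdomains, rounding up, and then adds the halos back on each side. A standalone sketch of that arithmetic (not part of this changeset; the program name and the ORCA1-like global sizes are hypothetical) is:

PROGRAM check_subdom_size
   IMPLICIT NONE
   INTEGER :: jpiglo, jpjglo   ! global domain size (hypothetical example values)
   INTEGER :: isplt , jsplt    ! number of subdomains in i and j
   INTEGER :: nn_hls           ! halo width
   INTEGER :: ijpi  , ijpj     ! resulting local subdomain size
   !
   jpiglo = 362   ;   jpjglo = 292     ! e.g. an ORCA1-like grid (hypothetical)
   isplt  = 4     ;   jsplt  = 3
   nn_hls = 1
   !
   ! same arithmetic as sub_dom: split the interior (global size minus the two
   ! halos) as evenly as possible, rounding up, then add the halo back
   ijpi = ( jpiglo - 2*nn_hls + (isplt-1) ) / isplt + 2*nn_hls
   ijpj = ( jpjglo - 2*nn_hls + (jsplt-1) ) / jsplt + 2*nn_hls
   !
   WRITE(*,*) 'local subdomain size ijpi x ijpj = ', ijpi, ijpj   ! 92 x 99 with these values
END PROGRAM check_subdom_size

Because the division rounds up, some subdomains must end up one point smaller; that is what the irestil/irestjl adjustment in the lines above handles.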
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/step_oce.f90
r9598 r10727 12 12 USE daymod ! calendar (day routine) 13 13 14 USE eosbn2 ! equation of state (eos_bn2 routine)15 14 16 15 USE prtctl ! Print control (prt_ctl routine) -
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/timing.F90
r10725 r10727 31 31 PUBLIC timing_start, timing_stop ! called in each routine to time 32 32 33 33 #if defined key_mpp_mpi 34 34 INCLUDE 'mpif.h' 35 35 #endif 36 36 37 37 ! Variables for fine grain timing 38 38 TYPE timer 39 39 CHARACTER(LEN=20) :: cname 40 REAL(wp) :: t_cpu, t_clock, tsum_cpu, tsum_clock, tmax_cpu, tmax_clock, tmin_cpu, tmin_clock, tsub_cpu, tsub_clock 40 CHARACTER(LEN=20) :: surname 41 INTEGER :: rank 42 REAL(wp) :: t_cpu, t_clock, tsum_cpu, tsum_clock, tmax_cpu, tmax_clock, tmin_cpu, tmin_clock, tsub_cpu, tsub_clock 41 43 INTEGER :: ncount, ncount_max, ncount_rate 42 44 INTEGER :: niter … … 49 51 TYPE alltimer 50 52 CHARACTER(LEN=20), DIMENSION(:), POINTER :: cname => NULL() 51 52 53 53 REAL(wp), DIMENSION(:), POINTER :: tsum_cpu => NULL() 54 REAL(wp), DIMENSION(:), POINTER :: tsum_clock => NULL() 55 INTEGER, DIMENSION(:), POINTER :: niter => NULL() 54 56 TYPE(alltimer), POINTER :: next => NULL() 55 57 TYPE(alltimer), POINTER :: prev => NULL() … … 58 60 TYPE(timer), POINTER :: s_timer_root => NULL() 59 61 TYPE(timer), POINTER :: s_timer => NULL() 62 TYPE(timer), POINTER :: s_timer_old => NULL() 63 60 64 TYPE(timer), POINTER :: s_wrk => NULL() 61 65 REAL(wp) :: t_overclock, t_overcpu … … 77 81 LOGICAL :: lwriter 78 82 !!---------------------------------------------------------------------- 79 !! NEMO/O PA 4.0 , NEMO Consortium (2011)80 !! $Id: timing.F90 5120 2015-03-03 16:11:55Z acc$81 !! Software governed by the CeCILL licen ce (./LICENSE)83 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 84 !! $Id: timing.F90 10510 2019-01-14 16:13:17Z clem $ 85 !! Software governed by the CeCILL license (see ./LICENSE) 82 86 !!---------------------------------------------------------------------- 83 87 CONTAINS … … 90 94 CHARACTER(len=*), INTENT(in) :: cdinfo 91 95 ! 92 93 ! Create timing structure at first call 94 IF( .NOT. l_initdone ) THEN 95 CALL timing_ini_var(cdinfo) 96 IF(ASSOCIATED(s_timer) ) s_timer_old => s_timer 97 ! 98 ! Create timing structure at first call of the routine 99 CALL timing_ini_var(cdinfo) 100 ! write(*,*) 'after inivar ', s_timer%cname 101 102 ! ici timing_ini_var a soit retrouve s_timer et fait return soit ajoute un maillon 103 ! maintenant on regarde si le call d'avant corrsspond a un parent ou si il est ferme 104 IF( .NOT. s_timer_old%l_tdone ) THEN 105 s_timer%parent_section => s_timer_old 96 106 ELSE 97 s_timer => s_timer_root 98 DO WHILE( TRIM(s_timer%cname) /= TRIM(cdinfo) ) 99 IF( ASSOCIATED(s_timer%next) ) s_timer => s_timer%next 100 END DO 101 ENDIF 107 s_timer%parent_section => NULL() 108 ENDIF 109 102 110 s_timer%l_tdone = .FALSE. 103 111 s_timer%niter = s_timer%niter + 1 … … 108 116 CALL CPU_TIME( s_timer%t_cpu ) 109 117 ! clock time collection 110 118 #if defined key_mpp_mpi 111 119 s_timer%t_clock= MPI_Wtime() 112 113 114 120 #else 121 CALL SYSTEM_CLOCK(COUNT_RATE=s_timer%ncount_rate, COUNT_MAX=s_timer%ncount_max) 122 CALL SYSTEM_CLOCK(COUNT = s_timer%ncount) 123 #endif 124 ! write(*,*) 'end of start ', s_timer%cname 115 125 116 126 ! … … 127 137 ! 128 138 INTEGER :: ifinal_count, iperiods 129 REAL(wp) :: zcpu_end, zmpitime 139 REAL(wp) :: zcpu_end, zmpitime,zcpu_raw,zclock_raw 130 140 ! 131 141 s_wrk => NULL() 132 142 133 143 ! clock time collection 134 144 #if defined key_mpp_mpi 135 145 zmpitime = MPI_Wtime() 136 137 138 146 #else 147 CALL SYSTEM_CLOCK(COUNT = ifinal_count) 148 #endif 139 149 ! 
CPU time collection 140 150 CALL CPU_TIME( zcpu_end ) 141 151 142 s_timer => s_timer_root 143 DO WHILE( TRIM(s_timer%cname) /= TRIM(cdinfo) ) 144 IF( ASSOCIATED(s_timer%next) ) s_timer => s_timer%next 145 END DO 152 !!$ IF(associated(s_timer%parent_section))then 153 !!$ write(*,*) s_timer%cname,' <-- ', s_timer%parent_section%cname 154 !!$ ENDIF 155 156 ! No need to search ... : s_timer has the last value defined in start 157 ! s_timer => s_timer_root 158 ! DO WHILE( TRIM(s_timer%cname) /= TRIM(cdinfo) ) 159 ! IF( ASSOCIATED(s_timer%next) ) s_timer => s_timer%next 160 ! END DO 146 161 147 162 ! CPU time correction 148 s_timer%t_cpu = zcpu_end - s_timer%t_cpu - t_overcpu - s_timer%tsub_cpu 149 163 zcpu_raw = zcpu_end - s_timer%t_cpu - t_overcpu ! total time including child 164 s_timer%t_cpu = zcpu_raw - s_timer%tsub_cpu 165 ! IF(s_timer%cname==trim('lbc_lnk_2d')) write(*,*) s_timer%tsub_cpu,zcpu_end 166 150 167 ! clock time correction 151 152 s_timer%t_clock = zmpitime - s_timer%t_clock - t_overclock - s_timer%tsub_clock 153 154 155 156 157 158 168 #if defined key_mpp_mpi 169 zclock_raw = zmpitime - s_timer%t_clock - t_overclock ! total time including child 170 s_timer%t_clock = zclock_raw - t_overclock - s_timer%tsub_clock 171 #else 172 iperiods = ifinal_count - s_timer%ncount 173 IF( ifinal_count < s_timer%ncount ) & 174 iperiods = iperiods + s_timer%ncount_max 175 zclock_raw = REAL(iperiods) / s_timer%ncount_rate !- t_overclock 176 s_timer%t_clock = zclock_raw - s_timer%tsub_clock 177 #endif 178 ! IF(s_timer%cname==trim('lbc_lnk_2d')) write(*,*) zclock_raw , s_timer%tsub_clock 159 179 160 180 ! Correction of parent section 161 181 IF( .NOT. PRESENT(csection) ) THEN 162 s_wrk => s_timer 163 DO WHILE ( ASSOCIATED(s_wrk%parent_section ) ) 164 s_wrk => s_wrk%parent_section 165 s_wrk%tsub_cpu = s_wrk%tsub_cpu + s_timer%t_cpu 166 s_wrk%tsub_clock = s_wrk%tsub_clock + s_timer%t_clock 167 END DO 182 IF ( ASSOCIATED(s_timer%parent_section ) ) THEN 183 s_timer%parent_section%tsub_cpu = zcpu_raw + s_timer%parent_section%tsub_cpu 184 s_timer%parent_section%tsub_clock = zclock_raw + s_timer%parent_section%tsub_clock 185 ENDIF 168 186 ENDIF 169 187 … … 186 204 s_timer%l_tdone = .TRUE. 187 205 ! 206 ! 207 ! we come back 208 IF ( ASSOCIATED(s_timer%parent_section ) ) s_timer => s_timer%parent_section 209 210 ! write(*,*) 'end of stop ', s_timer%cname 211 188 212 END SUBROUTINE timing_stop 189 213 … … 211 235 WRITE(numtime,*) ' NEMO team' 212 236 WRITE(numtime,*) ' Ocean General Circulation Model' 213 WRITE(numtime,*) ' version 3.6 (2015) '237 WRITE(numtime,*) ' version 4.0 (2019) ' 214 238 WRITE(numtime,*) 215 239 WRITE(numtime,*) ' Timing Informations ' … … 219 243 220 244 ! Compute clock function overhead 221 245 #if defined key_mpp_mpi 222 246 t_overclock = MPI_WTIME() 223 247 t_overclock = MPI_WTIME() - t_overclock 248 #else 249 CALL SYSTEM_CLOCK(COUNT_RATE=ncount_rate, COUNT_MAX=ncount_max) 250 CALL SYSTEM_CLOCK(COUNT = istart_count) 251 CALL SYSTEM_CLOCK(COUNT = ifinal_count) 252 iperiods = ifinal_count - istart_count 253 IF( ifinal_count < istart_count ) & 254 iperiods = iperiods + ncount_max 255 t_overclock = REAL(iperiods) / ncount_rate 256 #endif 224 257 225 258 ! Compute cpu_time function overhead … … 235 268 236 269 CALL CPU_TIME(t_cpu(1)) 270 #if defined key_mpp_mpi 237 271 ! Start elapsed and CPU time counters 238 272 t_elaps(1) = MPI_WTIME() 273 #else 274 CALL SYSTEM_CLOCK(COUNT_RATE=ncount_rate, COUNT_MAX=ncount_max) 275 CALL SYSTEM_CLOCK(COUNT = ncount) 276 #endif 239 277 ! 
240 278 END SUBROUTINE timing_init … … 249 287 TYPE(timer), POINTER :: s_temp 250 288 INTEGER :: idum, iperiods, icode 289 INTEGER :: ji 251 290 LOGICAL :: ll_ord, ll_averep 252 291 CHARACTER(len=120) :: clfmt 253 292 REAL(wp), DIMENSION(:), ALLOCATABLE :: timing_glob 293 REAL(wp) :: zsypd ! simulated years per day (Balaji 2017) 294 REAL(wp) :: zperc, ztot 295 254 296 ll_averep = .TRUE. 255 297 … … 257 299 CALL CPU_TIME(t_cpu(2)) 258 300 t_cpu(2) = t_cpu(2) - t_cpu(1) - t_overcpu 301 #if defined key_mpp_mpi 259 302 t_elaps(2) = MPI_WTIME() - t_elaps(1) - t_overclock 303 #else 304 CALL SYSTEM_CLOCK(COUNT = nfinal_count) 305 iperiods = nfinal_count - ncount 306 IF( nfinal_count < ncount ) & 307 iperiods = iperiods + ncount_max 308 t_elaps(2) = REAL(iperiods) / ncount_rate - t_overclock 309 #endif 260 310 261 311 ! End of timings on date & time … … 270 320 END DO 271 321 idum = nsize 272 IF(lk_mpp) CALL mpp_sum(idum)322 CALL mpp_sum('timing', idum) 273 323 IF( idum/jpnij /= nsize ) THEN 274 324 IF( lwriter ) WRITE(numtime,*) ' ===> W A R N I N G: ' … … 280 330 ENDIF 281 331 332 #if defined key_mpp_mpi 282 333 ! in MPI gather some info 283 334 ALLOCATE( all_etime(jpnij), all_ctime(jpnij) ) 284 335 CALL MPI_ALLGATHER(t_elaps(2), 1, MPI_DOUBLE_PRECISION, & 285 336 all_etime , 1, MPI_DOUBLE_PRECISION, & 286 MPI_COMM_O PA, icode)337 MPI_COMM_OCE, icode) 287 338 CALL MPI_ALLGATHER(t_cpu(2) , 1, MPI_DOUBLE_PRECISION, & 288 339 all_ctime, 1, MPI_DOUBLE_PRECISION, & 289 MPI_COMM_O PA, icode)340 MPI_COMM_OCE, icode) 290 341 tot_etime = SUM(all_etime(:)) 291 342 tot_ctime = SUM(all_ctime(:)) 343 #else 344 tot_etime = t_elaps(2) 345 tot_ctime = t_cpu (2) 346 #endif 292 347 293 348 ! write output file … … 297 352 IF( lwriter ) WRITE(numtime,'(5x,f12.3,1x,f12.3)') tot_etime, tot_ctime 298 353 IF( lwriter ) WRITE(numtime,*) 354 #if defined key_mpp_mpi 299 355 IF( ll_averep ) CALL waver_info 300 356 CALL wmpi_info 357 #endif 301 358 IF( lwriter ) CALL wcurrent_info 302 359 … … 311 368 & ctime(2)(1:2), ctime(2)(3:4), ctime(2)(5:6), & 312 369 & czone(1:3), czone(4:5) 370 371 #if defined key_mpp_mpi 372 ALLOCATE(timing_glob(4*jpnij), stat=icode) 373 CALL MPI_GATHER( (/compute_time, waiting_time(1), waiting_time(2), elapsed_time/), & 374 & 4, MPI_DOUBLE_PRECISION, timing_glob, 4, MPI_DOUBLE_PRECISION, 0, MPI_COMM_OCE, icode) 375 IF( narea == 1 ) THEN 376 WRITE(numtime,*) ' ' 377 WRITE(numtime,*) ' Report on time spent on waiting MPI messages ' 378 WRITE(numtime,*) ' total timing measured between nit000+1 and nitend-1 ' 379 WRITE(numtime,*) ' warning: includes restarts writing time if output before nitend... ' 380 WRITE(numtime,*) ' ' 381 DO ji = 1, jpnij 382 ztot = SUM( timing_glob(4*ji-3:4*ji-1) ) 383 WRITE(numtime,'(A28,F11.6, A34,I8)') 'Computing time : ',timing_glob(4*ji-3), ' on MPI rank : ', ji 384 IF ( ztot /= 0. ) zperc = timing_glob(4*ji-2) / ztot * 100. 385 WRITE(numtime,'(A28,F11.6,A2, F4.1,A3,A25,I8)') 'Waiting lbc_lnk time : ',timing_glob(4*ji-2) & 386 & , ' (', zperc,' %)', ' on MPI rank : ', ji 387 IF ( ztot /= 0. ) zperc = timing_glob(4*ji-1) / ztot * 100. 388 WRITE(numtime,'(A28,F11.6,A2, F4.1,A3,A25,I8)') 'Waiting global time : ',timing_glob(4*ji-1) & 389 & , ' (', zperc,' %)', ' on MPI rank : ', ji 390 zsypd = rn_rdt * REAL(nitend-nit000-1, wp) / (timing_glob(4*ji) * 365.) 
391 WRITE(numtime,'(A28,F11.6,A7,F10.3,A2,A15,I8)') 'Total time : ',timing_glob(4*ji ) & 392 & , ' (SYPD: ', zsypd, ')', ' on MPI rank : ', ji 393 END DO 394 ENDIF 395 DEALLOCATE(timing_glob) 396 #endif 313 397 314 398 IF( lwriter ) CLOSE(numtime) … … 365 449 END SUBROUTINE wcurrent_info 366 450 451 #if defined key_mpp_mpi 367 452 SUBROUTINE waver_info 368 453 !!---------------------------------------------------------------------- … … 438 523 CALL MPI_GATHER(s_timer%cname , 20, MPI_CHARACTER, & 439 524 sl_timer_glob%cname, 20, MPI_CHARACTER, & 440 0, MPI_COMM_O PA, icode)525 0, MPI_COMM_OCE, icode) 441 526 CALL MPI_GATHER(s_timer%tsum_clock , 1, MPI_DOUBLE_PRECISION, & 442 527 sl_timer_glob%tsum_clock, 1, MPI_DOUBLE_PRECISION, & 443 0, MPI_COMM_O PA, icode)528 0, MPI_COMM_OCE, icode) 444 529 CALL MPI_GATHER(s_timer%tsum_cpu , 1, MPI_DOUBLE_PRECISION, & 445 530 sl_timer_glob%tsum_cpu, 1, MPI_DOUBLE_PRECISION, & 446 0, MPI_COMM_O PA, icode)531 0, MPI_COMM_OCE, icode) 447 532 CALL MPI_GATHER(s_timer%niter , 1, MPI_INTEGER, & 448 533 sl_timer_glob%niter, 1, MPI_INTEGER, & 449 0, MPI_COMM_O PA, icode)534 0, MPI_COMM_OCE, icode) 450 535 451 536 IF( narea == 1 .AND. ASSOCIATED(s_timer%next) ) THEN … … 461 546 s_timer => s_timer%next 462 547 END DO 463 464 WRITE(*,*) 'ARPDBG: timing: done gathers'465 548 466 549 IF( narea == 1 ) THEN … … 485 568 sl_timer_glob => sl_timer_glob%next 486 569 END DO 487 488 WRITE(*,*) 'ARPDBG: timing: done computing stats'489 570 490 571 ! reorder the averaged list by CPU time … … 608 689 ! 609 690 END SUBROUTINE wmpi_info 691 #endif 610 692 611 693 … … 643 725 ALLOCATE(s_wrk) 644 726 s_wrk => NULL() 645 727 ! 728 ALLOCATE(s_timer_old) 729 s_timer_old%cname = cdinfo 730 s_timer_old%t_cpu = 0._wp 731 s_timer_old%t_clock = 0._wp 732 s_timer_old%tsum_cpu = 0._wp 733 s_timer_old%tsum_clock = 0._wp 734 s_timer_old%tmax_cpu = 0._wp 735 s_timer_old%tmax_clock = 0._wp 736 s_timer_old%tmin_cpu = 0._wp 737 s_timer_old%tmin_clock = 0._wp 738 s_timer_old%tsub_cpu = 0._wp 739 s_timer_old%tsub_clock = 0._wp 740 s_timer_old%ncount = 0 741 s_timer_old%ncount_rate = 0 742 s_timer_old%ncount_max = 0 743 s_timer_old%niter = 0 744 s_timer_old%l_tdone = .TRUE. 745 s_timer_old%next => NULL() 746 s_timer_old%prev => NULL() 747 646 748 ELSE 647 749 s_timer => s_timer_root 648 750 ! case of already existing area (typically inside a loop) 751 ! write(*,*) 'in ini_var for routine : ', cdinfo 649 752 DO WHILE( ASSOCIATED(s_timer) ) 650 IF( TRIM(s_timer%cname) .EQ. TRIM(cdinfo) ) RETURN 753 IF( TRIM(s_timer%cname) .EQ. TRIM(cdinfo) ) THEN 754 ! write(*,*) 'in ini_var for routine : ', cdinfo,' we return' 755 RETURN ! cdinfo is already in the chain 756 ENDIF 651 757 s_timer => s_timer%next 652 758 END DO 653 759 654 760 ! end of the chain 655 761 s_timer => s_timer_root … … 657 763 s_timer => s_timer%next 658 764 END DO 659 660 ALLOCATE(s_timer%next) 765 766 ! write(*,*) 'after search', s_timer%cname 767 ! cdinfo is not part of the chain so we add it with initialisation 768 ALLOCATE(s_timer%next) 769 ! write(*,*) 'after allocation of next' 770 661 771 s_timer%next%cname = cdinfo 662 772 s_timer%next%t_cpu = 0._wp … … 679 789 s_timer%next%next => NULL() 680 790 s_timer => s_timer%next 681 682 ! are we inside a section 683 s_wrk => s_timer%prev 684 ll_section = .FALSE. 685 DO WHILE( ASSOCIATED(s_wrk) .AND. .NOT. ll_section ) 686 IF( .NOT. s_wrk%l_tdone ) THEN 687 ll_section = .TRUE. 688 s_timer%parent_section => s_wrk 689 ENDIF 690 s_wrk => s_wrk%prev 691 END DO 692 ENDIF 693 ! 791 ENDIF 792 ! 
write(*,*) 'after allocation' 793 ! 694 794 END SUBROUTINE timing_ini_var 695 795 … … 704 804 ! IF(lwp) WRITE(numout,*) 'timing_reset : instrumented routines for timing' 705 805 ! IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~' 706 !CALL timing_list(s_timer_root)806 CALL timing_list(s_timer_root) 707 807 ! WRITE(numout,*) 708 808 !
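The per-rank report added to timing_finalize above prints a SYPD figure (simulated years per day, Balaji 2017) derived from the measured elapsed time. A standalone sketch of that formula (not part of this changeset; the program name and all numerical inputs are hypothetical) is:

PROGRAM check_sypd
   IMPLICIT NONE
   INTEGER , PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision, stands in for NEMO's wp
   REAL(wp) :: rn_rdt          ! model time step [s]
   INTEGER  :: nit000, nitend  ! first and last time-step indices of the run
   REAL(wp) :: elapsed         ! measured wall-clock time for the timed window [s]
   REAL(wp) :: zsypd           ! simulated years per wall-clock day
   !
   rn_rdt  = 1350._wp                  ! hypothetical: 1350 s ocean time step
   nit000  = 1   ;   nitend = 5761     ! hypothetical: about 90 simulated days
   elapsed = 3600._wp                  ! hypothetical: one hour of wall-clock time
   !
   ! same expression as in the report above: simulated years are
   ! rn_rdt*(nitend-nit000-1)/(365*86400) and wall-clock days are elapsed/86400,
   ! so the 86400 factors cancel and only the factor 365 remains
   zsypd = rn_rdt * REAL( nitend - nit000 - 1, wp ) / ( elapsed * 365._wp )
   !
   WRITE(*,*) 'SYPD = ', zsypd         ! ~5.9 with these values
END PROGRAM check_sypd

With the example numbers this gives roughly 5.9 simulated years per wall-clock day; note that the expression uses 365 rather than 365.25 days per year, which is adequate for a throughput metric.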