Changeset 10727 for utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lib_mpp.F90
- Timestamp: 2019-02-27T17:02:02+01:00
- File: 1 moved
utils/tools_AGRIF_CMEMS_2020/DOMAINcfg/src/lib_mpp.F90
r10725 r10727 8 8 !! 8.0 ! 1998 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 9 9 !! ! 1998 (J.M. Molines) Open boundary conditions 10 !! NEMO 1.0 ! 2003 (J. -M. Molines, G. Madec) F90, free form10 !! NEMO 1.0 ! 2003 (J.M. Molines, G. Madec) F90, free form 11 11 !! ! 2003 (J.M. Molines) add mpp_ini_north(_3d,_2d) 12 12 !! - ! 2004 (R. Bourdalle Badie) isend option in mpi … … 19 19 !! 3.2 ! 2009 (O. Marti) add mpp_ini_znl 20 20 !! 4.0 ! 2011 (G. Madec) move ctl_ routines from in_out_manager 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add 'mpp_lnk_bdy_3d', 'mpp_lnk_obc_3d', 22 !! 'mpp_lnk_bdy_2d' and 'mpp_lnk_obc_2d' routines and update 23 !! the mppobc routine to optimize the BDY and OBC communications 24 !! 3.5 ! 2013 ( C. Ethe, G. Madec ) message passing arrays as local variables 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add mpp_lnk_bdy_3d/2d routines to optimize the BDY comm. 22 !! 3.5 ! 2013 (C. Ethe, G. Madec) message passing arrays as local variables 25 23 !! 3.5 ! 2013 (S.Mocavero, I.Epicoco - CMCC) north fold optimizations 26 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added 'mpp_lnk_2d_multiple', 'mpp_lbc_north_2d_multiple', 'mpp_max_multiple' 24 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added '_multiple' case for 2D lbc and max 25 !! 4.0 ! 2017 (G. Madec) automatique allocation of array argument (use any 3rd dimension) 26 !! - ! 2017 (G. Madec) create generic.h90 files to generate all lbc and north fold routines 27 27 !!---------------------------------------------------------------------- 28 28 … … 34 34 !! get_unit : give the index of an unused logical unit 35 35 !!---------------------------------------------------------------------- 36 36 #if defined key_mpp_mpi 37 37 !!---------------------------------------------------------------------- 38 38 !! 'key_mpp_mpi' MPI massively parallel processing library … … 41 41 !! mynode : indentify the processor unit 42 42 !! mpp_lnk : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d) 43 !! mpp_lnk_3d_gather : Message passing manadgement for two 3D arrays44 !! mpp_lnk_e : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)45 43 !! mpp_lnk_icb : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb) 46 44 !! mpprecv : 47 !! mppsend : SUBROUTINE mpp_ini_znl45 !! mppsend : 48 46 !! mppscatter : 49 47 !! mppgather : … … 56 54 !! mppstop : 57 55 !! mpp_ini_north : initialisation of north fold 58 !! mpp_lbc_north : north fold processors gathering 59 !! mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo 60 !! mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs 56 !! mpp_lbc_north_icb : alternative to mpp_nfd for extra outer halo with icebergs 61 57 !!---------------------------------------------------------------------- 62 58 USE dom_oce ! ocean space and time domain 63 59 USE lbcnfd ! north fold treatment 64 60 USE in_out_manager ! I/O manager 65 USE wrk_nemo ! work arrays66 61 67 62 IMPLICIT NONE 68 63 PRIVATE 69 64 65 INTERFACE mpp_nfd 66 MODULE PROCEDURE mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 67 MODULE PROCEDURE mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 68 END INTERFACE 69 70 ! Interface associated to the mpp_lnk_... routines is defined in lbclnk 71 PUBLIC mpp_lnk_2d , mpp_lnk_3d , mpp_lnk_4d 72 PUBLIC mpp_lnk_2d_ptr, mpp_lnk_3d_ptr, mpp_lnk_4d_ptr 73 ! 
74 !!gm this should be useless 75 PUBLIC mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 76 PUBLIC mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 77 !!gm end 78 ! 70 79 PUBLIC ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam 71 80 PUBLIC mynode, mppstop, mppsync, mpp_comm_free 72 PUBLIC mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e 81 PUBLIC mpp_ini_north 82 PUBLIC mpp_lnk_2d_icb 83 PUBLIC mpp_lbc_north_icb 73 84 PUBLIC mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc 74 PUBLIC mpp_max_multiple 75 PUBLIC mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e 76 PUBLIC mpp_lnk_2d_9 , mpp_lnk_2d_multiple 77 PUBLIC mpp_lnk_sum_3d, mpp_lnk_sum_2d 85 PUBLIC mpp_delay_max, mpp_delay_sum, mpp_delay_rcv 78 86 PUBLIC mppscatter, mppgather 79 PUBLIC mpp_ini_ice, mpp_ini_znl 80 PUBLIC mppsize 87 PUBLIC mpp_ini_znl 81 88 PUBLIC mppsend, mpprecv ! needed by TAM and ICB routines 82 PUBLIC mpp_lnk_bdy_2d, mpp_lnk_bdy_3d 83 PUBLIC mpp_lbc_north_icb, mpp_lnk_2d_icb 84 PUBLIC mpprank 85 86 TYPE arrayptr 87 REAL , DIMENSION (:,:), POINTER :: pt2d 88 END TYPE arrayptr 89 PUBLIC arrayptr 89 PUBLIC mpp_lnk_bdy_2d, mpp_lnk_bdy_3d, mpp_lnk_bdy_4d 90 90 91 91 !! * Interfaces … … 101 101 INTERFACE mpp_sum 102 102 MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real, & 103 mppsum_realdd, mppsum_a_realdd 104 END INTERFACE 105 INTERFACE mpp_lbc_north 106 MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d 103 & mppsum_realdd, mppsum_a_realdd 107 104 END INTERFACE 108 105 INTERFACE mpp_minloc … … 113 110 END INTERFACE 114 111 115 INTERFACE mpp_max_multiple116 MODULE PROCEDURE mppmax_real_multiple117 END INTERFACE118 119 112 !! ========================= !! 120 113 !! MPI variable definition !! … … 128 121 INTEGER, PARAMETER :: nprocmax = 2**10 ! maximun dimension (required to be a power of 2) 129 122 130 INTEGER :: mppsize ! number of process131 INTEGER :: mpprank ! process number [ 0 - size-1 ]123 INTEGER, PUBLIC :: mppsize ! number of process 124 INTEGER, PUBLIC :: mpprank ! process number [ 0 - size-1 ] 132 125 !$AGRIF_DO_NOT_TREAT 133 INTEGER, PUBLIC :: mpi_comm_o pa! opa local communicator126 INTEGER, PUBLIC :: mpi_comm_oce ! opa local communicator 134 127 !$AGRIF_END_DO_NOT_TREAT 135 128 136 129 INTEGER :: MPI_SUMDD 137 138 ! variables used in case of sea-ice139 INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)140 INTEGER :: ngrp_iworld ! group ID for the world processors (for rheology)141 INTEGER :: ngrp_ice ! group ID for the ice processors (for rheology)142 INTEGER :: ndim_rank_ice ! number of 'ice' processors143 INTEGER :: n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm144 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_ice ! dimension ndim_rank_ice145 130 146 131 ! variables used for zonal integration 147 132 INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average 148 LOGICAL, PUBLIC :: l_znl_root ! True on the 'left'most processor on the same row149 INTEGER :: ngrp_znl !group ID for the znl processors150 INTEGER :: ndim_rank_znl !number of processors on the same zonal average133 LOGICAL, PUBLIC :: l_znl_root !: True on the 'left'most processor on the same row 134 INTEGER :: ngrp_znl ! group ID for the znl processors 135 INTEGER :: ndim_rank_znl ! number of processors on the same zonal average 151 136 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_znl ! dimension ndim_rank_znl, number of the procs into the same znl domain 152 137 153 138 ! 
North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM) 154 INTEGER, PUBLIC :: ngrp_world ! group ID for the world processors155 INTEGER, PUBLIC :: ngrp_opa ! group ID for the opa processors156 INTEGER, PUBLIC :: ngrp_north ! group ID for the northern processors (to be fold)157 INTEGER, PUBLIC :: ncomm_north ! communicator made by the processors belonging to ngrp_north158 INTEGER, PUBLIC :: ndim_rank_north ! number of 'sea' processor in the northern line (can be /= jpni !)159 INTEGER, PUBLIC :: njmppmax ! value of njmpp for the processors of the northern line160 INTEGER, PUBLIC :: north_root ! number (in the comm_opa) of proc 0 in the northern comm161 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC :: nrank_north !dimension ndim_rank_north139 INTEGER, PUBLIC :: ngrp_world !: group ID for the world processors 140 INTEGER, PUBLIC :: ngrp_opa !: group ID for the opa processors 141 INTEGER, PUBLIC :: ngrp_north !: group ID for the northern processors (to be fold) 142 INTEGER, PUBLIC :: ncomm_north !: communicator made by the processors belonging to ngrp_north 143 INTEGER, PUBLIC :: ndim_rank_north !: number of 'sea' processor in the northern line (can be /= jpni !) 144 INTEGER, PUBLIC :: njmppmax !: value of njmpp for the processors of the northern line 145 INTEGER, PUBLIC :: north_root !: number (in the comm_opa) of proc 0 in the northern comm 146 INTEGER, PUBLIC, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_north !: dimension ndim_rank_north 162 147 163 148 ! Type of send : standard, buffered, immediate 164 CHARACTER(len=1), PUBLIC :: cn_mpi_send ! type od mpi send/recieve (S=standard, B=bsend, I=isend) 165 LOGICAL, PUBLIC :: l_isend = .FALSE. ! isend use indicator (T if cn_mpi_send='I') 166 INTEGER, PUBLIC :: nn_buffer ! size of the buffer in case of mpi_bsend 167 168 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 169 170 LOGICAL, PUBLIC :: ln_nnogather ! namelist control of northfold comms 171 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. ! internal control of northfold comms 172 INTEGER, PUBLIC :: ityp 149 CHARACTER(len=1), PUBLIC :: cn_mpi_send !: type od mpi send/recieve (S=standard, B=bsend, I=isend) 150 LOGICAL , PUBLIC :: l_isend = .FALSE. !: isend use indicator (T if cn_mpi_send='I') 151 INTEGER , PUBLIC :: nn_buffer !: size of the buffer in case of mpi_bsend 152 153 ! Communications summary report 154 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_lbc !: names of lbc_lnk calling routines 155 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_glb !: names of global comm calling routines 156 CHARACTER(len=400), DIMENSION(:), ALLOCATABLE :: crname_dlg !: names of delayed global comm calling routines 157 INTEGER, PUBLIC :: ncom_stp = 0 !: copy of time step # istp 158 INTEGER, PUBLIC :: ncom_fsbc = 1 !: copy of sbc time step # nn_fsbc 159 INTEGER, PUBLIC :: ncom_dttrc = 1 !: copy of top time step # nn_dttrc 160 INTEGER, PUBLIC :: ncom_freq !: frequency of comm diagnostic 161 INTEGER, PUBLIC , DIMENSION(:,:), ALLOCATABLE :: ncomm_sequence !: size of communicated arrays (halos) 162 INTEGER, PARAMETER, PUBLIC :: ncom_rec_max = 3000 !: max number of communication record 163 INTEGER, PUBLIC :: n_sequence_lbc = 0 !: # of communicated arraysvia lbc 164 INTEGER, PUBLIC :: n_sequence_glb = 0 !: # of global communications 165 INTEGER, PUBLIC :: n_sequence_dlg = 0 !: # of delayed global communications 166 INTEGER, PUBLIC :: numcom = -1 !: logical unit for communicaton report 167 LOGICAL, PUBLIC :: l_full_nf_update = .TRUE. 
!: logical for a full (2lines) update of bc at North fold report 168 INTEGER, PARAMETER, PUBLIC :: nbdelay = 2 !: number of delayed operations 169 !: name (used as id) of allreduce-delayed operations 170 ! Warning: we must use the same character length in an array constructor (at least for gcc compiler) 171 CHARACTER(len=32), DIMENSION(nbdelay), PUBLIC :: c_delaylist = (/ 'cflice', 'fwb ' /) 172 !: component name where the allreduce-delayed operation is performed 173 CHARACTER(len=3), DIMENSION(nbdelay), PUBLIC :: c_delaycpnt = (/ 'ICE' , 'OCE' /) 174 TYPE, PUBLIC :: DELAYARR 175 REAL( wp), POINTER, DIMENSION(:) :: z1d => NULL() 176 COMPLEX(wp), POINTER, DIMENSION(:) :: y1d => NULL() 177 END TYPE DELAYARR 178 TYPE( DELAYARR ), DIMENSION(nbdelay), PUBLIC :: todelay 179 INTEGER, DIMENSION(nbdelay), PUBLIC :: ndelayid = -1 !: mpi request id of the delayed operations 180 181 ! timing summary report 182 REAL(wp), DIMENSION(2), PUBLIC :: waiting_time = 0._wp 183 REAL(wp) , PUBLIC :: compute_time = 0._wp, elapsed_time = 0._wp 184 185 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 186 187 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms 188 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. !: internal control of northfold comms 189 173 190 !!---------------------------------------------------------------------- 174 191 !! NEMO/OCE 4.0 , NEMO Consortium (2018) 175 !! $Id: lib_mpp.F90 6490 2016-04-20 14:55:58Z mcastril$176 !! Software governed by the CeCILL licen ce (./LICENSE)192 !! $Id: lib_mpp.F90 10538 2019-01-17 10:41:10Z clem $ 193 !! Software governed by the CeCILL license (see ./LICENSE) 177 194 !!---------------------------------------------------------------------- 178 195 CONTAINS 179 196 180 181 FUNCTION mynode( ldtxt, ldname, kumnam_ref , kumnam_cfg , kumond , kstop, localComm ) 197 FUNCTION mynode( ldtxt, ldname, kumnam_ref, kumnam_cfg, kumond, kstop, localComm ) 182 198 !!---------------------------------------------------------------------- 183 199 !! *** routine mynode *** … … 196 212 LOGICAL :: mpi_was_called 197 213 ! 198 NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij,ln_nnogather214 NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, ln_nnogather 199 215 !!---------------------------------------------------------------------- 200 216 ! … … 204 220 WRITE(ldtxt(ii),*) '~~~~~~ ' ; ii = ii + 1 205 221 ! 206 207 222 REWIND( kumnam_ref ) ! Namelist nammpp in reference namelist: mpi variables 208 223 READ ( kumnam_ref, nammpp, IOSTAT = ios, ERR = 901) 209 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp )210 224 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp ) 225 ! 211 226 REWIND( kumnam_cfg ) ! Namelist nammpp in configuration namelist: mpi variables 212 227 READ ( kumnam_cfg, nammpp, IOSTAT = ios, ERR = 902 ) 213 902 IF( ios /= 0 )CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp )214 228 902 IF( ios > 0 ) CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp ) 229 ! 215 230 ! ! control print 216 231 WRITE(ldtxt(ii),*) ' Namelist nammpp' ; ii = ii + 1 217 232 WRITE(ldtxt(ii),*) ' mpi send type cn_mpi_send = ', cn_mpi_send ; ii = ii + 1 218 233 WRITE(ldtxt(ii),*) ' size exported buffer nn_buffer = ', nn_buffer,' bytes'; ii = ii + 1 219 220 221 222 223 224 225 226 227 228 IF(jpnij < 1)THEN 229 ! If jpnij is not specified in namelist then we calculate it - this 230 ! means there will be no land cutting out. 
231 jpnij = jpni * jpnj 232 END IF 233 234 IF( (jpni < 1) .OR. (jpnj < 1) )THEN 235 WRITE(ldtxt(ii),*) ' jpni, jpnj and jpnij will be calculated automatically' ; ii = ii + 1 234 ! 235 IF( jpni < 1 .OR. jpnj < 1 ) THEN 236 WRITE(ldtxt(ii),*) ' jpni and jpnj will be calculated automatically' ; ii = ii + 1 236 237 ELSE 237 238 WRITE(ldtxt(ii),*) ' processor grid extent in i jpni = ',jpni ; ii = ii + 1 238 239 WRITE(ldtxt(ii),*) ' processor grid extent in j jpnj = ',jpnj ; ii = ii + 1 239 WRITE(ldtxt(ii),*) ' number of local domains jpnij = ',jpnij ; ii = ii + 1 240 END IF 240 ENDIF 241 241 242 242 WRITE(ldtxt(ii),*) ' avoid use of mpi_allgather at the north fold ln_nnogather = ', ln_nnogather ; ii = ii + 1 … … 259 259 CASE ( 'B' ) ! Buffer mpi send (blocking) 260 260 WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1 261 IF( Agrif_Root() ) CALL mpi_init_o pa( ldtxt, ii, ierr )261 IF( Agrif_Root() ) CALL mpi_init_oce( ldtxt, ii, ierr ) 262 262 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 263 263 WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1 … … 268 268 kstop = kstop + 1 269 269 END SELECT 270 ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN 270 ! 271 ELSEIF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN 272 WRITE(ldtxt(ii),cform_err) ; ii = ii + 1 271 273 WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator ' ; ii = ii + 1 272 274 WRITE(ldtxt(ii),*) ' without calling MPI_Init before ! ' ; ii = ii + 1 … … 279 281 CASE ( 'B' ) ! Buffer mpi send (blocking) 280 282 WRITE(ldtxt(ii),*) ' Buffer blocking mpi send (bsend)' ; ii = ii + 1 281 IF( Agrif_Root() ) CALL mpi_init_o pa( ldtxt, ii, ierr )283 IF( Agrif_Root() ) CALL mpi_init_oce( ldtxt, ii, ierr ) 282 284 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 283 285 WRITE(ldtxt(ii),*) ' Immediate non-blocking send (isend)' ; ii = ii + 1 … … 294 296 IF( PRESENT(localComm) ) THEN 295 297 IF( Agrif_Root() ) THEN 296 mpi_comm_o pa= localComm298 mpi_comm_oce = localComm 297 299 ENDIF 298 300 ELSE 299 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_o pa, code)301 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, code) 300 302 IF( code /= MPI_SUCCESS ) THEN 301 303 DO ji = 1, SIZE(ldtxt) … … 308 310 ENDIF 309 311 310 311 312 313 314 315 316 317 318 CALL mpi_comm_rank( mpi_comm_o pa, mpprank, ierr )319 CALL mpi_comm_size( mpi_comm_o pa, mppsize, ierr )312 #if defined key_agrif 313 IF( Agrif_Root() ) THEN 314 CALL Agrif_MPI_Init(mpi_comm_oce) 315 ELSE 316 CALL Agrif_MPI_set_grid_comm(mpi_comm_oce) 317 ENDIF 318 #endif 319 320 CALL mpi_comm_rank( mpi_comm_oce, mpprank, ierr ) 321 CALL mpi_comm_size( mpi_comm_oce, mppsize, ierr ) 320 322 mynode = mpprank 321 323 … … 329 331 END FUNCTION mynode 330 332 331 332 SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval ) 333 !!---------------------------------------------------------------------- 334 !! *** routine mpp_lnk_3d *** 335 !! 336 !! ** Purpose : Message passing manadgement 337 !! 338 !! ** Method : Use mppsend and mpprecv function for passing mask 339 !! between processors following neighboring subdomains. 340 !! domain parameters 341 !! nlci : first dimension of the local subdomain 342 !! nlcj : second dimension of the local subdomain 343 !! nbondi : mark for "east-west local boundary" 344 !! nbondj : mark for "north-south local boundary" 345 !! noea : number for local neighboring processors 346 !! nowe : number for local neighboring processors 347 !! noso : number for local neighboring processors 348 !! 
nono : number for local neighboring processors 349 !! 350 !! ** Action : ptab with update value at its periphery 351 !! 352 !!---------------------------------------------------------------------- 353 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 354 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 355 ! ! = T , U , V , F , W points 356 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 357 ! ! = 1. , the sign is kept 358 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 359 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 360 ! 361 INTEGER :: ji, jj, jk, jl ! dummy loop indices 362 INTEGER :: imigr, iihom, ijhom ! temporary integers 363 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 364 REAL(wp) :: zland 365 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 366 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 367 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 368 !!---------------------------------------------------------------------- 369 370 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 371 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 372 373 ! 374 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 375 ELSE ; zland = 0._wp ! zero by default 376 ENDIF 377 378 ! 1. standard boundary treatment 379 ! ------------------------------ 380 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 381 ! 382 ! WARNING ptab is defined only between nld and nle 383 DO jk = 1, jpk 384 DO jj = nlcj+1, jpj ! added line(s) (inner only) 385 ptab(nldi :nlei , jj ,jk) = ptab(nldi:nlei, nlej,jk) 386 ptab(1 :nldi-1, jj ,jk) = ptab(nldi , nlej,jk) 387 ptab(nlei+1:nlci , jj ,jk) = ptab( nlei, nlej,jk) 388 END DO 389 DO ji = nlci+1, jpi ! added column(s) (full) 390 ptab(ji ,nldj :nlej ,jk) = ptab( nlei,nldj:nlej,jk) 391 ptab(ji ,1 :nldj-1,jk) = ptab( nlei,nldj ,jk) 392 ptab(ji ,nlej+1:jpj ,jk) = ptab( nlei, nlej,jk) 393 END DO 394 END DO 395 ! 396 ELSE ! standard close or cyclic treatment 397 ! 398 ! ! East-West boundaries 399 ! !* Cyclic east-west 400 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 401 ptab( 1 ,:,:) = ptab(jpim1,:,:) 402 ptab(jpi,:,:) = ptab( 2 ,:,:) 403 ELSE !* closed 404 IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point 405 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 406 ENDIF 407 ! ! North-South boundaries (always closed) 408 IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point 409 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 410 ! 411 ENDIF 412 413 ! 2. East and west directions exchange 414 ! ------------------------------------ 415 ! we play with the neigbours AND the row number because of the periodicity 416 ! 417 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 418 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 419 iihom = nlci-nreci 420 DO jl = 1, jpreci 421 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 422 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 423 END DO 424 END SELECT 425 ! 426 ! ! Migrations 427 imigr = jpreci * jpj * jpk 428 ! 
429 SELECT CASE ( nbondi ) 430 CASE ( -1 ) 431 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 432 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 433 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 434 CASE ( 0 ) 435 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 436 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 437 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 438 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 439 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 440 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 441 CASE ( 1 ) 442 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 443 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 444 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 445 END SELECT 446 ! 447 ! ! Write Dirichlet lateral conditions 448 iihom = nlci-jpreci 449 ! 450 SELECT CASE ( nbondi ) 451 CASE ( -1 ) 452 DO jl = 1, jpreci 453 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 454 END DO 455 CASE ( 0 ) 456 DO jl = 1, jpreci 457 ptab(jl ,:,:) = zt3we(:,jl,:,2) 458 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 459 END DO 460 CASE ( 1 ) 461 DO jl = 1, jpreci 462 ptab(jl ,:,:) = zt3we(:,jl,:,2) 463 END DO 464 END SELECT 465 466 ! 3. North and south directions 467 ! ----------------------------- 468 ! always closed : we play only with the neigbours 469 ! 470 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 471 ijhom = nlcj-nrecj 472 DO jl = 1, jprecj 473 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 474 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 475 END DO 476 ENDIF 477 ! 478 ! ! Migrations 479 imigr = jprecj * jpi * jpk 480 ! 481 SELECT CASE ( nbondj ) 482 CASE ( -1 ) 483 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 484 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 485 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 486 CASE ( 0 ) 487 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 488 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 489 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 490 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 491 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 492 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 493 CASE ( 1 ) 494 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 495 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 496 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 497 END SELECT 498 ! 499 ! ! Write Dirichlet lateral conditions 500 ijhom = nlcj-jprecj 501 ! 502 SELECT CASE ( nbondj ) 503 CASE ( -1 ) 504 DO jl = 1, jprecj 505 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 506 END DO 507 CASE ( 0 ) 508 DO jl = 1, jprecj 509 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 510 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 511 END DO 512 CASE ( 1 ) 513 DO jl = 1, jprecj 514 ptab(:,jl,:) = zt3sn(:,jl,:,2) 515 END DO 516 END SELECT 517 518 ! 4. north fold treatment 519 ! ----------------------- 520 ! 521 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 522 ! 523 SELECT CASE ( jpni ) 524 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 525 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 526 END SELECT 527 ! 528 ENDIF 529 ! 530 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 531 ! 532 END SUBROUTINE mpp_lnk_3d 533 534 535 SUBROUTINE mpp_lnk_2d_multiple( pt2d_array , type_array , psgn_array , num_fields , cd_mpp, pval ) 536 !!---------------------------------------------------------------------- 537 !! *** routine mpp_lnk_2d_multiple *** 538 !! 539 !! ** Purpose : Message passing management for multiple 2d arrays 540 !! 541 !! 
** Method : Use mppsend and mpprecv function for passing mask 542 !! between processors following neighboring subdomains. 543 !! domain parameters 544 !! nlci : first dimension of the local subdomain 545 !! nlcj : second dimension of the local subdomain 546 !! nbondi : mark for "east-west local boundary" 547 !! nbondj : mark for "north-south local boundary" 548 !! noea : number for local neighboring processors 549 !! nowe : number for local neighboring processors 550 !! noso : number for local neighboring processors 551 !! nono : number for local neighboring processors 552 !!---------------------------------------------------------------------- 553 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: type_array ! define the nature of ptab array grid-points 554 ! ! = T , U , V , F , W and I points 555 REAL(wp) , DIMENSION(:), INTENT(in ) :: psgn_array ! =-1 the sign change across the north fold boundary 556 ! ! = 1. , the sign is kept 557 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 558 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 559 !! 560 INTEGER :: ji, jj, jl ! dummy loop indices 561 INTEGER :: ii !!MULTI SEND DUMMY LOOP INDICES 562 INTEGER :: imigr, iihom, ijhom ! temporary integers 563 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 564 INTEGER :: num_fields 565 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array 566 REAL(wp) :: zland 567 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 568 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 569 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 570 571 !!---------------------------------------------------------------------- 572 ! 573 ALLOCATE( zt2ns(jpi,jprecj,2*num_fields), zt2sn(jpi,jprecj,2*num_fields), & 574 & zt2ew(jpj,jpreci,2*num_fields), zt2we(jpj,jpreci,2*num_fields) ) 575 ! 576 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 577 ELSE ; zland = 0._wp ! zero by default 578 ENDIF 579 580 ! 1. standard boundary treatment 581 ! ------------------------------ 582 ! 583 !First Array 584 DO ii = 1 , num_fields 585 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 586 ! 587 ! WARNING pt2d is defined only between nld and nle 588 DO jj = nlcj+1, jpj ! added line(s) (inner only) 589 pt2d_array(ii)%pt2d(nldi :nlei , jj) = pt2d_array(ii)%pt2d(nldi:nlei, nlej) 590 pt2d_array(ii)%pt2d(1 :nldi-1, jj) = pt2d_array(ii)%pt2d(nldi , nlej) 591 pt2d_array(ii)%pt2d(nlei+1:nlci , jj) = pt2d_array(ii)%pt2d( nlei, nlej) 592 END DO 593 DO ji = nlci+1, jpi ! added column(s) (full) 594 pt2d_array(ii)%pt2d(ji, nldj :nlej ) = pt2d_array(ii)%pt2d(nlei, nldj:nlej) 595 pt2d_array(ii)%pt2d(ji, 1 :nldj-1) = pt2d_array(ii)%pt2d(nlei, nldj ) 596 pt2d_array(ii)%pt2d(ji, nlej+1:jpj ) = pt2d_array(ii)%pt2d(nlei, nlej) 597 END DO 598 ! 599 ELSE ! standard close or cyclic treatment 600 ! 601 ! ! East-West boundaries 602 IF( nbondi == 2 .AND. & ! Cyclic east-west 603 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 604 pt2d_array(ii)%pt2d( 1 , : ) = pt2d_array(ii)%pt2d( jpim1, : ) ! west 605 pt2d_array(ii)%pt2d( jpi , : ) = pt2d_array(ii)%pt2d( 2 , : ) ! east 606 ELSE ! closed 607 IF( .NOT. type_array(ii) == 'F' ) pt2d_array(ii)%pt2d( 1 : jpreci,:) = zland ! south except F-point 608 pt2d_array(ii)%pt2d(nlci-jpreci+1 : jpi ,:) = zland ! north 609 ENDIF 610 ! ! North-South boundaries (always closed) 611 IF( .NOT. 
type_array(ii) == 'F' ) pt2d_array(ii)%pt2d(:, 1:jprecj ) = zland ! south except F-point 612 pt2d_array(ii)%pt2d(:, nlcj-jprecj+1:jpj ) = zland ! north 613 ! 614 ENDIF 615 END DO 616 617 ! 2. East and west directions exchange 618 ! ------------------------------------ 619 ! we play with the neigbours AND the row number because of the periodicity 620 ! 621 DO ii = 1 , num_fields 622 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 623 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 624 iihom = nlci-nreci 625 DO jl = 1, jpreci 626 zt2ew( : , jl , ii ) = pt2d_array(ii)%pt2d( jpreci+jl , : ) 627 zt2we( : , jl , ii ) = pt2d_array(ii)%pt2d( iihom +jl , : ) 628 END DO 629 END SELECT 630 END DO 631 ! 632 ! ! Migrations 633 imigr = jpreci * jpj 634 ! 635 SELECT CASE ( nbondi ) 636 CASE ( -1 ) 637 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req1 ) 638 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 639 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 640 CASE ( 0 ) 641 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 642 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req2 ) 643 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 644 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 645 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 646 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 647 CASE ( 1 ) 648 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 649 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 650 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 651 END SELECT 652 ! 653 ! ! Write Dirichlet lateral conditions 654 iihom = nlci - jpreci 655 ! 656 657 DO ii = 1 , num_fields 658 SELECT CASE ( nbondi ) 659 CASE ( -1 ) 660 DO jl = 1, jpreci 661 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 662 END DO 663 CASE ( 0 ) 664 DO jl = 1, jpreci 665 pt2d_array(ii)%pt2d( jl , : ) = zt2we(:,jl,num_fields+ii) 666 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 667 END DO 668 CASE ( 1 ) 669 DO jl = 1, jpreci 670 pt2d_array(ii)%pt2d( jl , : )= zt2we(:,jl,num_fields+ii) 671 END DO 672 END SELECT 673 END DO 674 675 ! 3. North and south directions 676 ! ----------------------------- 677 ! always closed : we play only with the neigbours 678 ! 679 !First Array 680 DO ii = 1 , num_fields 681 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 682 ijhom = nlcj-nrecj 683 DO jl = 1, jprecj 684 zt2sn(:,jl , ii) = pt2d_array(ii)%pt2d( : , ijhom +jl ) 685 zt2ns(:,jl , ii) = pt2d_array(ii)%pt2d( : , jprecj+jl ) 686 END DO 687 ENDIF 688 END DO 689 ! 690 ! ! Migrations 691 imigr = jprecj * jpi 692 ! 
693 SELECT CASE ( nbondj ) 694 CASE ( -1 ) 695 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req1 ) 696 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 697 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 698 CASE ( 0 ) 699 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 700 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req2 ) 701 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 702 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 703 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 704 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 705 CASE ( 1 ) 706 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 707 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 708 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 709 END SELECT 710 ! 711 ! ! Write Dirichlet lateral conditions 712 ijhom = nlcj - jprecj 713 ! 714 715 DO ii = 1 , num_fields 716 !First Array 717 SELECT CASE ( nbondj ) 718 CASE ( -1 ) 719 DO jl = 1, jprecj 720 pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii ) 721 END DO 722 CASE ( 0 ) 723 DO jl = 1, jprecj 724 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii) 725 pt2d_array(ii)%pt2d( : , ijhom + jl ) = zt2ns( : , jl , num_fields + ii ) 726 END DO 727 CASE ( 1 ) 728 DO jl = 1, jprecj 729 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii ) 730 END DO 731 END SELECT 732 END DO 733 734 ! 4. north fold treatment 735 ! ----------------------- 736 ! 737 !First Array 738 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 739 ! 740 SELECT CASE ( jpni ) 741 CASE ( 1 ) ; 742 DO ii = 1 , num_fields 743 CALL lbc_nfd ( pt2d_array(ii)%pt2d( : , : ), type_array(ii) , psgn_array(ii) ) ! only 1 northern proc, no mpp 744 END DO 745 CASE DEFAULT ; CALL mpp_lbc_north_2d_multiple( pt2d_array, type_array, psgn_array, num_fields ) ! for all northern procs. 746 END SELECT 747 ! 748 ENDIF 749 ! 750 ! 751 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 752 ! 753 END SUBROUTINE mpp_lnk_2d_multiple 754 755 756 SUBROUTINE load_array( pt2d, cd_type, psgn, pt2d_array, type_array, psgn_array, num_fields ) 757 !!--------------------------------------------------------------------- 758 REAL(wp), DIMENSION(jpi,jpj), TARGET, INTENT(inout) :: pt2d ! Second 2D array on which the boundary condition is applied 759 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 760 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 761 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 762 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 763 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 764 INTEGER , INTENT (inout) :: num_fields 765 !!--------------------------------------------------------------------- 766 num_fields = num_fields + 1 767 pt2d_array(num_fields)%pt2d => pt2d 768 type_array(num_fields) = cd_type 769 psgn_array(num_fields) = psgn 770 END SUBROUTINE load_array 333 !!---------------------------------------------------------------------- 334 !! *** routine mpp_lnk_(2,3,4)d *** 335 !! 336 !! * Argument : dummy argument use in mpp_lnk_... routines 337 !! ptab : array or pointer of arrays on which the boundary condition is applied 338 !! cd_nat : nature of array grid-points 339 !! psgn : sign used across the north fold boundary 340 !! kfld : optional, number of pt3d arrays 341 !! 
cd_mpp : optional, fill the overlap area only 342 !! pval : optional, background value (used at closed boundaries) 343 !!---------------------------------------------------------------------- 344 ! 345 ! !== 2D array and array of 2D pointer ==! 346 ! 347 # define DIM_2d 348 # define ROUTINE_LNK mpp_lnk_2d 349 # include "mpp_lnk_generic.h90" 350 # undef ROUTINE_LNK 351 # define MULTI 352 # define ROUTINE_LNK mpp_lnk_2d_ptr 353 # include "mpp_lnk_generic.h90" 354 # undef ROUTINE_LNK 355 # undef MULTI 356 # undef DIM_2d 357 ! 358 ! !== 3D array and array of 3D pointer ==! 359 ! 360 # define DIM_3d 361 # define ROUTINE_LNK mpp_lnk_3d 362 # include "mpp_lnk_generic.h90" 363 # undef ROUTINE_LNK 364 # define MULTI 365 # define ROUTINE_LNK mpp_lnk_3d_ptr 366 # include "mpp_lnk_generic.h90" 367 # undef ROUTINE_LNK 368 # undef MULTI 369 # undef DIM_3d 370 ! 371 ! !== 4D array and array of 4D pointer ==! 372 ! 373 # define DIM_4d 374 # define ROUTINE_LNK mpp_lnk_4d 375 # include "mpp_lnk_generic.h90" 376 # undef ROUTINE_LNK 377 # define MULTI 378 # define ROUTINE_LNK mpp_lnk_4d_ptr 379 # include "mpp_lnk_generic.h90" 380 # undef ROUTINE_LNK 381 # undef MULTI 382 # undef DIM_4d 383 384 !!---------------------------------------------------------------------- 385 !! *** routine mpp_nfd_(2,3,4)d *** 386 !! 387 !! * Argument : dummy argument use in mpp_nfd_... routines 388 !! ptab : array or pointer of arrays on which the boundary condition is applied 389 !! cd_nat : nature of array grid-points 390 !! psgn : sign used across the north fold boundary 391 !! kfld : optional, number of pt3d arrays 392 !! cd_mpp : optional, fill the overlap area only 393 !! pval : optional, background value (used at closed boundaries) 394 !!---------------------------------------------------------------------- 395 ! 396 ! !== 2D array and array of 2D pointer ==! 397 ! 398 # define DIM_2d 399 # define ROUTINE_NFD mpp_nfd_2d 400 # include "mpp_nfd_generic.h90" 401 # undef ROUTINE_NFD 402 # define MULTI 403 # define ROUTINE_NFD mpp_nfd_2d_ptr 404 # include "mpp_nfd_generic.h90" 405 # undef ROUTINE_NFD 406 # undef MULTI 407 # undef DIM_2d 408 ! 409 ! !== 3D array and array of 3D pointer ==! 410 ! 411 # define DIM_3d 412 # define ROUTINE_NFD mpp_nfd_3d 413 # include "mpp_nfd_generic.h90" 414 # undef ROUTINE_NFD 415 # define MULTI 416 # define ROUTINE_NFD mpp_nfd_3d_ptr 417 # include "mpp_nfd_generic.h90" 418 # undef ROUTINE_NFD 419 # undef MULTI 420 # undef DIM_3d 421 ! 422 ! !== 4D array and array of 4D pointer ==! 423 ! 424 # define DIM_4d 425 # define ROUTINE_NFD mpp_nfd_4d 426 # include "mpp_nfd_generic.h90" 427 # undef ROUTINE_NFD 428 # define MULTI 429 # define ROUTINE_NFD mpp_nfd_4d_ptr 430 # include "mpp_nfd_generic.h90" 431 # undef ROUTINE_NFD 432 # undef MULTI 433 # undef DIM_4d 434 435 436 !!---------------------------------------------------------------------- 437 !! *** routine mpp_lnk_bdy_(2,3,4)d *** 438 !! 439 !! * Argument : dummy argument use in mpp_lnk_... routines 440 !! ptab : array or pointer of arrays on which the boundary condition is applied 441 !! cd_nat : nature of array grid-points 442 !! psgn : sign used across the north fold boundary 443 !! kb_bdy : BDY boundary set 444 !! kfld : optional, number of pt3d arrays 445 !!---------------------------------------------------------------------- 446 ! 447 ! !== 2D array and array of 2D pointer ==! 448 ! 449 # define DIM_2d 450 # define ROUTINE_BDY mpp_lnk_bdy_2d 451 # include "mpp_bdy_generic.h90" 452 # undef ROUTINE_BDY 453 # undef DIM_2d 454 ! 455 ! 
!== 3D array and array of 3D pointer ==! 456 ! 457 # define DIM_3d 458 # define ROUTINE_BDY mpp_lnk_bdy_3d 459 # include "mpp_bdy_generic.h90" 460 # undef ROUTINE_BDY 461 # undef DIM_3d 462 ! 463 ! !== 4D array and array of 4D pointer ==! 464 ! 465 # define DIM_4d 466 # define ROUTINE_BDY mpp_lnk_bdy_4d 467 # include "mpp_bdy_generic.h90" 468 # undef ROUTINE_BDY 469 # undef DIM_4d 470 471 !!---------------------------------------------------------------------- 472 !! 473 !! load_array & mpp_lnk_2d_9 à generaliser a 3D et 4D 771 474 772 475 773 SUBROUTINE mpp_lnk_2d_9( pt2dA, cd_typeA, psgnA, pt2dB, cd_typeB, psgnB, pt2dC, cd_typeC, psgnC & 774 & , pt2dD, cd_typeD, psgnD, pt2dE, cd_typeE, psgnE, pt2dF, cd_typeF, psgnF & 775 & , pt2dG, cd_typeG, psgnG, pt2dH, cd_typeH, psgnH, pt2dI, cd_typeI, psgnI, cd_mpp, pval) 776 !!--------------------------------------------------------------------- 777 ! Second 2D array on which the boundary condition is applied 778 REAL(wp), DIMENSION(jpi,jpj), TARGET , INTENT(inout) :: pt2dA 779 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dB , pt2dC , pt2dD , pt2dE 780 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dF , pt2dG , pt2dH , pt2dI 781 ! define the nature of ptab array grid-points 782 CHARACTER(len=1) , INTENT(in ) :: cd_typeA 783 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeB , cd_typeC , cd_typeD , cd_typeE 784 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeF , cd_typeG , cd_typeH , cd_typeI 785 ! =-1 the sign change across the north fold boundary 786 REAL(wp) , INTENT(in ) :: psgnA 787 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnB , psgnC , psgnD , psgnE 788 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnF , psgnG , psgnH , psgnI 789 CHARACTER(len=3) , OPTIONAL, INTENT(in ) :: cd_mpp ! fill the overlap area only 790 REAL(wp) , OPTIONAL, INTENT(in ) :: pval ! background value (used at closed boundaries) 791 !! 792 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 793 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 794 ! ! = T , U , V , F , W and I points 795 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 796 INTEGER :: num_fields 797 !!--------------------------------------------------------------------- 798 ! 799 num_fields = 0 800 ! 801 ! Load the first array 802 CALL load_array( pt2dA, cd_typeA, psgnA, pt2d_array, type_array, psgn_array, num_fields ) 803 ! 804 ! Look if more arrays are added 805 IF( PRESENT(psgnB) ) CALL load_array(pt2dB,cd_typeB,psgnB,pt2d_array, type_array, psgn_array,num_fields) 806 IF( PRESENT(psgnC) ) CALL load_array(pt2dC,cd_typeC,psgnC,pt2d_array, type_array, psgn_array,num_fields) 807 IF( PRESENT(psgnD) ) CALL load_array(pt2dD,cd_typeD,psgnD,pt2d_array, type_array, psgn_array,num_fields) 808 IF( PRESENT(psgnE) ) CALL load_array(pt2dE,cd_typeE,psgnE,pt2d_array, type_array, psgn_array,num_fields) 809 IF( PRESENT(psgnF) ) CALL load_array(pt2dF,cd_typeF,psgnF,pt2d_array, type_array, psgn_array,num_fields) 810 IF( PRESENT(psgnG) ) CALL load_array(pt2dG,cd_typeG,psgnG,pt2d_array, type_array, psgn_array,num_fields) 811 IF( PRESENT(psgnH) ) CALL load_array(pt2dH,cd_typeH,psgnH,pt2d_array, type_array, psgn_array,num_fields) 812 IF( PRESENT(psgnI) ) CALL load_array(pt2dI,cd_typeI,psgnI,pt2d_array, type_array, psgn_array,num_fields) 813 ! 814 CALL mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp,pval ) 815 ! 
816 END SUBROUTINE mpp_lnk_2d_9 817 818 819 SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval ) 820 !!---------------------------------------------------------------------- 821 !! *** routine mpp_lnk_2d *** 822 !! 823 !! ** Purpose : Message passing manadgement for 2d array 824 !! 825 !! ** Method : Use mppsend and mpprecv function for passing mask 826 !! between processors following neighboring subdomains. 827 !! domain parameters 828 !! nlci : first dimension of the local subdomain 829 !! nlcj : second dimension of the local subdomain 830 !! nbondi : mark for "east-west local boundary" 831 !! nbondj : mark for "north-south local boundary" 832 !! noea : number for local neighboring processors 833 !! nowe : number for local neighboring processors 834 !! noso : number for local neighboring processors 835 !! nono : number for local neighboring processors 836 !! 837 !!---------------------------------------------------------------------- 838 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 839 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 840 ! ! = T , U , V , F , W and I points 841 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 842 ! ! = 1. , the sign is kept 843 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 844 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 845 !! 846 INTEGER :: ji, jj, jl ! dummy loop indices 847 INTEGER :: imigr, iihom, ijhom ! temporary integers 848 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 849 REAL(wp) :: zland 850 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 851 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 852 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 853 !!---------------------------------------------------------------------- 854 ! 855 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 856 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 857 ! 858 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 859 ELSE ; zland = 0._wp ! zero by default 860 ENDIF 861 862 ! 1. standard boundary treatment 863 ! ------------------------------ 864 ! 865 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 866 ! 867 ! WARNING pt2d is defined only between nld and nle 868 DO jj = nlcj+1, jpj ! added line(s) (inner only) 869 pt2d(nldi :nlei , jj ) = pt2d(nldi:nlei, nlej) 870 pt2d(1 :nldi-1, jj ) = pt2d(nldi , nlej) 871 pt2d(nlei+1:nlci , jj ) = pt2d( nlei, nlej) 872 END DO 873 DO ji = nlci+1, jpi ! added column(s) (full) 874 pt2d(ji ,nldj :nlej ) = pt2d( nlei,nldj:nlej) 875 pt2d(ji ,1 :nldj-1) = pt2d( nlei,nldj ) 876 pt2d(ji ,nlej+1:jpj ) = pt2d( nlei, nlej) 877 END DO 878 ! 879 ELSE ! standard close or cyclic treatment 880 ! 881 ! ! East-West boundaries 882 IF( nbondi == 2 .AND. & ! Cyclic east-west 883 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 884 pt2d( 1 ,:) = pt2d(jpim1,:) ! west 885 pt2d(jpi,:) = pt2d( 2 ,:) ! east 886 ELSE ! closed 887 IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point 888 pt2d(nlci-jpreci+1:jpi ,:) = zland ! north 889 ENDIF 890 ! ! North-South boundaries (always closed) 891 IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point 892 pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north 893 ! 894 ENDIF 895 896 ! 2. 
East and west directions exchange 897 ! ------------------------------------ 898 ! we play with the neigbours AND the row number because of the periodicity 899 ! 900 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 901 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 902 iihom = nlci-nreci 903 DO jl = 1, jpreci 904 zt2ew(:,jl,1) = pt2d(jpreci+jl,:) 905 zt2we(:,jl,1) = pt2d(iihom +jl,:) 906 END DO 907 END SELECT 908 ! 909 ! ! Migrations 910 imigr = jpreci * jpj 911 ! 912 SELECT CASE ( nbondi ) 913 CASE ( -1 ) 914 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 915 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 916 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 917 CASE ( 0 ) 918 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 919 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 920 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 921 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 922 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 923 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 924 CASE ( 1 ) 925 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 926 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 927 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 928 END SELECT 929 ! 930 ! ! Write Dirichlet lateral conditions 931 iihom = nlci - jpreci 932 ! 933 SELECT CASE ( nbondi ) 934 CASE ( -1 ) 935 DO jl = 1, jpreci 936 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 937 END DO 938 CASE ( 0 ) 939 DO jl = 1, jpreci 940 pt2d(jl ,:) = zt2we(:,jl,2) 941 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 942 END DO 943 CASE ( 1 ) 944 DO jl = 1, jpreci 945 pt2d(jl ,:) = zt2we(:,jl,2) 946 END DO 947 END SELECT 948 949 950 ! 3. North and south directions 951 ! ----------------------------- 952 ! always closed : we play only with the neigbours 953 ! 954 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 955 ijhom = nlcj-nrecj 956 DO jl = 1, jprecj 957 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) 958 zt2ns(:,jl,1) = pt2d(:,jprecj+jl) 959 END DO 960 ENDIF 961 ! 962 ! ! Migrations 963 imigr = jprecj * jpi 964 ! 965 SELECT CASE ( nbondj ) 966 CASE ( -1 ) 967 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 968 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 969 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 970 CASE ( 0 ) 971 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 972 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 973 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 974 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 975 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 976 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 977 CASE ( 1 ) 978 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 979 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 980 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 981 END SELECT 982 ! 983 ! ! Write Dirichlet lateral conditions 984 ijhom = nlcj - jprecj 985 ! 986 SELECT CASE ( nbondj ) 987 CASE ( -1 ) 988 DO jl = 1, jprecj 989 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 990 END DO 991 CASE ( 0 ) 992 DO jl = 1, jprecj 993 pt2d(:,jl ) = zt2sn(:,jl,2) 994 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 995 END DO 996 CASE ( 1 ) 997 DO jl = 1, jprecj 998 pt2d(:,jl ) = zt2sn(:,jl,2) 999 END DO 1000 END SELECT 1001 1002 1003 ! 4. north fold treatment 1004 ! ----------------------- 1005 ! 1006 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1007 ! 1008 SELECT CASE ( jpni ) 1009 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp 1010 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 1011 END SELECT 1012 ! 1013 ENDIF 1014 ! 
1015 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 1016 ! 1017 END SUBROUTINE mpp_lnk_2d 1018 1019 1020 SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn ) 1021 !!---------------------------------------------------------------------- 1022 !! *** routine mpp_lnk_3d_gather *** 1023 !! 1024 !! ** Purpose : Message passing manadgement for two 3D arrays 1025 !! 1026 !! ** Method : Use mppsend and mpprecv function for passing mask 1027 !! between processors following neighboring subdomains. 1028 !! domain parameters 1029 !! nlci : first dimension of the local subdomain 1030 !! nlcj : second dimension of the local subdomain 1031 !! nbondi : mark for "east-west local boundary" 1032 !! nbondj : mark for "north-south local boundary" 1033 !! noea : number for local neighboring processors 1034 !! nowe : number for local neighboring processors 1035 !! noso : number for local neighboring processors 1036 !! nono : number for local neighboring processors 1037 !! 1038 !! ** Action : ptab1 and ptab2 with update value at its periphery 1039 !! 1040 !!---------------------------------------------------------------------- 1041 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which 1042 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied 1043 CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays 1044 CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points 1045 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1046 !! ! = 1. , the sign is kept 1047 INTEGER :: jl ! dummy loop indices 1048 INTEGER :: imigr, iihom, ijhom ! temporary integers 1049 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1050 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1051 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ns, zt4sn ! 2 x 3d for north-south & south-north 1052 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ew, zt4we ! 2 x 3d for east-west & west-east 1053 !!---------------------------------------------------------------------- 1054 ! 1055 ALLOCATE( zt4ns(jpi,jprecj,jpk,2,2), zt4sn(jpi,jprecj,jpk,2,2) , & 1056 & zt4ew(jpj,jpreci,jpk,2,2), zt4we(jpj,jpreci,jpk,2,2) ) 1057 ! 1058 ! 1. standard boundary treatment 1059 ! ------------------------------ 1060 ! ! East-West boundaries 1061 ! !* Cyclic east-west 1062 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1063 ptab1( 1 ,:,:) = ptab1(jpim1,:,:) 1064 ptab1(jpi,:,:) = ptab1( 2 ,:,:) 1065 ptab2( 1 ,:,:) = ptab2(jpim1,:,:) 1066 ptab2(jpi,:,:) = ptab2( 2 ,:,:) 1067 ELSE !* closed 1068 IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point 1069 IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0 1070 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north 1071 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1072 ENDIF 1073 1074 1075 ! ! North-South boundaries 1076 IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point 1077 IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0 1078 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north 1079 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1080 1081 1082 ! 2. East and west directions exchange 1083 ! ------------------------------------ 1084 ! we play with the neigbours AND the row number because of the periodicity 1085 ! 1086 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 1087 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. 
close case) 1088 iihom = nlci-nreci 1089 DO jl = 1, jpreci 1090 zt4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:) 1091 zt4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:) 1092 zt4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:) 1093 zt4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:) 1094 END DO 1095 END SELECT 1096 ! 1097 ! ! Migrations 1098 imigr = jpreci * jpj * jpk *2 1099 ! 1100 SELECT CASE ( nbondi ) 1101 CASE ( -1 ) 1102 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req1 ) 1103 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1104 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1105 CASE ( 0 ) 1106 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1107 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req2 ) 1108 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1109 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1110 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1111 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1112 CASE ( 1 ) 1113 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1114 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1115 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1116 END SELECT 1117 ! 1118 ! ! Write Dirichlet lateral conditions 1119 iihom = nlci - jpreci 1120 ! 1121 SELECT CASE ( nbondi ) 1122 CASE ( -1 ) 1123 DO jl = 1, jpreci 1124 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1125 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1126 END DO 1127 CASE ( 0 ) 1128 DO jl = 1, jpreci 1129 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1130 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1131 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1132 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1133 END DO 1134 CASE ( 1 ) 1135 DO jl = 1, jpreci 1136 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1137 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1138 END DO 1139 END SELECT 1140 1141 1142 ! 3. North and south directions 1143 ! ----------------------------- 1144 ! always closed : we play only with the neigbours 1145 ! 1146 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1147 ijhom = nlcj - nrecj 1148 DO jl = 1, jprecj 1149 zt4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:) 1150 zt4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:) 1151 zt4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:) 1152 zt4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:) 1153 END DO 1154 ENDIF 1155 ! 1156 ! ! Migrations 1157 imigr = jprecj * jpi * jpk * 2 1158 ! 1159 SELECT CASE ( nbondj ) 1160 CASE ( -1 ) 1161 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req1 ) 1162 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1163 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1164 CASE ( 0 ) 1165 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1166 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req2 ) 1167 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1168 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1169 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1170 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1171 CASE ( 1 ) 1172 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1173 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1174 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1175 END SELECT 1176 ! 1177 ! ! Write Dirichlet lateral conditions 1178 ijhom = nlcj - jprecj 1179 ! 
1180 SELECT CASE ( nbondj ) 1181 CASE ( -1 ) 1182 DO jl = 1, jprecj 1183 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1184 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1185 END DO 1186 CASE ( 0 ) 1187 DO jl = 1, jprecj 1188 ptab1(:,jl ,:) = zt4sn(:,jl,:,1,2) 1189 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1190 ptab2(:,jl ,:) = zt4sn(:,jl,:,2,2) 1191 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1192 END DO 1193 CASE ( 1 ) 1194 DO jl = 1, jprecj 1195 ptab1(:,jl,:) = zt4sn(:,jl,:,1,2) 1196 ptab2(:,jl,:) = zt4sn(:,jl,:,2,2) 1197 END DO 1198 END SELECT 1199 1200 1201 ! 4. north fold treatment 1202 ! ----------------------- 1203 IF( npolj /= 0 ) THEN 1204 ! 1205 SELECT CASE ( jpni ) 1206 CASE ( 1 ) 1207 CALL lbc_nfd ( ptab1, cd_type1, psgn ) ! only for northern procs. 1208 CALL lbc_nfd ( ptab2, cd_type2, psgn ) 1209 CASE DEFAULT 1210 CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs. 1211 CALL mpp_lbc_north (ptab2, cd_type2, psgn) 1212 END SELECT 1213 ! 1214 ENDIF 1215 ! 1216 DEALLOCATE( zt4ns, zt4sn, zt4ew, zt4we ) 1217 ! 1218 END SUBROUTINE mpp_lnk_3d_gather 1219 1220 1221 SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn, jpri, jprj ) 1222 !!---------------------------------------------------------------------- 1223 !! *** routine mpp_lnk_2d_e *** 1224 !! 1225 !! ** Purpose : Message passing manadgement for 2d array (with halo) 1226 !! 1227 !! ** Method : Use mppsend and mpprecv function for passing mask 1228 !! between processors following neighboring subdomains. 1229 !! domain parameters 1230 !! nlci : first dimension of the local subdomain 1231 !! nlcj : second dimension of the local subdomain 1232 !! jpri : number of rows for extra outer halo 1233 !! jprj : number of columns for extra outer halo 1234 !! nbondi : mark for "east-west local boundary" 1235 !! nbondj : mark for "north-south local boundary" 1236 !! noea : number for local neighboring processors 1237 !! nowe : number for local neighboring processors 1238 !! noso : number for local neighboring processors 1239 !! nono : number for local neighboring processors 1240 !! 1241 !!---------------------------------------------------------------------- 1242 INTEGER , INTENT(in ) :: jpri 1243 INTEGER , INTENT(in ) :: jprj 1244 REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo 1245 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points 1246 ! ! = T , U , V , F , W and I points 1247 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the 1248 !! ! north boundary, = 1. otherwise 1249 INTEGER :: jl ! dummy loop indices 1250 INTEGER :: imigr, iihom, ijhom ! temporary integers 1251 INTEGER :: ipreci, iprecj ! temporary integers 1252 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1253 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1254 !! 1255 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns 1256 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn 1257 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe 1258 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew 1259 !!---------------------------------------------------------------------- 1260 1261 ipreci = jpreci + jpri ! take into account outer extra 2D overlap area 1262 iprecj = jprecj + jprj 1263 1264 1265 ! 1. standard boundary treatment 1266 ! ------------------------------ 1267 ! Order matters Here !!!! 1268 ! 1269 ! !* North-South boundaries (always colsed) 1270 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jprj : jprecj ) = 0.e0 ! 
south except at F-point 1271 pt2d(:,nlcj-jprecj+1:jpj+jprj) = 0.e0 ! north 1272 1273 ! ! East-West boundaries 1274 ! !* Cyclic east-west 1275 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1276 pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east 1277 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west 1278 ! 1279 ELSE !* closed 1280 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point 1281 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north 1282 ENDIF 1283 ! 1284 1285 ! north fold treatment 1286 ! ----------------------- 1287 IF( npolj /= 0 ) THEN 1288 ! 1289 SELECT CASE ( jpni ) 1290 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj ) 1291 CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn ) 1292 END SELECT 1293 ! 1294 ENDIF 1295 1296 ! 2. East and west directions exchange 1297 ! ------------------------------------ 1298 ! we play with the neigbours AND the row number because of the periodicity 1299 ! 1300 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 1301 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1302 iihom = nlci-nreci-jpri 1303 DO jl = 1, ipreci 1304 r2dew(:,jl,1) = pt2d(jpreci+jl,:) 1305 r2dwe(:,jl,1) = pt2d(iihom +jl,:) 1306 END DO 1307 END SELECT 1308 ! 1309 ! ! Migrations 1310 imigr = ipreci * ( jpj + 2*jprj) 1311 ! 1312 SELECT CASE ( nbondi ) 1313 CASE ( -1 ) 1314 CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 ) 1315 CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) 1316 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1317 CASE ( 0 ) 1318 CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) 1319 CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 ) 1320 CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) 1321 CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) 1322 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1323 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1324 CASE ( 1 ) 1325 CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) 1326 CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) 1327 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1328 END SELECT 1329 ! 1330 ! ! Write Dirichlet lateral conditions 1331 iihom = nlci - jpreci 1332 ! 1333 SELECT CASE ( nbondi ) 1334 CASE ( -1 ) 1335 DO jl = 1, ipreci 1336 pt2d(iihom+jl,:) = r2dew(:,jl,2) 1337 END DO 1338 CASE ( 0 ) 1339 DO jl = 1, ipreci 1340 pt2d(jl-jpri,:) = r2dwe(:,jl,2) 1341 pt2d( iihom+jl,:) = r2dew(:,jl,2) 1342 END DO 1343 CASE ( 1 ) 1344 DO jl = 1, ipreci 1345 pt2d(jl-jpri,:) = r2dwe(:,jl,2) 1346 END DO 1347 END SELECT 1348 1349 1350 ! 3. North and south directions 1351 ! ----------------------------- 1352 ! always closed : we play only with the neigbours 1353 ! 1354 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1355 ijhom = nlcj-nrecj-jprj 1356 DO jl = 1, iprecj 1357 r2dsn(:,jl,1) = pt2d(:,ijhom +jl) 1358 r2dns(:,jl,1) = pt2d(:,jprecj+jl) 1359 END DO 1360 ENDIF 1361 ! 1362 ! ! Migrations 1363 imigr = iprecj * ( jpi + 2*jpri ) 1364 ! 
1365 SELECT CASE ( nbondj ) 1366 CASE ( -1 ) 1367 CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 ) 1368 CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) 1369 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1370 CASE ( 0 ) 1371 CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) 1372 CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 ) 1373 CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) 1374 CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) 1375 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1376 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1377 CASE ( 1 ) 1378 CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) 1379 CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) 1380 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1381 END SELECT 1382 ! 1383 ! ! Write Dirichlet lateral conditions 1384 ijhom = nlcj - jprecj 1385 ! 1386 SELECT CASE ( nbondj ) 1387 CASE ( -1 ) 1388 DO jl = 1, iprecj 1389 pt2d(:,ijhom+jl) = r2dns(:,jl,2) 1390 END DO 1391 CASE ( 0 ) 1392 DO jl = 1, iprecj 1393 pt2d(:,jl-jprj) = r2dsn(:,jl,2) 1394 pt2d(:,ijhom+jl ) = r2dns(:,jl,2) 1395 END DO 1396 CASE ( 1 ) 1397 DO jl = 1, iprecj 1398 pt2d(:,jl-jprj) = r2dsn(:,jl,2) 1399 END DO 1400 END SELECT 1401 ! 1402 END SUBROUTINE mpp_lnk_2d_e 1403 1404 SUBROUTINE mpp_lnk_sum_3d( ptab, cd_type, psgn, cd_mpp, pval ) 1405 !!---------------------------------------------------------------------- 1406 !! *** routine mpp_lnk_sum_3d *** 1407 !! 1408 !! ** Purpose : Message passing manadgement (sum the overlap region) 1409 !! 1410 !! ** Method : Use mppsend and mpprecv function for passing mask 1411 !! between processors following neighboring subdomains. 1412 !! domain parameters 1413 !! nlci : first dimension of the local subdomain 1414 !! nlcj : second dimension of the local subdomain 1415 !! nbondi : mark for "east-west local boundary" 1416 !! nbondj : mark for "north-south local boundary" 1417 !! noea : number for local neighboring processors 1418 !! nowe : number for local neighboring processors 1419 !! noso : number for local neighboring processors 1420 !! nono : number for local neighboring processors 1421 !! 1422 !! ** Action : ptab with update value at its periphery 1423 !! 1424 !!---------------------------------------------------------------------- 1425 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 1426 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 1427 ! ! = T , U , V , F , W points 1428 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1429 ! ! = 1. , the sign is kept 1430 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 1431 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 1432 !! 1433 INTEGER :: ji, jj, jk, jl ! dummy loop indices 1434 INTEGER :: imigr, iihom, ijhom ! temporary integers 1435 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1436 REAL(wp) :: zland 1437 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1438 ! 1439 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 1440 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 1441 1442 !!---------------------------------------------------------------------- 1443 1444 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 1445 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 1446 1447 ! 
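Two details distinguish mpp_lnk_2d_e from the ordinary 2D link routine: the exchanged band is widened by the extra outer halo (ipreci = jpreci + jpri, iprecj = jprecj + jprj), and the north-fold treatment is applied before the east-west exchange. The cyclic east-west branch near the top of the routine fills the halo by a direct array copy rather than a message; a stand-alone illustration of such a periodic wrap, using a simple 0..jpi+1 indexing that differs from NEMO's own overlap convention, is:

   PROGRAM cyclic_wrap_sketch
      !! sketch only: periodic east-west wrap of a one-point halo, interior columns 1..jpi
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpi = 12, jpj = 10          ! illustrative local sizes
      REAL(8) :: pt2d(0:jpi+1,jpj)
      !
      CALL RANDOM_NUMBER( pt2d(1:jpi,:) )               ! some interior values
      pt2d(0    ,:) = pt2d(jpi,:)        ! west halo column <- easternmost interior column
      pt2d(jpi+1,:) = pt2d(1  ,:)        ! east halo column <- westernmost interior column
   END PROGRAM cyclic_wrap_sketch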
1448 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 1449 ELSE ; zland = 0.e0 ! zero by default 1450 ENDIF 1451 1452 ! 1. standard boundary treatment 1453 ! ------------------------------ 1454 ! 2. East and west directions exchange 1455 ! ------------------------------------ 1456 ! we play with the neigbours AND the row number because of the periodicity 1457 ! 1458 SELECT CASE ( nbondi ) ! Read lateral conditions 1459 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1460 iihom = nlci-jpreci 1461 DO jl = 1, jpreci 1462 zt3ew(:,jl,:,1) = ptab(jl ,:,:) ; ptab(jl ,:,:) = 0.0_wp 1463 zt3we(:,jl,:,1) = ptab(iihom+jl,:,:) ; ptab(iihom+jl,:,:) = 0.0_wp 1464 END DO 1465 END SELECT 1466 ! 1467 ! ! Migrations 1468 imigr = jpreci * jpj * jpk 1469 ! 1470 SELECT CASE ( nbondi ) 1471 CASE ( -1 ) 1472 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 1473 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 1474 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1475 CASE ( 0 ) 1476 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1477 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 1478 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 1479 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 1480 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1481 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1482 CASE ( 1 ) 1483 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1484 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 1485 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1486 END SELECT 1487 ! 1488 ! ! Write lateral conditions 1489 iihom = nlci-nreci 1490 ! 1491 SELECT CASE ( nbondi ) 1492 CASE ( -1 ) 1493 DO jl = 1, jpreci 1494 ptab(iihom+jl,:,:) = ptab(iihom+jl,:,:) + zt3ew(:,jl,:,2) 1495 END DO 1496 CASE ( 0 ) 1497 DO jl = 1, jpreci 1498 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2) 1499 ptab(iihom +jl,:,:) = ptab(iihom +jl,:,:) + zt3ew(:,jl,:,2) 1500 END DO 1501 CASE ( 1 ) 1502 DO jl = 1, jpreci 1503 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2) 1504 END DO 1505 END SELECT 1506 1507 1508 ! 3. North and south directions 1509 ! ----------------------------- 1510 ! always closed : we play only with the neigbours 1511 ! 1512 IF( nbondj /= 2 ) THEN ! Read lateral conditions 1513 ijhom = nlcj-jprecj 1514 DO jl = 1, jprecj 1515 zt3sn(:,jl,:,1) = ptab(:,ijhom+jl,:) ; ptab(:,ijhom+jl,:) = 0.0_wp 1516 zt3ns(:,jl,:,1) = ptab(:,jl ,:) ; ptab(:,jl ,:) = 0.0_wp 1517 END DO 1518 ENDIF 1519 ! 1520 ! ! Migrations 1521 imigr = jprecj * jpi * jpk 1522 ! 1523 SELECT CASE ( nbondj ) 1524 CASE ( -1 ) 1525 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 1526 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 1527 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1528 CASE ( 0 ) 1529 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 1530 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 1531 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 1532 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 1533 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1534 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1535 CASE ( 1 ) 1536 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 1537 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 1538 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1539 END SELECT 1540 ! 1541 ! ! Write lateral conditions 1542 ijhom = nlcj-nrecj 1543 ! 
1544 SELECT CASE ( nbondj ) 1545 CASE ( -1 ) 1546 DO jl = 1, jprecj 1547 ptab(:,ijhom+jl,:) = ptab(:,ijhom+jl,:) + zt3ns(:,jl,:,2) 1548 END DO 1549 CASE ( 0 ) 1550 DO jl = 1, jprecj 1551 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl,:,2) 1552 ptab(:,ijhom +jl,:) = ptab(:,ijhom +jl,:) + zt3ns(:,jl,:,2) 1553 END DO 1554 CASE ( 1 ) 1555 DO jl = 1, jprecj 1556 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl ,:,2) 1557 END DO 1558 END SELECT 1559 1560 1561 ! 4. north fold treatment 1562 ! ----------------------- 1563 ! 1564 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1565 ! 1566 SELECT CASE ( jpni ) 1567 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 1568 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 1569 END SELECT 1570 ! 1571 ENDIF 1572 ! 1573 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 1574 ! 1575 END SUBROUTINE mpp_lnk_sum_3d 1576 1577 SUBROUTINE mpp_lnk_sum_2d( pt2d, cd_type, psgn, cd_mpp, pval ) 1578 !!---------------------------------------------------------------------- 1579 !! *** routine mpp_lnk_sum_2d *** 1580 !! 1581 !! ** Purpose : Message passing manadgement for 2d array (sum the overlap region) 1582 !! 1583 !! ** Method : Use mppsend and mpprecv function for passing mask 1584 !! between processors following neighboring subdomains. 1585 !! domain parameters 1586 !! nlci : first dimension of the local subdomain 1587 !! nlcj : second dimension of the local subdomain 1588 !! nbondi : mark for "east-west local boundary" 1589 !! nbondj : mark for "north-south local boundary" 1590 !! noea : number for local neighboring processors 1591 !! nowe : number for local neighboring processors 1592 !! noso : number for local neighboring processors 1593 !! nono : number for local neighboring processors 1594 !! 1595 !!---------------------------------------------------------------------- 1596 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 1597 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 1598 ! ! = T , U , V , F , W and I points 1599 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1600 ! ! = 1. , the sign is kept 1601 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 1602 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 1603 !! 1604 INTEGER :: ji, jj, jl ! dummy loop indices 1605 INTEGER :: imigr, iihom, ijhom ! temporary integers 1606 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1607 REAL(wp) :: zland 1608 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1609 ! 1610 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 1611 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 1612 1613 !!---------------------------------------------------------------------- 1614 1615 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 1616 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 1617 1618 ! 1619 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 1620 ELSE ; zland = 0.e0 ! zero by default 1621 ENDIF 1622 1623 ! 1. standard boundary treatment 1624 ! ------------------------------ 1625 ! 2. East and west directions exchange 1626 ! ------------------------------------ 1627 ! we play with the neigbours AND the row number because of the periodicity 1628 ! 1629 SELECT CASE ( nbondi ) ! 
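Unlike the ordinary link routines, mpp_lnk_sum_3d (and mpp_lnk_sum_2d below) accumulates the overlap: the local halo values are packed into the send buffer and reset to zero, and whatever arrives from the neighbour is added to the interior points rather than overwriting them. A condensed, self-contained sketch of that accumulate-on-receive idea for a 1D field, with hypothetical names and a uniform halo width jprec, is:

   SUBROUTINE lnk_sum_sketch( kn, jprec, knowe, knoea, kcomm, ptab )
      !! sketch only: "sum the overlap" for one east-west neighbour pair
      USE mpi
      IMPLICIT NONE
      INTEGER, INTENT(in   ) :: kn, jprec, knowe, knoea, kcomm
      REAL(8), INTENT(inout) :: ptab(kn)
      REAL(8) :: zsend(jprec), zrecv(jprec)
      INTEGER :: iihom, ierr
      INTEGER :: istatus(MPI_STATUS_SIZE)
      !
      iihom = kn - 2*jprec                     ! start of the last interior block (as nlci-nreci)
      zsend(:)      = ptab(1:jprec)            ! pack my west-edge halo points ...
      ptab(1:jprec) = 0._8                     ! ... and reset them locally
      CALL mpi_sendrecv( zsend, jprec, MPI_DOUBLE_PRECISION, knowe, 1,   &
         &               zrecv, jprec, MPI_DOUBLE_PRECISION, knoea, 1,   &
         &               kcomm, istatus, ierr )
      ptab(iihom+1:iihom+jprec) = ptab(iihom+1:iihom+jprec) + zrecv(:)   ! accumulate
   END SUBROUTINE lnk_sum_sketch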
Read lateral conditions 1630 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1631 iihom = nlci - jpreci 1632 DO jl = 1, jpreci 1633 zt2ew(:,jl,1) = pt2d(jl ,:) ; pt2d(jl ,:) = 0.0_wp 1634 zt2we(:,jl,1) = pt2d(iihom +jl,:) ; pt2d(iihom +jl,:) = 0.0_wp 1635 END DO 1636 END SELECT 1637 ! 1638 ! ! Migrations 1639 imigr = jpreci * jpj 1640 ! 1641 SELECT CASE ( nbondi ) 1642 CASE ( -1 ) 1643 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 1644 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 1645 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1646 CASE ( 0 ) 1647 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 1648 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 1649 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 1650 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 1651 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1652 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1653 CASE ( 1 ) 1654 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 1655 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 1656 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1657 END SELECT 1658 ! 1659 ! ! Write lateral conditions 1660 iihom = nlci-nreci 1661 ! 1662 SELECT CASE ( nbondi ) 1663 CASE ( -1 ) 1664 DO jl = 1, jpreci 1665 pt2d(iihom+jl,:) = pt2d(iihom+jl,:) + zt2ew(:,jl,2) 1666 END DO 1667 CASE ( 0 ) 1668 DO jl = 1, jpreci 1669 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2) 1670 pt2d(iihom +jl,:) = pt2d(iihom +jl,:) + zt2ew(:,jl,2) 1671 END DO 1672 CASE ( 1 ) 1673 DO jl = 1, jpreci 1674 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2) 1675 END DO 1676 END SELECT 1677 1678 1679 ! 3. North and south directions 1680 ! ----------------------------- 1681 ! always closed : we play only with the neigbours 1682 ! 1683 IF( nbondj /= 2 ) THEN ! Read lateral conditions 1684 ijhom = nlcj - jprecj 1685 DO jl = 1, jprecj 1686 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) ; pt2d(:,ijhom +jl) = 0.0_wp 1687 zt2ns(:,jl,1) = pt2d(:,jl ) ; pt2d(:,jl ) = 0.0_wp 1688 END DO 1689 ENDIF 1690 ! 1691 ! ! Migrations 1692 imigr = jprecj * jpi 1693 ! 1694 SELECT CASE ( nbondj ) 1695 CASE ( -1 ) 1696 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 1697 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 1698 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1699 CASE ( 0 ) 1700 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 1701 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 1702 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 1703 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 1704 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1705 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1706 CASE ( 1 ) 1707 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 1708 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 1709 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1710 END SELECT 1711 ! 1712 ! ! Write lateral conditions 1713 ijhom = nlcj-nrecj 1714 ! 1715 SELECT CASE ( nbondj ) 1716 CASE ( -1 ) 1717 DO jl = 1, jprecj 1718 pt2d(:,ijhom+jl) = pt2d(:,ijhom+jl) + zt2ns(:,jl,2) 1719 END DO 1720 CASE ( 0 ) 1721 DO jl = 1, jprecj 1722 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2) 1723 pt2d(:,ijhom +jl) = pt2d(:,ijhom +jl) + zt2ns(:,jl,2) 1724 END DO 1725 CASE ( 1 ) 1726 DO jl = 1, jprecj 1727 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2) 1728 END DO 1729 END SELECT 1730 1731 1732 ! 4. north fold treatment 1733 ! ----------------------- 1734 ! 1735 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1736 ! 1737 SELECT CASE ( jpni ) 1738 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! 
only 1 northern proc, no mpp 1739 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 1740 END SELECT 1741 ! 1742 ENDIF 1743 ! 1744 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 1745 ! 1746 END SUBROUTINE mpp_lnk_sum_2d 476 !! mpp_lnk_sum_2d et 3D ====>>>>>> à virer du code !!!! 477 478 479 !!---------------------------------------------------------------------- 480 481 1747 482 1748 483 SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req ) … … 1764 499 SELECT CASE ( cn_mpi_send ) 1765 500 CASE ( 'S' ) ! Standard mpi send (blocking) 1766 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, iflag )501 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce , iflag ) 1767 502 CASE ( 'B' ) ! Buffer mpi send (blocking) 1768 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, iflag )503 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce , iflag ) 1769 504 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 1770 505 ! be carefull, one more argument here : the mpi request identifier.. 1771 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_o pa, md_req, iflag )506 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce, md_req, iflag ) 1772 507 END SELECT 1773 508 ! … … 1797 532 IF( PRESENT(ksource) ) use_source = ksource 1798 533 ! 1799 CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_o pa, istatus, iflag )534 CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_oce, istatus, iflag ) 1800 535 ! 1801 536 END SUBROUTINE mpprecv … … 1819 554 itaille = jpi * jpj 1820 555 CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , & 1821 & mpi_double_precision, kp , mpi_comm_o pa, ierror )556 & mpi_double_precision, kp , mpi_comm_oce, ierror ) 1822 557 ! 1823 558 END SUBROUTINE mppgather … … 1842 577 ! 1843 578 CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , & 1844 & mpi_double_precision, kp , mpi_comm_o pa, ierror )579 & mpi_double_precision, kp , mpi_comm_oce, ierror ) 1845 580 ! 1846 581 END SUBROUTINE mppscatter 1847 582 1848 1849 SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) 1850 !!---------------------------------------------------------------------- 1851 !! *** routine mppmax_a_int *** 1852 !! 1853 !! ** Purpose : Find maximum value in an integer layout array 1854 !! 1855 !!---------------------------------------------------------------------- 1856 INTEGER , INTENT(in ) :: kdim ! size of array 1857 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1858 INTEGER , INTENT(in ), OPTIONAL :: kcom ! 1859 ! 1860 INTEGER :: ierror, localcomm ! temporary integer 1861 INTEGER, DIMENSION(kdim) :: iwork 1862 !!---------------------------------------------------------------------- 1863 ! 1864 localcomm = mpi_comm_opa 1865 IF( PRESENT(kcom) ) localcomm = kcom 1866 ! 1867 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) 1868 ! 1869 ktab(:) = iwork(:) 1870 ! 1871 END SUBROUTINE mppmax_a_int 1872 1873 1874 SUBROUTINE mppmax_int( ktab, kcom ) 1875 !!---------------------------------------------------------------------- 1876 !! *** routine mppmax_int *** 1877 !! 1878 !! ** Purpose : Find maximum value in an integer layout array 1879 !! 1880 !!---------------------------------------------------------------------- 1881 INTEGER, INTENT(inout) :: ktab ! ??? 
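mppsend and mpprecv are thin wrappers around the MPI point-to-point calls, with the send mode driven by the namelist string cn_mpi_send: 'S' for the standard blocking mpi_send, 'B' for a buffered mpi_bsend (which needs a buffer attached beforehand with mpi_buffer_attach), and 'I' for a non-blocking mpi_isend whose request is completed later by the l_isend mpi_wait calls in the link routines. A stripped-down sketch of the same dispatch (the routine name is illustrative):

   SUBROUTINE send_mode_sketch( cd_mode, pmess, kcount, kdest, ktag, kcomm, kreq )
      !! sketch only: the three send modes selected by cn_mpi_send
      USE mpi
      IMPLICIT NONE
      CHARACTER(len=1), INTENT(in   ) :: cd_mode              ! 'S', 'B' or 'I'
      INTEGER         , INTENT(in   ) :: kcount, kdest, ktag, kcomm
      REAL(8)         , INTENT(in   ) :: pmess(kcount)
      INTEGER         , INTENT(inout) :: kreq                 ! request id, meaningful for 'I' only
      INTEGER :: ierr
      !
      SELECT CASE( cd_mode )
      CASE( 'S' )   ;   CALL mpi_send ( pmess, kcount, MPI_DOUBLE_PRECISION, kdest, ktag, kcomm, ierr )
      CASE( 'B' )   ;   CALL mpi_bsend( pmess, kcount, MPI_DOUBLE_PRECISION, kdest, ktag, kcomm, ierr )
      CASE( 'I' )   ;   CALL mpi_isend( pmess, kcount, MPI_DOUBLE_PRECISION, kdest, ktag, kcomm, kreq, ierr )
      END SELECT
   END SUBROUTINE send_mode_sketch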
1882 INTEGER, INTENT(in ), OPTIONAL :: kcom ! ??? 1883 ! 1884 INTEGER :: ierror, iwork, localcomm ! temporary integer 1885 !!---------------------------------------------------------------------- 1886 ! 1887 localcomm = mpi_comm_opa 1888 IF( PRESENT(kcom) ) localcomm = kcom 1889 ! 1890 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror ) 1891 ! 1892 ktab = iwork 1893 ! 1894 END SUBROUTINE mppmax_int 1895 1896 1897 SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) 1898 !!---------------------------------------------------------------------- 1899 !! *** routine mppmin_a_int *** 1900 !! 1901 !! ** Purpose : Find minimum value in an integer layout array 1902 !! 1903 !!---------------------------------------------------------------------- 1904 INTEGER , INTENT( in ) :: kdim ! size of array 1905 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1906 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 1907 !! 1908 INTEGER :: ierror, localcomm ! temporary integer 1909 INTEGER, DIMENSION(kdim) :: iwork 1910 !!---------------------------------------------------------------------- 1911 ! 1912 localcomm = mpi_comm_opa 1913 IF( PRESENT(kcom) ) localcomm = kcom 1914 ! 1915 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) 1916 ! 1917 ktab(:) = iwork(:) 1918 ! 1919 END SUBROUTINE mppmin_a_int 1920 1921 1922 SUBROUTINE mppmin_int( ktab, kcom ) 1923 !!---------------------------------------------------------------------- 1924 !! *** routine mppmin_int *** 1925 !! 1926 !! ** Purpose : Find minimum value in an integer layout array 1927 !! 1928 !!---------------------------------------------------------------------- 1929 INTEGER, INTENT(inout) :: ktab ! ??? 1930 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 1931 !! 1932 INTEGER :: ierror, iwork, localcomm 1933 !!---------------------------------------------------------------------- 1934 ! 1935 localcomm = mpi_comm_opa 1936 IF( PRESENT(kcom) ) localcomm = kcom 1937 ! 1938 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror ) 1939 ! 1940 ktab = iwork 1941 ! 1942 END SUBROUTINE mppmin_int 1943 1944 1945 SUBROUTINE mppsum_a_int( ktab, kdim ) 1946 !!---------------------------------------------------------------------- 1947 !! *** routine mppsum_a_int *** 1948 !! 1949 !! ** Purpose : Global integer sum, 1D array case 1950 !! 1951 !!---------------------------------------------------------------------- 1952 INTEGER, INTENT(in ) :: kdim ! ??? 1953 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 1954 ! 1955 INTEGER :: ierror 1956 INTEGER, DIMENSION (kdim) :: iwork 1957 !!---------------------------------------------------------------------- 1958 ! 1959 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1960 ! 1961 ktab(:) = iwork(:) 1962 ! 1963 END SUBROUTINE mppsum_a_int 1964 1965 1966 SUBROUTINE mppsum_int( ktab ) 1967 !!---------------------------------------------------------------------- 1968 !! *** routine mppsum_int *** 1969 !! 1970 !! ** Purpose : Global integer sum 1971 !! 1972 !!---------------------------------------------------------------------- 1973 INTEGER, INTENT(inout) :: ktab 1974 !! 1975 INTEGER :: ierror, iwork 1976 !!---------------------------------------------------------------------- 1977 ! 1978 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1979 ! 1980 ktab = iwork 1981 ! 
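All of the mppmax/mppmin/mppsum wrappers in this region follow one pattern: select the communicator (the optional kcom argument overrides the global one), reduce into a work variable with mpi_allreduce, and copy the result back so the argument is updated in place. The pattern in isolation, for a scalar integer maximum, with MPI_COMM_WORLD standing in for the model communicator:

   SUBROUTINE allreduce_max_sketch( ktab, kcom )
      !! sketch only: in-place scalar integer maximum over a communicator
      USE mpi
      IMPLICIT NONE
      INTEGER, INTENT(inout)           :: ktab
      INTEGER, INTENT(in   ), OPTIONAL :: kcom
      INTEGER :: iwork, ilocalcomm, ierr
      !
      ilocalcomm = MPI_COMM_WORLD              ! stand-in for the model communicator
      IF( PRESENT(kcom) )   ilocalcomm = kcom
      CALL mpi_allreduce( ktab, iwork, 1, MPI_INTEGER, MPI_MAX, ilocalcomm, ierr )
      ktab = iwork
   END SUBROUTINE allreduce_max_sketch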
1982 END SUBROUTINE mppsum_int 1983 1984 1985 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 1986 !!---------------------------------------------------------------------- 1987 !! *** routine mppmax_a_real *** 1988 !! 1989 !! ** Purpose : Maximum 1990 !! 1991 !!---------------------------------------------------------------------- 1992 INTEGER , INTENT(in ) :: kdim 1993 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 1994 INTEGER , INTENT(in ), OPTIONAL :: kcom 1995 ! 1996 INTEGER :: ierror, localcomm 1997 REAL(wp), DIMENSION(kdim) :: zwork 1998 !!---------------------------------------------------------------------- 1999 ! 2000 localcomm = mpi_comm_opa 2001 IF( PRESENT(kcom) ) localcomm = kcom 2002 ! 2003 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror ) 2004 ptab(:) = zwork(:) 2005 ! 2006 END SUBROUTINE mppmax_a_real 2007 2008 2009 SUBROUTINE mppmax_real( ptab, kcom ) 2010 !!---------------------------------------------------------------------- 2011 !! *** routine mppmax_real *** 2012 !! 2013 !! ** Purpose : Maximum 2014 !! 2015 !!---------------------------------------------------------------------- 2016 REAL(wp), INTENT(inout) :: ptab ! ??? 2017 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2018 !! 2019 INTEGER :: ierror, localcomm 2020 REAL(wp) :: zwork 2021 !!---------------------------------------------------------------------- 2022 ! 2023 localcomm = mpi_comm_opa 2024 IF( PRESENT(kcom) ) localcomm = kcom 2025 ! 2026 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror ) 2027 ptab = zwork 2028 ! 2029 END SUBROUTINE mppmax_real 2030 2031 SUBROUTINE mppmax_real_multiple( ptab, NUM , kcom ) 2032 !!---------------------------------------------------------------------- 2033 !! *** routine mppmax_real *** 2034 !! 2035 !! ** Purpose : Maximum 2036 !! 2037 !!---------------------------------------------------------------------- 2038 REAL(wp), DIMENSION(:) , INTENT(inout) :: ptab ! ??? 2039 INTEGER , INTENT(in ) :: NUM 2040 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2041 !! 2042 INTEGER :: ierror, localcomm 2043 REAL(wp) , POINTER , DIMENSION(:) :: zwork 2044 !!---------------------------------------------------------------------- 2045 ! 2046 CALL wrk_alloc(NUM , zwork) 2047 localcomm = mpi_comm_opa 2048 IF( PRESENT(kcom) ) localcomm = kcom 2049 ! 2050 CALL mpi_allreduce( ptab, zwork, NUM, mpi_double_precision, mpi_max, localcomm, ierror ) 2051 ptab = zwork 2052 CALL wrk_dealloc(NUM , zwork) 2053 ! 2054 END SUBROUTINE mppmax_real_multiple 2055 2056 2057 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 2058 !!---------------------------------------------------------------------- 2059 !! *** routine mppmin_a_real *** 2060 !! 2061 !! ** Purpose : Minimum of REAL, array case 2062 !! 2063 !!----------------------------------------------------------------------- 2064 INTEGER , INTENT(in ) :: kdim 2065 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 2066 INTEGER , INTENT(in ), OPTIONAL :: kcom 2067 !! 2068 INTEGER :: ierror, localcomm 2069 REAL(wp), DIMENSION(kdim) :: zwork 2070 !!----------------------------------------------------------------------- 2071 ! 2072 localcomm = mpi_comm_opa 2073 IF( PRESENT(kcom) ) localcomm = kcom 2074 ! 2075 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror ) 2076 ptab(:) = zwork(:) 2077 ! 2078 END SUBROUTINE mppmin_a_real 2079 2080 2081 SUBROUTINE mppmin_real( ptab, kcom ) 2082 !!---------------------------------------------------------------------- 2083 !! 
*** routine mppmin_real *** 2084 !! 2085 !! ** Purpose : minimum of REAL, scalar case 2086 !! 2087 !!----------------------------------------------------------------------- 2088 REAL(wp), INTENT(inout) :: ptab ! 2089 INTEGER , INTENT(in ), OPTIONAL :: kcom 2090 !! 2091 INTEGER :: ierror 2092 REAL(wp) :: zwork 2093 INTEGER :: localcomm 2094 !!----------------------------------------------------------------------- 2095 ! 2096 localcomm = mpi_comm_opa 2097 IF( PRESENT(kcom) ) localcomm = kcom 2098 ! 2099 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror ) 2100 ptab = zwork 2101 ! 2102 END SUBROUTINE mppmin_real 2103 2104 2105 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 2106 !!---------------------------------------------------------------------- 2107 !! *** routine mppsum_a_real *** 2108 !! 2109 !! ** Purpose : global sum, REAL ARRAY argument case 2110 !! 2111 !!----------------------------------------------------------------------- 2112 INTEGER , INTENT( in ) :: kdim ! size of ptab 2113 REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array 2114 INTEGER , INTENT( in ), OPTIONAL :: kcom 2115 !! 2116 INTEGER :: ierror ! temporary integer 2117 INTEGER :: localcomm 2118 REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace 2119 !!----------------------------------------------------------------------- 2120 ! 2121 localcomm = mpi_comm_opa 2122 IF( PRESENT(kcom) ) localcomm = kcom 2123 ! 2124 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror ) 2125 ptab(:) = zwork(:) 2126 ! 2127 END SUBROUTINE mppsum_a_real 2128 2129 2130 SUBROUTINE mppsum_real( ptab, kcom ) 2131 !!---------------------------------------------------------------------- 2132 !! *** routine mppsum_real *** 2133 !! 2134 !! ** Purpose : global sum, SCALAR argument case 2135 !! 2136 !!----------------------------------------------------------------------- 2137 REAL(wp), INTENT(inout) :: ptab ! input scalar 2138 INTEGER , INTENT(in ), OPTIONAL :: kcom 2139 !! 2140 INTEGER :: ierror, localcomm 2141 REAL(wp) :: zwork 2142 !!----------------------------------------------------------------------- 2143 ! 2144 localcomm = mpi_comm_opa 2145 IF( PRESENT(kcom) ) localcomm = kcom 2146 ! 2147 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror ) 2148 ptab = zwork 2149 ! 2150 END SUBROUTINE mppsum_real 2151 2152 2153 SUBROUTINE mppsum_realdd( ytab, kcom ) 2154 !!---------------------------------------------------------------------- 2155 !! *** routine mppsum_realdd *** 2156 !! 2157 !! ** Purpose : global sum in Massively Parallel Processing 2158 !! SCALAR argument case for double-double precision 2159 !! 2160 !!----------------------------------------------------------------------- 2161 COMPLEX(wp), INTENT(inout) :: ytab ! input scalar 2162 INTEGER , INTENT(in ), OPTIONAL :: kcom 2163 ! 2164 INTEGER :: ierror 2165 INTEGER :: localcomm 2166 COMPLEX(wp) :: zwork 2167 !!----------------------------------------------------------------------- 2168 ! 2169 localcomm = mpi_comm_opa 2170 IF( PRESENT(kcom) ) localcomm = kcom 2171 ! 2172 ! reduce local sums into global sum 2173 CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 2174 ytab = zwork 2175 ! 2176 END SUBROUTINE mppsum_realdd 2177 2178 2179 SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom ) 2180 !!---------------------------------------------------------------------- 2181 !! *** routine mppsum_a_realdd *** 2182 !! 2183 !! 
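mppsum_realdd and mppsum_a_realdd below reduce COMPLEX values with MPI_SUMDD, which is not a predefined MPI operation: it is an operator handle created elsewhere in lib_mpp (outside this hunk) with mpi_op_create from a compensated-summation routine, the real part of each complex number carrying the running sum and the imaginary part the accumulated rounding error. The sketch below shows how such an operator can be registered and what a generic two-sum kernel looks like; it illustrates the technique and is not a copy of the NEMO routine.

   SUBROUTINE ddpdd_sketch( ydda, yddb, ilen, itype )
      !! sketch only: user-defined reduction kernel, signature as required by mpi_op_create
      !! (compensated Knuth two-sum: REAL part = sum, AIMAG part = accumulated error)
      IMPLICIT NONE
      INTEGER   , INTENT(in   ) :: ilen, itype
      COMPLEX(8), INTENT(in   ) :: ydda(ilen)
      COMPLEX(8), INTENT(inout) :: yddb(ilen)
      REAL(8) :: zs, zv, ze
      INTEGER :: ji
      DO ji = 1, ilen
         zs = REAL(ydda(ji)) + REAL(yddb(ji))
         zv = zs - REAL(ydda(ji))
         ze = ( REAL(ydda(ji)) - (zs - zv) ) + ( REAL(yddb(ji)) - zv )
         yddb(ji) = CMPLX( zs, ze + AIMAG(ydda(ji)) + AIMAG(yddb(ji)), 8 )
      END DO
   END SUBROUTINE ddpdd_sketch
   !
   ! registration, done once at initialisation (MPI_SUMDD being an INTEGER handle):
   !    CALL mpi_op_create( ddpdd_sketch, .TRUE., MPI_SUMDD, ierr )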
** Purpose : global sum in Massively Parallel Processing 2184 !! COMPLEX ARRAY case for double-double precision 2185 !! 2186 !!----------------------------------------------------------------------- 2187 INTEGER , INTENT(in ) :: kdim ! size of ytab 2188 COMPLEX(wp), DIMENSION(kdim), INTENT(inout) :: ytab ! input array 2189 INTEGER , OPTIONAL , INTENT(in ) :: kcom 2190 ! 2191 INTEGER:: ierror, localcomm ! local integer 2192 COMPLEX(wp), DIMENSION(kdim) :: zwork ! temporary workspace 2193 !!----------------------------------------------------------------------- 2194 ! 2195 localcomm = mpi_comm_opa 2196 IF( PRESENT(kcom) ) localcomm = kcom 2197 ! 2198 CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 2199 ytab(:) = zwork(:) 2200 ! 2201 END SUBROUTINE mppsum_a_realdd 2202 2203 2204 SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj ) 2205 !!------------------------------------------------------------------------ 2206 !! *** routine mpp_minloc *** 2207 !! 2208 !! ** Purpose : Compute the global minimum of an array ptab 2209 !! and also give its global position 2210 !! 2211 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2212 !! 2213 !!-------------------------------------------------------------------------- 2214 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 2215 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 2216 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 2217 INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame 2218 ! 2219 INTEGER :: ierror 2220 INTEGER , DIMENSION(2) :: ilocs 2221 REAL(wp) :: zmin ! local minimum 2222 REAL(wp), DIMENSION(2,1) :: zain, zaout 2223 !!----------------------------------------------------------------------- 2224 ! 2225 zmin = MINVAL( ptab(:,:) , mask= pmask == 1.e0 ) 2226 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 ) 2227 ! 2228 ki = ilocs(1) + nimpp - 1 2229 kj = ilocs(2) + njmpp - 1 2230 ! 2231 zain(1,:)=zmin 2232 zain(2,:)=ki+10000.*kj 2233 ! 2234 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 2235 ! 2236 pmin = zaout(1,1) 2237 kj = INT(zaout(2,1)/10000.) 2238 ki = INT(zaout(2,1) - 10000.*kj ) 2239 ! 2240 END SUBROUTINE mpp_minloc2d 2241 2242 2243 SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj ,kk) 2244 !!------------------------------------------------------------------------ 2245 !! *** routine mpp_minloc *** 2246 !! 2247 !! ** Purpose : Compute the global minimum of an array ptab 2248 !! and also give its global position 2249 !! 2250 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2251 !! 2252 !!-------------------------------------------------------------------------- 2253 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 2254 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 2255 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 2256 INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame 2257 !! 2258 INTEGER :: ierror 2259 REAL(wp) :: zmin ! local minimum 2260 INTEGER , DIMENSION(3) :: ilocs 2261 REAL(wp), DIMENSION(2,1) :: zain, zaout 2262 !!----------------------------------------------------------------------- 2263 ! 2264 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 2265 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 2266 ! 2267 ki = ilocs(1) + nimpp - 1 2268 kj = ilocs(2) + njmpp - 1 2269 kk = ilocs(3) 2270 ! 2271 zain(1,:)=zmin 2272 zain(2,:)=ki+10000.*kj+100000000.*kk 2273 ! 
2274 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 2275 ! 2276 pmin = zaout(1,1) 2277 kk = INT( zaout(2,1) / 100000000. ) 2278 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 2279 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 2280 ! 2281 END SUBROUTINE mpp_minloc3d 2282 2283 2284 SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) 2285 !!------------------------------------------------------------------------ 2286 !! *** routine mpp_maxloc *** 2287 !! 2288 !! ** Purpose : Compute the global maximum of an array ptab 2289 !! and also give its global position 2290 !! 2291 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2292 !! 2293 !!-------------------------------------------------------------------------- 2294 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 2295 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 2296 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 2297 INTEGER , INTENT( out) :: ki, kj ! index of maximum in global frame 2298 !! 2299 INTEGER :: ierror 2300 INTEGER, DIMENSION (2) :: ilocs 2301 REAL(wp) :: zmax ! local maximum 2302 REAL(wp), DIMENSION(2,1) :: zain, zaout 2303 !!----------------------------------------------------------------------- 2304 ! 2305 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 ) 2306 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 ) 2307 ! 2308 ki = ilocs(1) + nimpp - 1 2309 kj = ilocs(2) + njmpp - 1 2310 ! 2311 zain(1,:) = zmax 2312 zain(2,:) = ki + 10000. * kj 2313 ! 2314 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 2315 ! 2316 pmax = zaout(1,1) 2317 kj = INT( zaout(2,1) / 10000. ) 2318 ki = INT( zaout(2,1) - 10000.* kj ) 2319 ! 2320 END SUBROUTINE mpp_maxloc2d 2321 2322 2323 SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) 2324 !!------------------------------------------------------------------------ 2325 !! *** routine mpp_maxloc *** 2326 !! 2327 !! ** Purpose : Compute the global maximum of an array ptab 2328 !! and also give its global position 2329 !! 2330 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 2331 !! 2332 !!-------------------------------------------------------------------------- 2333 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 2334 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 2335 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 2336 INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame 2337 !! 2338 REAL(wp) :: zmax ! local maximum 2339 REAL(wp), DIMENSION(2,1) :: zain, zaout 2340 INTEGER , DIMENSION(3) :: ilocs 2341 INTEGER :: ierror 2342 !!----------------------------------------------------------------------- 2343 ! 2344 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 2345 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 2346 ! 2347 ki = ilocs(1) + nimpp - 1 2348 kj = ilocs(2) + njmpp - 1 2349 kk = ilocs(3) 2350 ! 2351 zain(1,:)=zmax 2352 zain(2,:)=ki+10000.*kj+100000000.*kk 2353 ! 2354 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 2355 ! 2356 pmax = zaout(1,1) 2357 kk = INT( zaout(2,1) / 100000000. ) 2358 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 2359 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 2360 ! 2361 END SUBROUTINE mpp_maxloc3d 2362 583 584 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 585 !!---------------------------------------------------------------------- 586 !! *** routine mpp_delay_sum *** 587 !! 
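The minloc/maxloc routines above return both a global extreme and its position with a single collective: the local extreme is paired with its encoded global indices (ki + 10000*kj, plus 100000000*kk in 3D, which implicitly assumes the global i and j indices stay below 10000) and reduced with MPI_MINLOC or MPI_MAXLOC on MPI_2DOUBLE_PRECISION pairs, so the winning rank's encoded position travels with the value and is decoded afterwards. A minimal sketch of the 2D idiom, with illustrative names:

   SUBROUTINE minloc_sketch( pmin_local, ki, kj, kcomm, pmin_global )
      !! sketch only: global minimum and its (i,j) location in one allreduce
      USE mpi
      IMPLICIT NONE
      REAL(8), INTENT(in   ) :: pmin_local       ! local minimum (e.g. from MINVAL/MINLOC)
      INTEGER, INTENT(inout) :: ki, kj           ! in: local global-frame indices ; out: winner's indices
      INTEGER, INTENT(in   ) :: kcomm
      REAL(8), INTENT(  out) :: pmin_global
      REAL(8) :: zain(2), zaout(2)
      INTEGER :: ierr
      !
      zain(1) = pmin_local
      zain(2) = REAL( ki + 10000*kj, 8 )                  ! encode the position with the value
      CALL mpi_allreduce( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC, kcomm, ierr )
      pmin_global = zaout(1)
      kj = INT( zaout(2) / 10000._8 )                     ! decode the winner's indices
      ki = INT( zaout(2) - 10000._8 * REAL(kj,8) )
   END SUBROUTINE minloc_sketch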
588 !! ** Purpose : performed delayed mpp_sum, the result is received on next call 589 !! 590 !!---------------------------------------------------------------------- 591 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 592 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 593 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 594 REAL(wp), INTENT( out), DIMENSION(:) :: pout 595 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 596 INTEGER, INTENT(in ), OPTIONAL :: kcom 597 !! 598 INTEGER :: ji, isz 599 INTEGER :: idvar 600 INTEGER :: ierr, ilocalcomm 601 COMPLEX(wp), ALLOCATABLE, DIMENSION(:) :: ytmp 602 !!---------------------------------------------------------------------- 603 ilocalcomm = mpi_comm_oce 604 IF( PRESENT(kcom) ) ilocalcomm = kcom 605 606 isz = SIZE(y_in) 607 608 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_dlg = .TRUE. ) 609 610 idvar = -1 611 DO ji = 1, nbdelay 612 IF( TRIM(cdelay) == TRIM(c_delaylist(ji)) ) idvar = ji 613 END DO 614 IF ( idvar == -1 ) CALL ctl_stop( 'STOP',' mpp_delay_sum : please add a new delayed exchange for '//TRIM(cdname) ) 615 616 IF ( ndelayid(idvar) == 0 ) THEN ! first call with restart: %z1d defined in iom_delay_rst 617 ! -------------------------- 618 IF ( SIZE(todelay(idvar)%z1d) /= isz ) THEN ! Check dimension coherence 619 IF(lwp) WRITE(numout,*) ' WARNING: the nb of delayed variables in restart file is not the model one' 620 DEALLOCATE(todelay(idvar)%z1d) 621 ndelayid(idvar) = -1 ! do as if we had no restart 622 ELSE 623 ALLOCATE(todelay(idvar)%y1d(isz)) 624 todelay(idvar)%y1d(:) = CMPLX(todelay(idvar)%z1d(:), 0., wp) ! create %y1d, complex variable needed by mpi_sumdd 625 END IF 626 ENDIF 627 628 IF( ndelayid(idvar) == -1 ) THEN ! first call without restart: define %y1d and %z1d from y_in with blocking allreduce 629 ! -------------------------- 630 ALLOCATE(todelay(idvar)%z1d(isz), todelay(idvar)%y1d(isz)) ! allocate also %z1d as used for the restart 631 CALL mpi_allreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ierr ) ! get %y1d 632 todelay(idvar)%z1d(:) = REAL(todelay(idvar)%y1d(:), wp) ! define %z1d from %y1d 633 ENDIF 634 635 IF( ndelayid(idvar) > 0 ) CALL mpp_delay_rcv( idvar ) ! make sure %z1d is received 636 637 ! send back pout from todelay(idvar)%z1d defined at previous call 638 pout(:) = todelay(idvar)%z1d(:) 639 640 ! send y_in into todelay(idvar)%y1d with a non-blocking communication 641 #if defined key_mpi2 642 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 643 CALL mpi_allreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ndelayid(idvar), ierr ) 644 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 645 #else 646 CALL mpi_iallreduce( y_in(:), todelay(idvar)%y1d(:), isz, MPI_DOUBLE_COMPLEX, mpi_sumdd, ilocalcomm, ndelayid(idvar), ierr ) 647 #endif 648 649 END SUBROUTINE mpp_delay_sum 650 651 652 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 653 !!---------------------------------------------------------------------- 654 !! *** routine mpp_delay_max *** 655 !! 656 !! ** Purpose : performed delayed mpp_max, the result is received on next call 657 !! 658 !!---------------------------------------------------------------------- 659 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 660 CHARACTER(len=*), INTENT(in ) :: cdelay ! 
name (used as id) of the delayed operation 661 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in ! 662 REAL(wp), INTENT( out), DIMENSION(:) :: pout ! 663 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 664 INTEGER, INTENT(in ), OPTIONAL :: kcom 665 !! 666 INTEGER :: ji, isz 667 INTEGER :: idvar 668 INTEGER :: ierr, ilocalcomm 669 !!---------------------------------------------------------------------- 670 ilocalcomm = mpi_comm_oce 671 IF( PRESENT(kcom) ) ilocalcomm = kcom 672 673 isz = SIZE(p_in) 674 675 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_dlg = .TRUE. ) 676 677 idvar = -1 678 DO ji = 1, nbdelay 679 IF( TRIM(cdelay) == TRIM(c_delaylist(ji)) ) idvar = ji 680 END DO 681 IF ( idvar == -1 ) CALL ctl_stop( 'STOP',' mpp_delay_max : please add a new delayed exchange for '//TRIM(cdname) ) 682 683 IF ( ndelayid(idvar) == 0 ) THEN ! first call with restart: %z1d defined in iom_delay_rst 684 ! -------------------------- 685 IF ( SIZE(todelay(idvar)%z1d) /= isz ) THEN ! Check dimension coherence 686 IF(lwp) WRITE(numout,*) ' WARNING: the nb of delayed variables in restart file is not the model one' 687 DEALLOCATE(todelay(idvar)%z1d) 688 ndelayid(idvar) = -1 ! do as if we had no restart 689 END IF 690 ENDIF 691 692 IF( ndelayid(idvar) == -1 ) THEN ! first call without restart: define %z1d from p_in with a blocking allreduce 693 ! -------------------------- 694 ALLOCATE(todelay(idvar)%z1d(isz)) 695 CALL mpi_allreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ierr ) ! get %z1d 696 ENDIF 697 698 IF( ndelayid(idvar) > 0 ) CALL mpp_delay_rcv( idvar ) ! make sure %z1d is received 699 700 ! send back pout from todelay(idvar)%z1d defined at previous call 701 pout(:) = todelay(idvar)%z1d(:) 702 703 ! send p_in into todelay(idvar)%z1d with a non-blocking communication 704 #if defined key_mpi2 705 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 706 CALL mpi_allreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ndelayid(idvar), ierr ) 707 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 708 #else 709 CALL mpi_iallreduce( p_in(:), todelay(idvar)%z1d(:), isz, MPI_DOUBLE_PRECISION, mpi_max, ilocalcomm, ndelayid(idvar), ierr ) 710 #endif 711 712 END SUBROUTINE mpp_delay_max 713 714 715 SUBROUTINE mpp_delay_rcv( kid ) 716 !!---------------------------------------------------------------------- 717 !! *** routine mpp_delay_rcv *** 718 !! 719 !! ** Purpose : force barrier for delayed mpp (needed for restart) 720 !! 721 !!---------------------------------------------------------------------- 722 INTEGER,INTENT(in ) :: kid 723 INTEGER :: ierr 724 !!---------------------------------------------------------------------- 725 IF( ndelayid(kid) /= -2 ) THEN 726 #if ! defined key_mpi2 727 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 728 CALL mpi_wait( ndelayid(kid), MPI_STATUS_IGNORE, ierr ) ! make sure todelay(kid) is received 729 IF( ln_timing ) CALL tic_tac(.FALSE., ld_global = .TRUE.) 730 #endif 731 IF( ASSOCIATED(todelay(kid)%y1d) ) todelay(kid)%z1d(:) = REAL(todelay(kid)%y1d(:), wp) ! define %z1d from %y1d 732 ndelayid(kid) = -2 ! add flag to know that mpi_wait was already called on kid 733 ENDIF 734 END SUBROUTINE mpp_delay_rcv 735 736 737 !!---------------------------------------------------------------------- 738 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 739 !! 
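mpp_delay_sum and mpp_delay_max hide the latency of a global reduction by returning the result of the previous call while launching the current one with a non-blocking mpi_iallreduce (or a blocking mpi_allreduce when key_mpi2 is set and MPI-3 non-blocking collectives are unavailable); mpp_delay_rcv completes the pending request before the buffer is reused or written to the restart file. A bare-bones sketch of the same one-step-delayed pattern, with the state that lib_mpp keeps in the todelay structure reduced to two caller-held variables:

   SUBROUTINE delay_max_sketch( p_in, pout, pprev, kprev_req, kcomm )
      !! sketch only: return last step's global max, start this step's reduction
      !! (pprev must be seeded by a blocking reduction at the very first call,
      !!  and p_in must stay unchanged until the next call completes the request)
      USE mpi
      IMPLICIT NONE
      REAL(8), INTENT(in   ) :: p_in          ! this step's local value
      REAL(8), INTENT(  out) :: pout          ! global maximum of the previous step
      REAL(8), INTENT(inout) :: pprev         ! delayed-result buffer
      INTEGER, INTENT(inout) :: kprev_req     ! pending request (MPI_REQUEST_NULL initially)
      INTEGER, INTENT(in   ) :: kcomm
      INTEGER :: ierr
      !
      IF( kprev_req /= MPI_REQUEST_NULL )   CALL mpi_wait( kprev_req, MPI_STATUS_IGNORE, ierr )
      pout = pprev
      CALL mpi_iallreduce( p_in, pprev, 1, MPI_DOUBLE_PRECISION, MPI_MAX, kcomm, kprev_req, ierr )
   END SUBROUTINE delay_max_sketch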
740 !!---------------------------------------------------------------------- 741 !! 742 # define OPERATION_MAX 743 # define INTEGER_TYPE 744 # define DIM_0d 745 # define ROUTINE_ALLREDUCE mppmax_int 746 # include "mpp_allreduce_generic.h90" 747 # undef ROUTINE_ALLREDUCE 748 # undef DIM_0d 749 # define DIM_1d 750 # define ROUTINE_ALLREDUCE mppmax_a_int 751 # include "mpp_allreduce_generic.h90" 752 # undef ROUTINE_ALLREDUCE 753 # undef DIM_1d 754 # undef INTEGER_TYPE 755 ! 756 # define REAL_TYPE 757 # define DIM_0d 758 # define ROUTINE_ALLREDUCE mppmax_real 759 # include "mpp_allreduce_generic.h90" 760 # undef ROUTINE_ALLREDUCE 761 # undef DIM_0d 762 # define DIM_1d 763 # define ROUTINE_ALLREDUCE mppmax_a_real 764 # include "mpp_allreduce_generic.h90" 765 # undef ROUTINE_ALLREDUCE 766 # undef DIM_1d 767 # undef REAL_TYPE 768 # undef OPERATION_MAX 769 !!---------------------------------------------------------------------- 770 !! *** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 771 !! 772 !!---------------------------------------------------------------------- 773 !! 774 # define OPERATION_MIN 775 # define INTEGER_TYPE 776 # define DIM_0d 777 # define ROUTINE_ALLREDUCE mppmin_int 778 # include "mpp_allreduce_generic.h90" 779 # undef ROUTINE_ALLREDUCE 780 # undef DIM_0d 781 # define DIM_1d 782 # define ROUTINE_ALLREDUCE mppmin_a_int 783 # include "mpp_allreduce_generic.h90" 784 # undef ROUTINE_ALLREDUCE 785 # undef DIM_1d 786 # undef INTEGER_TYPE 787 ! 788 # define REAL_TYPE 789 # define DIM_0d 790 # define ROUTINE_ALLREDUCE mppmin_real 791 # include "mpp_allreduce_generic.h90" 792 # undef ROUTINE_ALLREDUCE 793 # undef DIM_0d 794 # define DIM_1d 795 # define ROUTINE_ALLREDUCE mppmin_a_real 796 # include "mpp_allreduce_generic.h90" 797 # undef ROUTINE_ALLREDUCE 798 # undef DIM_1d 799 # undef REAL_TYPE 800 # undef OPERATION_MIN 801 802 !!---------------------------------------------------------------------- 803 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 804 !! 805 !! Global sum of 1D array or a variable (integer, real or complex) 806 !!---------------------------------------------------------------------- 807 !! 808 # define OPERATION_SUM 809 # define INTEGER_TYPE 810 # define DIM_0d 811 # define ROUTINE_ALLREDUCE mppsum_int 812 # include "mpp_allreduce_generic.h90" 813 # undef ROUTINE_ALLREDUCE 814 # undef DIM_0d 815 # define DIM_1d 816 # define ROUTINE_ALLREDUCE mppsum_a_int 817 # include "mpp_allreduce_generic.h90" 818 # undef ROUTINE_ALLREDUCE 819 # undef DIM_1d 820 # undef INTEGER_TYPE 821 ! 822 # define REAL_TYPE 823 # define DIM_0d 824 # define ROUTINE_ALLREDUCE mppsum_real 825 # include "mpp_allreduce_generic.h90" 826 # undef ROUTINE_ALLREDUCE 827 # undef DIM_0d 828 # define DIM_1d 829 # define ROUTINE_ALLREDUCE mppsum_a_real 830 # include "mpp_allreduce_generic.h90" 831 # undef ROUTINE_ALLREDUCE 832 # undef DIM_1d 833 # undef REAL_TYPE 834 # undef OPERATION_SUM 835 836 # define OPERATION_SUM_DD 837 # define COMPLEX_TYPE 838 # define DIM_0d 839 # define ROUTINE_ALLREDUCE mppsum_realdd 840 # include "mpp_allreduce_generic.h90" 841 # undef ROUTINE_ALLREDUCE 842 # undef DIM_0d 843 # define DIM_1d 844 # define ROUTINE_ALLREDUCE mppsum_a_realdd 845 # include "mpp_allreduce_generic.h90" 846 # undef ROUTINE_ALLREDUCE 847 # undef DIM_1d 848 # undef COMPLEX_TYPE 849 # undef OPERATION_SUM_DD 850 851 !!---------------------------------------------------------------------- 852 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 853 !! 
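These # define / # include blocks are what replaces the hand-written reduction wrappers deleted earlier in the changeset: every inclusion of mpp_allreduce_generic.h90 is expanded by the preprocessor into one routine whose name, operation, data type and rank are fixed by the macros set immediately before it. The template itself is not part of this hunk; the fragment below is only a much-reduced illustration of the technique, with hypothetical file and macro names.

   ! sketch only -- contents of a hypothetical "allreduce_template.h90":
   !
   !    SUBROUTINE ROUTINE_ALLREDUCE( ptab, kcom )
   !       REAL(8), INTENT(inout)           :: ptab
   !       INTEGER, INTENT(in   ), OPTIONAL :: kcom
   !       INTEGER :: icom, ierr
   !       REAL(8) :: zwork
   !       icom = MPI_COMM_WORLD
   !       IF( PRESENT(kcom) )   icom = kcom
   !       CALL mpi_allreduce( ptab, zwork, 1, MPI_DOUBLE_PRECISION, OPERATION, icom, ierr )
   !       ptab = zwork
   !    END SUBROUTINE ROUTINE_ALLREDUCE
   !
   ! and its expansion in the module source, once per generated routine:
   !
   ! #  define OPERATION          MPI_MAX
   ! #  define ROUTINE_ALLREDUCE  my_max_real
   ! #  include "allreduce_template.h90"
   ! #  undef  ROUTINE_ALLREDUCE
   ! #  undef  OPERATION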
854 !!---------------------------------------------------------------------- 855 !! 856 # define OPERATION_MINLOC 857 # define DIM_2d 858 # define ROUTINE_LOC mpp_minloc2d 859 # include "mpp_loc_generic.h90" 860 # undef ROUTINE_LOC 861 # undef DIM_2d 862 # define DIM_3d 863 # define ROUTINE_LOC mpp_minloc3d 864 # include "mpp_loc_generic.h90" 865 # undef ROUTINE_LOC 866 # undef DIM_3d 867 # undef OPERATION_MINLOC 868 869 # define OPERATION_MAXLOC 870 # define DIM_2d 871 # define ROUTINE_LOC mpp_maxloc2d 872 # include "mpp_loc_generic.h90" 873 # undef ROUTINE_LOC 874 # undef DIM_2d 875 # define DIM_3d 876 # define ROUTINE_LOC mpp_maxloc3d 877 # include "mpp_loc_generic.h90" 878 # undef ROUTINE_LOC 879 # undef DIM_3d 880 # undef OPERATION_MAXLOC 2363 881 2364 882 SUBROUTINE mppsync() … … 2372 890 !!----------------------------------------------------------------------- 2373 891 ! 2374 CALL mpi_barrier( mpi_comm_o pa, ierror )892 CALL mpi_barrier( mpi_comm_oce, ierror ) 2375 893 ! 2376 894 END SUBROUTINE mppsync 2377 895 2378 896 2379 SUBROUTINE mppstop 897 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 2380 898 !!---------------------------------------------------------------------- 2381 899 !! *** routine mppstop *** … … 2384 902 !! 2385 903 !!---------------------------------------------------------------------- 904 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 905 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 906 LOGICAL :: llfinal, ll_force_abort 2386 907 INTEGER :: info 2387 908 !!---------------------------------------------------------------------- 2388 ! 2389 CALL mppsync 2390 CALL mpi_finalize( info ) 909 llfinal = .FALSE. 910 IF( PRESENT(ldfinal) ) llfinal = ldfinal 911 ll_force_abort = .FALSE. 912 IF( PRESENT(ld_force_abort) ) ll_force_abort = ld_force_abort 913 ! 914 IF(ll_force_abort) THEN 915 CALL mpi_abort( MPI_COMM_WORLD ) 916 ELSE 917 CALL mppsync 918 CALL mpi_finalize( info ) 919 ENDIF 920 IF( .NOT. llfinal ) STOP 123456 2391 921 ! 2392 922 END SUBROUTINE mppstop … … 2395 925 SUBROUTINE mpp_comm_free( kcom ) 2396 926 !!---------------------------------------------------------------------- 2397 !!----------------------------------------------------------------------2398 927 INTEGER, INTENT(in) :: kcom 2399 928 !! … … 2404 933 ! 2405 934 END SUBROUTINE mpp_comm_free 2406 2407 2408 SUBROUTINE mpp_ini_ice( pindic, kumout )2409 !!----------------------------------------------------------------------2410 !! *** routine mpp_ini_ice ***2411 !!2412 !! ** Purpose : Initialize special communicator for ice areas2413 !! condition together with global variables needed in the ddmpp folding2414 !!2415 !! ** Method : - Look for ice processors in ice routines2416 !! - Put their number in nrank_ice2417 !! - Create groups for the world processors and the ice processors2418 !! - Create a communicator for ice processors2419 !!2420 !! ** output2421 !! njmppmax = njmpp for northern procs2422 !! ndim_rank_ice = number of processors with ice2423 !! nrank_ice (ndim_rank_ice) = ice processors2424 !! ngrp_iworld = group ID for the world processors2425 !! ngrp_ice = group ID for the ice processors2426 !! ncomm_ice = communicator for the ice procs.2427 !! n_ice_root = number (in the world) of proc 0 in the ice comm.2428 !!2429 !!----------------------------------------------------------------------2430 INTEGER, INTENT(in) :: pindic2431 INTEGER, INTENT(in) :: kumout ! 
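mppstop now separates an orderly shutdown (synchronise, then mpi_finalize) from a forced abort on MPI_COMM_WORLD, which terminates every rank even if some are blocked in communication; in the MPI standard mpi_abort also carries an error code (and, with the mpi/mpif.h bindings, a final ierror argument). A sketch of the same clean-up choice written with the full argument lists; the error code 123456 simply mirrors the STOP code used above and is otherwise arbitrary.

   SUBROUTINE stop_sketch( ld_abort )
      !! sketch only: orderly MPI shutdown versus forced abort
      USE mpi
      IMPLICIT NONE
      LOGICAL, INTENT(in) :: ld_abort
      INTEGER :: ierr
      !
      IF( ld_abort ) THEN
         CALL mpi_abort   ( MPI_COMM_WORLD, 123456, ierr )   ! kill all ranks, return the code to the shell
      ELSE
         CALL mpi_barrier ( MPI_COMM_WORLD, ierr )           ! make sure every rank reached the end
         CALL mpi_finalize( ierr )
      ENDIF
   END SUBROUTINE stop_sketch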
ocean.output logical unit2432 !!2433 INTEGER :: jjproc2434 INTEGER :: ii, ierr2435 INTEGER, ALLOCATABLE, DIMENSION(:) :: kice2436 INTEGER, ALLOCATABLE, DIMENSION(:) :: zwork2437 !!----------------------------------------------------------------------2438 !2439 ! Since this is just an init routine and these arrays are of length jpnij2440 ! then don't use wrk_nemo module - just allocate and deallocate.2441 ALLOCATE( kice(jpnij), zwork(jpnij), STAT=ierr )2442 IF( ierr /= 0 ) THEN2443 WRITE(kumout, cform_err)2444 WRITE(kumout,*) 'mpp_ini_ice : failed to allocate 2, 1D arrays (jpnij in length)'2445 CALL mppstop2446 ENDIF2447 2448 ! Look for how many procs with sea-ice2449 !2450 kice = 02451 DO jjproc = 1, jpnij2452 IF( jjproc == narea .AND. pindic .GT. 0 ) kice(jjproc) = 12453 END DO2454 !2455 zwork = 02456 CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )2457 ndim_rank_ice = SUM( zwork )2458 2459 ! Allocate the right size to nrank_north2460 IF( ALLOCATED ( nrank_ice ) ) DEALLOCATE( nrank_ice )2461 ALLOCATE( nrank_ice(ndim_rank_ice) )2462 !2463 ii = 02464 nrank_ice = 02465 DO jjproc = 1, jpnij2466 IF( zwork(jjproc) == 1) THEN2467 ii = ii + 12468 nrank_ice(ii) = jjproc -12469 ENDIF2470 END DO2471 2472 ! Create the world group2473 CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_iworld, ierr )2474 2475 ! Create the ice group from the world group2476 CALL MPI_GROUP_INCL( ngrp_iworld, ndim_rank_ice, nrank_ice, ngrp_ice, ierr )2477 2478 ! Create the ice communicator , ie the pool of procs with sea-ice2479 CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr )2480 2481 ! Find proc number in the world of proc 0 in the north2482 ! The following line seems to be useless, we just comment & keep it as reminder2483 ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_iworld,n_ice_root,ierr)2484 !2485 CALL MPI_GROUP_FREE(ngrp_ice, ierr)2486 CALL MPI_GROUP_FREE(ngrp_iworld, ierr)2487 2488 DEALLOCATE(kice, zwork)2489 !2490 END SUBROUTINE mpp_ini_ice2491 935 2492 936 … … 2518 962 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world : ', ngrp_world 2519 963 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world 2520 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_o pa : ', mpi_comm_opa964 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_oce : ', mpi_comm_oce 2521 965 ! 2522 966 ALLOCATE( kwork(jpnij), STAT=ierr ) … … 2529 973 IF( jpnj == 1 ) THEN 2530 974 ngrp_znl = ngrp_world 2531 ncomm_znl = mpi_comm_o pa975 ncomm_znl = mpi_comm_oce 2532 976 ELSE 2533 977 ! 2534 CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_o pa, ierr )978 CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_oce, ierr ) 2535 979 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork 2536 980 !-$$ CALL flush(numout) … … 2560 1004 2561 1005 ! Create the opa group 2562 CALL MPI_COMM_GROUP(mpi_comm_o pa,ngrp_opa,ierr)1006 CALL MPI_COMM_GROUP(mpi_comm_oce,ngrp_opa,ierr) 2563 1007 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa 2564 1008 !-$$ CALL flush(numout) … … 2570 1014 2571 1015 ! Create the znl communicator from the opa communicator, ie the pool of procs in the same row 2572 CALL MPI_COMM_CREATE ( mpi_comm_o pa, ngrp_znl, ncomm_znl, ierr )1016 CALL MPI_COMM_CREATE ( mpi_comm_oce, ngrp_znl, ncomm_znl, ierr ) 2573 1017 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl 2574 1018 !-$$ CALL flush(numout) … … 2582 1026 l_znl_root = .FALSE. 
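mpp_ini_znl (like the removed mpp_ini_ice above) builds its sub-communicator by the classical group route: take the group of the parent communicator, keep only the ranks that belong together (here the processors of one zonal row, formerly the sea-ice processors), and create a new communicator from that group; ranks outside the selection receive MPI_COMM_NULL. The sequence in isolation, with an explicit rank list as input and illustrative names:

   SUBROUTINE subcomm_sketch( knum, kranks, kcomm_parent, kcomm_sub )
      !! sketch only: communicator restricted to the ranks listed in kranks(1:knum)
      USE mpi
      IMPLICIT NONE
      INTEGER, INTENT(in   ) :: knum, kranks(knum), kcomm_parent
      INTEGER, INTENT(  out) :: kcomm_sub              ! MPI_COMM_NULL on excluded ranks
      INTEGER :: igrp_parent, igrp_sub, ierr
      !
      CALL mpi_comm_group ( kcomm_parent, igrp_parent, ierr )             ! group of all ranks
      CALL mpi_group_incl ( igrp_parent, knum, kranks, igrp_sub, ierr )   ! keep the selected ranks
      CALL mpi_comm_create( kcomm_parent, igrp_sub, kcomm_sub, ierr )     ! collective over the parent
      CALL mpi_group_free ( igrp_sub   , ierr )
      CALL mpi_group_free ( igrp_parent, ierr )
   END SUBROUTINE subcomm_sketch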
2583 1027 kwork (1) = nimpp 2584 CALL mpp_min ( kwork(1), kcom = ncomm_znl)1028 CALL mpp_min ( 'lib_mpp', kwork(1), kcom = ncomm_znl) 2585 1029 IF ( nimpp == kwork(1)) l_znl_root = .TRUE. 2586 1030 END IF … … 2641 1085 ! 2642 1086 ! create the world group 2643 CALL MPI_COMM_GROUP( mpi_comm_o pa, ngrp_world, ierr )1087 CALL MPI_COMM_GROUP( mpi_comm_oce, ngrp_world, ierr ) 2644 1088 ! 2645 1089 ! Create the North group from the world group … … 2647 1091 ! 2648 1092 ! Create the North communicator , ie the pool of procs in the north group 2649 CALL MPI_COMM_CREATE( mpi_comm_o pa, ngrp_north, ncomm_north, ierr )1093 CALL MPI_COMM_CREATE( mpi_comm_oce, ngrp_north, ncomm_north, ierr ) 2650 1094 ! 2651 1095 END SUBROUTINE mpp_ini_north 2652 1096 2653 1097 2654 SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn ) 2655 !!--------------------------------------------------------------------- 2656 !! *** routine mpp_lbc_north_3d *** 2657 !! 2658 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2659 !! in mpp configuration in case of jpn1 > 1 2660 !! 2661 !! ** Method : North fold condition and mpp with more than one proc 2662 !! in i-direction require a specific treatment. We gather 2663 !! the 4 northern lines of the global domain on 1 processor 2664 !! and apply lbc north-fold on this sub array. Then we 2665 !! scatter the north fold array back to the processors. 2666 !! 2667 !!---------------------------------------------------------------------- 2668 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied 2669 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 2670 ! ! = T , U , V , F or W gridpoints 2671 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2672 !! ! = 1. , the sign is kept 2673 INTEGER :: ji, jj, jr, jk 2674 INTEGER :: ierr, itaille, ildi, ilei, iilb 2675 INTEGER :: ijpj, ijpjm1, ij, iproc 2676 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2677 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2678 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather 2679 ! ! Workspace for message transfers avoiding mpi_allgather 2680 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab 2681 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2682 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio 2683 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr 2684 2685 INTEGER :: istatus(mpi_status_size) 2686 INTEGER :: iflag 2687 !!---------------------------------------------------------------------- 2688 ! 2689 ALLOCATE( ztab(jpiglo,4,jpk) , znorthloc(jpi,4,jpk), zfoldwk(jpi,4,jpk), znorthgloio(jpi,4,jpk,jpni) ) 2690 ALLOCATE( ztabl(jpi,4,jpk), ztabr(jpi*jpmaxngh, 4, jpk) ) 2691 2692 ijpj = 4 2693 ijpjm1 = 3 2694 ! 2695 znorthloc(:,:,:) = 0 2696 DO jk = 1, jpk 2697 DO jj = nlcj - ijpj +1, nlcj ! put in xnorthloc the last 4 jlines of pt3d 2698 ij = jj - nlcj + ijpj 2699 znorthloc(:,ij,jk) = pt3d(:,jj,jk) 2700 END DO 2701 END DO 2702 ! 2703 ! ! Build in procs of ncomm_north the znorthgloio 2704 itaille = jpi * jpk * ijpj 2705 2706 IF ( l_north_nogather ) THEN 2707 ! 2708 ztabr(:,:,:) = 0 2709 ztabl(:,:,:) = 0 2710 2711 DO jk = 1, jpk 2712 DO jj = nlcj-ijpj+1, nlcj ! 
First put local values into the global array 2713 ij = jj - nlcj + ijpj 2714 DO ji = nfsloop, nfeloop 2715 ztabl(ji,ij,jk) = pt3d(ji,jj,jk) 2716 END DO 2717 END DO 2718 END DO 2719 2720 DO jr = 1,nsndto 2721 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2722 CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) ) 2723 ENDIF 2724 END DO 2725 DO jr = 1,nsndto 2726 iproc = nfipproc(isendto(jr),jpnj) 2727 IF(iproc .ne. -1) THEN 2728 ilei = nleit (iproc+1) 2729 ildi = nldit (iproc+1) 2730 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 2731 ENDIF 2732 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 2733 CALL mpprecv(5, zfoldwk, itaille, iproc) 2734 DO jk = 1, jpk 2735 DO jj = 1, ijpj 2736 DO ji = ildi, ilei 2737 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) 2738 END DO 2739 END DO 2740 END DO 2741 ELSE IF (iproc .eq. (narea-1)) THEN 2742 DO jk = 1, jpk 2743 DO jj = 1, ijpj 2744 DO ji = ildi, ilei 2745 ztabr(iilb+ji,jj,jk) = pt3d(ji,nlcj-ijpj+jj,jk) 2746 END DO 2747 END DO 2748 END DO 2749 ENDIF 2750 END DO 2751 IF (l_isend) THEN 2752 DO jr = 1,nsndto 2753 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2754 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 2755 ENDIF 2756 END DO 2757 ENDIF 2758 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition 2759 DO jk = 1, jpk 2760 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d 2761 ij = jj - nlcj + ijpj 2762 DO ji= 1, nlci 2763 pt3d(ji,jj,jk) = ztabl(ji,ij,jk) 2764 END DO 2765 END DO 2766 END DO 2767 ! 2768 2769 ELSE 2770 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2771 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2772 ! 2773 ztab(:,:,:) = 0.e0 2774 DO jr = 1, ndim_rank_north ! recover the global north array 2775 iproc = nrank_north(jr) + 1 2776 ildi = nldit (iproc) 2777 ilei = nleit (iproc) 2778 iilb = nimppt(iproc) 2779 DO jk = 1, jpk 2780 DO jj = 1, ijpj 2781 DO ji = ildi, ilei 2782 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) 2783 END DO 2784 END DO 2785 END DO 2786 END DO 2787 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2788 ! 2789 DO jk = 1, jpk 2790 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d 2791 ij = jj - nlcj + ijpj 2792 DO ji= 1, nlci 2793 pt3d(ji,jj,jk) = ztab(ji+nimpp-1,ij,jk) 2794 END DO 2795 END DO 2796 END DO 2797 ! 2798 ENDIF 2799 ! 2800 ! The ztab array has been either: 2801 ! a. Fully populated by the mpi_allgather operation or 2802 ! b. Had the active points for this domain and northern neighbours populated 2803 ! by peer to peer exchanges 2804 ! Either way the array may be folded by lbc_nfd and the result for the span of 2805 ! this domain will be identical. 2806 ! 2807 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 2808 DEALLOCATE( ztabl, ztabr ) 2809 ! 2810 END SUBROUTINE mpp_lbc_north_3d 2811 2812 2813 SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn) 2814 !!--------------------------------------------------------------------- 2815 !! *** routine mpp_lbc_north_2d *** 2816 !! 2817 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2818 !! in mpp configuration in case of jpn1 > 1 (for 2d array ) 2819 !! 2820 !! ** Method : North fold condition and mpp with more than one proc 2821 !! in i-direction require a specific treatment. We gather 2822 !! the 4 northern lines of the global domain on 1 processor 2823 !! and apply lbc north-fold on this sub array. Then we 2824 !! 
scatter the north fold array back to the processors. 2825 !! 2826 !!---------------------------------------------------------------------- 2827 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the b.c. is applied 2828 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points 2829 ! ! = T , U , V , F or W gridpoints 2830 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2831 !! ! = 1. , the sign is kept 2832 INTEGER :: ji, jj, jr 2833 INTEGER :: ierr, itaille, ildi, ilei, iilb 2834 INTEGER :: ijpj, ijpjm1, ij, iproc 2835 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2836 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2837 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather 2838 ! ! Workspace for message transfers avoiding mpi_allgather 2839 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab 2840 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2841 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio 2842 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztabl, ztabr 2843 INTEGER :: istatus(mpi_status_size) 2844 INTEGER :: iflag 2845 !!---------------------------------------------------------------------- 2846 ! 2847 ALLOCATE( ztab(jpiglo,4), znorthloc(jpi,4), zfoldwk(jpi,4), znorthgloio(jpi,4,jpni) ) 2848 ALLOCATE( ztabl(jpi,4), ztabr(jpi*jpmaxngh, 4) ) 2849 ! 2850 ijpj = 4 2851 ijpjm1 = 3 2852 ! 2853 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d 2854 ij = jj - nlcj + ijpj 2855 znorthloc(:,ij) = pt2d(:,jj) 2856 END DO 2857 2858 ! ! Build in procs of ncomm_north the znorthgloio 2859 itaille = jpi * ijpj 2860 IF ( l_north_nogather ) THEN 2861 ! 2862 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified 2863 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange 2864 ! 2865 ztabr(:,:) = 0 2866 ztabl(:,:) = 0 2867 2868 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array 2869 ij = jj - nlcj + ijpj 2870 DO ji = nfsloop, nfeloop 2871 ztabl(ji,ij) = pt2d(ji,jj) 2872 END DO 2873 END DO 2874 2875 DO jr = 1,nsndto 2876 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2877 CALL mppsend(5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) 2878 ENDIF 2879 END DO 2880 DO jr = 1,nsndto 2881 iproc = nfipproc(isendto(jr),jpnj) 2882 IF(iproc .ne. -1) THEN 2883 ilei = nleit (iproc+1) 2884 ildi = nldit (iproc+1) 2885 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 2886 ENDIF 2887 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 2888 CALL mpprecv(5, zfoldwk, itaille, iproc) 2889 DO jj = 1, ijpj 2890 DO ji = ildi, ilei 2891 ztabr(iilb+ji,jj) = zfoldwk(ji,jj) 2892 END DO 2893 END DO 2894 ELSE IF (iproc .eq. (narea-1)) THEN 2895 DO jj = 1, ijpj 2896 DO ji = ildi, ilei 2897 ztabr(iilb+ji,jj) = pt2d(ji,nlcj-ijpj+jj) 2898 END DO 2899 END DO 2900 ENDIF 2901 END DO 2902 IF (l_isend) THEN 2903 DO jr = 1,nsndto 2904 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 2905 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 2906 ENDIF 2907 END DO 2908 ENDIF 2909 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition 2910 ! 2911 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2912 ij = jj - nlcj + ijpj 2913 DO ji = 1, nlci 2914 pt2d(ji,jj) = ztabl(ji,ij) 2915 END DO 2916 END DO 2917 ! 
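!!----------------------------------------------------------------------
!! Illustrative sketch, not part of this changeset: the MPI_ALLGATHER
!! fallback just below concatenates the last 4 j-rows of every northern
!! rank into one buffer so that lbc_nfd can fold them.  The names
!! north_gather_sketch, kni, zloc and zglo are hypothetical; only
!! MPI_ALLGATHER is the real API.
!!----------------------------------------------------------------------
SUBROUTINE north_gather_sketch( kni, kcomm_north, zloc, zglo )
   USE mpi
   INTEGER         , INTENT(in ) :: kni            ! local i-size (jpi)
   INTEGER         , INTENT(in ) :: kcomm_north    ! communicator of the northern ranks
   DOUBLE PRECISION, INTENT(in ) :: zloc(kni,4)    ! last 4 j-rows of this rank
   DOUBLE PRECISION, INTENT(out) :: zglo(kni,4,*)  ! same rows gathered from every northern rank
   INTEGER :: icount, ierr
   !
   icount = kni * 4                                ! number of values sent by (and received from) each rank
   CALL MPI_ALLGATHER( zloc, icount, MPI_DOUBLE_PRECISION,   &
      &                zglo, icount, MPI_DOUBLE_PRECISION, kcomm_north, ierr )
END SUBROUTINE north_gather_sketch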
2918 ELSE 2919 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2920 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2921 ! 2922 ztab(:,:) = 0.e0 2923 DO jr = 1, ndim_rank_north ! recover the global north array 2924 iproc = nrank_north(jr) + 1 2925 ildi = nldit (iproc) 2926 ilei = nleit (iproc) 2927 iilb = nimppt(iproc) 2928 DO jj = 1, ijpj 2929 DO ji = ildi, ilei 2930 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 2931 END DO 2932 END DO 2933 END DO 2934 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2935 ! 2936 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2937 ij = jj - nlcj + ijpj 2938 DO ji = 1, nlci 2939 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2940 END DO 2941 END DO 2942 ! 2943 ENDIF 2944 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 2945 DEALLOCATE( ztabl, ztabr ) 2946 ! 2947 END SUBROUTINE mpp_lbc_north_2d 2948 2949 SUBROUTINE mpp_lbc_north_2d_multiple( pt2d_array, cd_type, psgn, num_fields) 2950 !!--------------------------------------------------------------------- 2951 !! *** routine mpp_lbc_north_2d *** 2952 !! 2953 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2954 !! in mpp configuration in case of jpn1 > 1 2955 !! (for multiple 2d arrays ) 2956 !! 2957 !! ** Method : North fold condition and mpp with more than one proc 2958 !! in i-direction require a specific treatment. We gather 2959 !! the 4 northern lines of the global domain on 1 processor 2960 !! and apply lbc north-fold on this sub array. Then we 2961 !! scatter the north fold array back to the processors. 2962 !! 2963 !!---------------------------------------------------------------------- 2964 INTEGER , INTENT (in ) :: num_fields ! number of variables contained in pt2d 2965 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array 2966 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: cd_type ! nature of pt2d grid-points 2967 ! ! = T , U , V , F or W gridpoints 2968 REAL(wp), DIMENSION(:), INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2969 !! ! = 1. , the sign is kept 2970 INTEGER :: ji, jj, jr, jk 2971 INTEGER :: ierr, itaille, ildi, ilei, iilb 2972 INTEGER :: ijpj, ijpjm1, ij, iproc 2973 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather 2974 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2975 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather 2976 ! ! Workspace for message transfers avoiding mpi_allgather 2977 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab 2978 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk 2979 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio 2980 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr 2981 INTEGER :: istatus(mpi_status_size) 2982 INTEGER :: iflag 2983 !!---------------------------------------------------------------------- 2984 ! 2985 ALLOCATE( ztab(jpiglo,4,num_fields), znorthloc(jpi,4,num_fields), zfoldwk(jpi,4,num_fields), znorthgloio(jpi,4,num_fields,jpni) ) ! expanded to 3 dimensions 2986 ALLOCATE( ztabl(jpi,4,num_fields), ztabr(jpi*jpmaxngh, 4,num_fields) ) 2987 ! 2988 ijpj = 4 2989 ijpjm1 = 3 2990 ! 2991 2992 DO jk = 1, num_fields 2993 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d (for every variable) 2994 ij = jj - nlcj + ijpj 2995 znorthloc(:,ij,jk) = pt2d_array(jk)%pt2d(:,jj) 2996 END DO 2997 END DO 2998 ! ! Build in procs of ncomm_north the znorthgloio 2999 itaille = jpi * ijpj 3000 3001 IF ( l_north_nogather ) THEN 3002 ! 3003 ! 
Avoid the use of mpi_allgather by exchanging only with the processes already identified 3004 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange 3005 ! 3006 ztabr(:,:,:) = 0 3007 ztabl(:,:,:) = 0 3008 3009 DO jk = 1, num_fields 3010 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array 3011 ij = jj - nlcj + ijpj 3012 DO ji = nfsloop, nfeloop 3013 ztabl(ji,ij,jk) = pt2d_array(jk)%pt2d(ji,jj) 3014 END DO 3015 END DO 3016 END DO 3017 3018 DO jr = 1,nsndto 3019 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 3020 CALL mppsend(5, znorthloc, itaille*num_fields, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) ! Buffer expanded "num_fields" times 3021 ENDIF 3022 END DO 3023 DO jr = 1,nsndto 3024 iproc = nfipproc(isendto(jr),jpnj) 3025 IF(iproc .ne. -1) THEN 3026 ilei = nleit (iproc+1) 3027 ildi = nldit (iproc+1) 3028 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) 3029 ENDIF 3030 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN 3031 CALL mpprecv(5, zfoldwk, itaille*num_fields, iproc) ! Buffer expanded "num_fields" times 3032 DO jk = 1 , num_fields 3033 DO jj = 1, ijpj 3034 DO ji = ildi, ilei 3035 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) ! Modified to 3D 3036 END DO 3037 END DO 3038 END DO 3039 ELSE IF (iproc .eq. (narea-1)) THEN 3040 DO jk = 1, num_fields 3041 DO jj = 1, ijpj 3042 DO ji = ildi, ilei 3043 ztabr(iilb+ji,jj,jk) = pt2d_array(jk)%pt2d(ji,nlcj-ijpj+jj) ! Modified to 3D 3044 END DO 3045 END DO 3046 END DO 3047 ENDIF 3048 END DO 3049 IF (l_isend) THEN 3050 DO jr = 1,nsndto 3051 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN 3052 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) 3053 ENDIF 3054 END DO 3055 ENDIF 3056 ! 3057 DO ji = 1, num_fields ! Loop to manage 3D variables 3058 CALL mpp_lbc_nfd( ztabl(:,:,ji), ztabr(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition 3059 END DO 3060 ! 3061 DO jk = 1, num_fields 3062 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 3063 ij = jj - nlcj + ijpj 3064 DO ji = 1, nlci 3065 pt2d_array(jk)%pt2d(ji,jj) = ztabl(ji,ij,jk) ! Modified to 3D 3066 END DO 3067 END DO 3068 END DO 3069 3070 ! 3071 ELSE 3072 ! 3073 CALL MPI_ALLGATHER( znorthloc , itaille*num_fields, MPI_DOUBLE_PRECISION, & 3074 & znorthgloio, itaille*num_fields, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 3075 ! 3076 ztab(:,:,:) = 0.e0 3077 DO jk = 1, num_fields 3078 DO jr = 1, ndim_rank_north ! recover the global north array 3079 iproc = nrank_north(jr) + 1 3080 ildi = nldit (iproc) 3081 ilei = nleit (iproc) 3082 iilb = nimppt(iproc) 3083 DO jj = 1, ijpj 3084 DO ji = ildi, ilei 3085 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) 3086 END DO 3087 END DO 3088 END DO 3089 END DO 3090 3091 DO ji = 1, num_fields 3092 CALL lbc_nfd( ztab(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition 3093 END DO 3094 ! 3095 DO jk = 1, num_fields 3096 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 3097 ij = jj - nlcj + ijpj 3098 DO ji = 1, nlci 3099 pt2d_array(jk)%pt2d(ji,jj) = ztab(ji+nimpp-1,ij,jk) 3100 END DO 3101 END DO 3102 END DO 3103 ! 3104 ! 3105 ENDIF 3106 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) 3107 DEALLOCATE( ztabl, ztabr ) 3108 ! 3109 END SUBROUTINE mpp_lbc_north_2d_multiple 3110 3111 SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn) 3112 !!--------------------------------------------------------------------- 3113 !! *** routine mpp_lbc_north_2d *** 3114 !! 3115 !! 
** Purpose : Ensure proper north fold horizontal bondary condition 3116 !! in mpp configuration in case of jpn1 > 1 and for 2d 3117 !! array with outer extra halo 3118 !! 3119 !! ** Method : North fold condition and mpp with more than one proc 3120 !! in i-direction require a specific treatment. We gather 3121 !! the 4+2*jpr2dj northern lines of the global domain on 1 3122 !! processor and apply lbc north-fold on this sub array. 3123 !! Then we scatter the north fold array back to the processors. 3124 !! 3125 !!---------------------------------------------------------------------- 3126 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo 3127 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 3128 ! ! = T , U , V , F or W -points 3129 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 3130 !! ! north fold, = 1. otherwise 3131 INTEGER :: ji, jj, jr 3132 INTEGER :: ierr, itaille, ildi, ilei, iilb 3133 INTEGER :: ijpj, ij, iproc 3134 ! 3135 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e 3136 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio_e 3137 3138 !!---------------------------------------------------------------------- 3139 ! 3140 ALLOCATE( ztab_e(jpiglo,4+2*jpr2dj), znorthloc_e(jpi,4+2*jpr2dj), znorthgloio_e(jpi,4+2*jpr2dj,jpni) ) 3141 3142 ! 3143 ijpj=4 3144 ztab_e(:,:) = 0.e0 3145 3146 ij=0 3147 ! put in znorthloc_e the last 4 jlines of pt2d 3148 DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj 3149 ij = ij + 1 3150 DO ji = 1, jpi 3151 znorthloc_e(ji,ij)=pt2d(ji,jj) 3152 END DO 3153 END DO 3154 ! 3155 itaille = jpi * ( ijpj + 2 * jpr2dj ) 3156 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & 3157 & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 3158 ! 3159 DO jr = 1, ndim_rank_north ! recover the global north array 3160 iproc = nrank_north(jr) + 1 3161 ildi = nldit (iproc) 3162 ilei = nleit (iproc) 3163 iilb = nimppt(iproc) 3164 DO jj = 1, ijpj+2*jpr2dj 3165 DO ji = ildi, ilei 3166 ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) 3167 END DO 3168 END DO 3169 END DO 3170 3171 3172 ! 2. North-Fold boundary conditions 3173 ! ---------------------------------- 3174 CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj ) 3175 3176 ij = jpr2dj 3177 !! Scatter back to pt2d 3178 DO jj = nlcj - ijpj + 1 , nlcj +jpr2dj 3179 ij = ij +1 3180 DO ji= 1, nlci 3181 pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) 3182 END DO 3183 END DO 3184 ! 3185 DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e ) 3186 ! 3187 END SUBROUTINE mpp_lbc_north_e 3188 3189 3190 SUBROUTINE mpp_lnk_bdy_3d( ptab, cd_type, psgn, ib_bdy ) 3191 !!---------------------------------------------------------------------- 3192 !! *** routine mpp_lnk_bdy_3d *** 3193 !! 3194 !! ** Purpose : Message passing management 3195 !! 3196 !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries 3197 !! between processors following neighboring subdomains. 3198 !! domain parameters 3199 !! nlci : first dimension of the local subdomain 3200 !! nlcj : second dimension of the local subdomain 3201 !! nbondi_bdy : mark for "east-west local boundary" 3202 !! nbondj_bdy : mark for "north-south local boundary" 3203 !! noea : number for local neighboring processors 3204 !! nowe : number for local neighboring processors 3205 !! noso : number for local neighboring processors 3206 !! nono : number for local neighboring processors 3207 !! 3208 !! 
** Action : ptab with update value at its periphery 3209 !! 3210 !!---------------------------------------------------------------------- 3211 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 3212 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 3213 ! ! = T , U , V , F , W points 3214 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 3215 ! ! = 1. , the sign is kept 3216 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 3217 ! 3218 INTEGER :: ji, jj, jk, jl ! dummy loop indices 3219 INTEGER :: imigr, iihom, ijhom ! local integers 3220 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3221 REAL(wp) :: zland ! local scalar 3222 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3223 ! 3224 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 3225 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 3226 !!---------------------------------------------------------------------- 3227 ! 3228 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 3229 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 3230 3231 zland = 0._wp 3232 3233 ! 1. standard boundary treatment 3234 ! ------------------------------ 3235 ! ! East-West boundaries 3236 ! !* Cyclic east-west 3237 IF( nbondi == 2) THEN 3238 IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN 3239 ptab( 1 ,:,:) = ptab(jpim1,:,:) 3240 ptab(jpi,:,:) = ptab( 2 ,:,:) 3241 ELSE 3242 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point 3243 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north 3244 ENDIF 3245 ELSEIF(nbondi == -1) THEN 3246 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point 3247 ELSEIF(nbondi == 1) THEN 3248 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north 3249 ENDIF !* closed 3250 3251 IF (nbondj == 2 .OR. nbondj == -1) THEN 3252 IF( .NOT. cd_type == 'F' ) ptab(:,1:jprecj,:) = zland ! south except F-point 3253 ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN 3254 ptab(:,nlcj-jprecj+1:jpj,:) = zland ! north 3255 ENDIF 3256 ! 3257 ! 2. East and west directions exchange 3258 ! ------------------------------------ 3259 ! we play with the neigbours AND the row number because of the periodicity 3260 ! 3261 SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions 3262 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3263 iihom = nlci-nreci 3264 DO jl = 1, jpreci 3265 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 3266 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 3267 END DO 3268 END SELECT 3269 ! 3270 ! ! Migrations 3271 imigr = jpreci * jpj * jpk 3272 ! 3273 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3274 CASE ( -1 ) 3275 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 3276 CASE ( 0 ) 3277 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 3278 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 3279 CASE ( 1 ) 3280 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 3281 END SELECT 3282 ! 3283 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3284 CASE ( -1 ) 3285 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 3286 CASE ( 0 ) 3287 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 3288 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 3289 CASE ( 1 ) 3290 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 3291 END SELECT 3292 ! 
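!!----------------------------------------------------------------------
!! Illustrative sketch, not part of this changeset: the exchange above
!! posts the sends first (mppsend uses mpi_isend when l_isend is true),
!! then issues the blocking receives and finally waits on the send
!! requests.  Written here with plain MPI calls; halo_exchange_sketch,
!! knei, ktag, pbuf_s and pbuf_r are hypothetical names.
!!----------------------------------------------------------------------
SUBROUTINE halo_exchange_sketch( kcomm, knei, ktag, kn, pbuf_s, pbuf_r )
   USE mpi
   INTEGER         , INTENT(in ) :: kcomm, knei, ktag, kn
   DOUBLE PRECISION, INTENT(in ) :: pbuf_s(kn)   ! halo to send to neighbour knei
   DOUBLE PRECISION, INTENT(out) :: pbuf_r(kn)   ! halo received from neighbour knei
   INTEGER :: ireq, ierr
   INTEGER :: istat(MPI_STATUS_SIZE)
   !
   CALL MPI_ISEND( pbuf_s, kn, MPI_DOUBLE_PRECISION, knei, ktag, kcomm, ireq , ierr )   ! post the send
   CALL MPI_RECV ( pbuf_r, kn, MPI_DOUBLE_PRECISION, knei, ktag, kcomm, istat, ierr )   ! blocking receive
   CALL MPI_WAIT ( ireq, istat, ierr )                                                  ! complete the send
END SUBROUTINE halo_exchange_sketch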
3293 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3294 CASE ( -1 ) 3295 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3296 CASE ( 0 ) 3297 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3298 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3299 CASE ( 1 ) 3300 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3301 END SELECT 3302 ! 3303 ! ! Write Dirichlet lateral conditions 3304 iihom = nlci-jpreci 3305 ! 3306 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3307 CASE ( -1 ) 3308 DO jl = 1, jpreci 3309 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 3310 END DO 3311 CASE ( 0 ) 3312 DO jl = 1, jpreci 3313 ptab( jl,:,:) = zt3we(:,jl,:,2) 3314 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 3315 END DO 3316 CASE ( 1 ) 3317 DO jl = 1, jpreci 3318 ptab( jl,:,:) = zt3we(:,jl,:,2) 3319 END DO 3320 END SELECT 3321 3322 3323 ! 3. North and south directions 3324 ! ----------------------------- 3325 ! always closed : we play only with the neigbours 3326 ! 3327 IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions 3328 ijhom = nlcj-nrecj 3329 DO jl = 1, jprecj 3330 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 3331 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 3332 END DO 3333 ENDIF 3334 ! 3335 ! ! Migrations 3336 imigr = jprecj * jpi * jpk 3337 ! 3338 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3339 CASE ( -1 ) 3340 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 3341 CASE ( 0 ) 3342 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 3343 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 3344 CASE ( 1 ) 3345 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 3346 END SELECT 3347 ! 3348 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3349 CASE ( -1 ) 3350 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 3351 CASE ( 0 ) 3352 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 3353 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 3354 CASE ( 1 ) 3355 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 3356 END SELECT 3357 ! 3358 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3359 CASE ( -1 ) 3360 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3361 CASE ( 0 ) 3362 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3363 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3364 CASE ( 1 ) 3365 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3366 END SELECT 3367 ! 3368 ! ! Write Dirichlet lateral conditions 3369 ijhom = nlcj-jprecj 3370 ! 3371 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3372 CASE ( -1 ) 3373 DO jl = 1, jprecj 3374 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 3375 END DO 3376 CASE ( 0 ) 3377 DO jl = 1, jprecj 3378 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 3379 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 3380 END DO 3381 CASE ( 1 ) 3382 DO jl = 1, jprecj 3383 ptab(:,jl,:) = zt3sn(:,jl,:,2) 3384 END DO 3385 END SELECT 3386 3387 3388 ! 4. north fold treatment 3389 ! ----------------------- 3390 ! 3391 IF( npolj /= 0) THEN 3392 ! 3393 SELECT CASE ( jpni ) 3394 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 3395 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 3396 END SELECT 3397 ! 3398 ENDIF 3399 ! 3400 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 3401 ! 3402 END SUBROUTINE mpp_lnk_bdy_3d 3403 3404 3405 SUBROUTINE mpp_lnk_bdy_2d( ptab, cd_type, psgn, ib_bdy ) 3406 !!---------------------------------------------------------------------- 3407 !! *** routine mpp_lnk_bdy_2d *** 3408 !! 3409 !! ** Purpose : Message passing management 3410 !! 3411 !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries 3412 !! between processors following neighboring subdomains. 3413 !! 
domain parameters 3414 !! nlci : first dimension of the local subdomain 3415 !! nlcj : second dimension of the local subdomain 3416 !! nbondi_bdy : mark for "east-west local boundary" 3417 !! nbondj_bdy : mark for "north-south local boundary" 3418 !! noea : number for local neighboring processors 3419 !! nowe : number for local neighboring processors 3420 !! noso : number for local neighboring processors 3421 !! nono : number for local neighboring processors 3422 !! 3423 !! ** Action : ptab with update value at its periphery 3424 !! 3425 !!---------------------------------------------------------------------- 3426 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 3427 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 3428 ! ! = T , U , V , F , W points 3429 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 3430 ! ! = 1. , the sign is kept 3431 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set 3432 ! 3433 INTEGER :: ji, jj, jl ! dummy loop indices 3434 INTEGER :: imigr, iihom, ijhom ! local integers 3435 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3436 REAL(wp) :: zland 3437 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3438 ! 3439 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 3440 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 3441 !!---------------------------------------------------------------------- 3442 3443 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 3444 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 3445 3446 zland = 0._wp 3447 3448 ! 1. standard boundary treatment 3449 ! ------------------------------ 3450 ! ! East-West boundaries 3451 ! !* Cyclic east-west 3452 IF( nbondi == 2 ) THEN 3453 IF (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) THEN 3454 ptab( 1 ,:) = ptab(jpim1,:) 3455 ptab(jpi,:) = ptab( 2 ,:) 3456 ELSE 3457 IF(.NOT.cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point 3458 ptab(nlci-jpreci+1:jpi ,:) = zland ! north 3459 ENDIF 3460 ELSEIF(nbondi == -1) THEN 3461 IF( .NOT.cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point 3462 ELSEIF(nbondi == 1) THEN 3463 ptab(nlci-jpreci+1:jpi ,:) = zland ! north 3464 ENDIF 3465 ! !* closed 3466 IF( nbondj == 2 .OR. nbondj == -1 ) THEN 3467 IF( .NOT.cd_type == 'F' ) ptab(:, 1 :jprecj) = zland ! south except F-point 3468 ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN 3469 ptab(:,nlcj-jprecj+1:jpj ) = zland ! north 3470 ENDIF 3471 ! 3472 ! 2. East and west directions exchange 3473 ! ------------------------------------ 3474 ! we play with the neigbours AND the row number because of the periodicity 3475 ! 3476 SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions 3477 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3478 iihom = nlci-nreci 3479 DO jl = 1, jpreci 3480 zt2ew(:,jl,1) = ptab(jpreci+jl,:) 3481 zt2we(:,jl,1) = ptab(iihom +jl,:) 3482 END DO 3483 END SELECT 3484 ! 3485 ! ! Migrations 3486 imigr = jpreci * jpj 3487 ! 3488 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3489 CASE ( -1 ) 3490 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 3491 CASE ( 0 ) 3492 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 3493 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 3494 CASE ( 1 ) 3495 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 3496 END SELECT 3497 ! 
3498 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3499 CASE ( -1 ) 3500 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 3501 CASE ( 0 ) 3502 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 3503 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 3504 CASE ( 1 ) 3505 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 3506 END SELECT 3507 ! 3508 SELECT CASE ( nbondi_bdy(ib_bdy) ) 3509 CASE ( -1 ) 3510 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3511 CASE ( 0 ) 3512 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3513 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3514 CASE ( 1 ) 3515 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3516 END SELECT 3517 ! 3518 ! ! Write Dirichlet lateral conditions 3519 iihom = nlci-jpreci 3520 ! 3521 SELECT CASE ( nbondi_bdy_b(ib_bdy) ) 3522 CASE ( -1 ) 3523 DO jl = 1, jpreci 3524 ptab(iihom+jl,:) = zt2ew(:,jl,2) 3525 END DO 3526 CASE ( 0 ) 3527 DO jl = 1, jpreci 3528 ptab(jl ,:) = zt2we(:,jl,2) 3529 ptab(iihom+jl,:) = zt2ew(:,jl,2) 3530 END DO 3531 CASE ( 1 ) 3532 DO jl = 1, jpreci 3533 ptab(jl ,:) = zt2we(:,jl,2) 3534 END DO 3535 END SELECT 3536 3537 3538 ! 3. North and south directions 3539 ! ----------------------------- 3540 ! always closed : we play only with the neigbours 3541 ! 3542 IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions 3543 ijhom = nlcj-nrecj 3544 DO jl = 1, jprecj 3545 zt2sn(:,jl,1) = ptab(:,ijhom +jl) 3546 zt2ns(:,jl,1) = ptab(:,jprecj+jl) 3547 END DO 3548 ENDIF 3549 ! 3550 ! ! Migrations 3551 imigr = jprecj * jpi 3552 ! 3553 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3554 CASE ( -1 ) 3555 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 3556 CASE ( 0 ) 3557 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 3558 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 3559 CASE ( 1 ) 3560 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 3561 END SELECT 3562 ! 3563 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3564 CASE ( -1 ) 3565 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 3566 CASE ( 0 ) 3567 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 3568 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 3569 CASE ( 1 ) 3570 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 3571 END SELECT 3572 ! 3573 SELECT CASE ( nbondj_bdy(ib_bdy) ) 3574 CASE ( -1 ) 3575 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3576 CASE ( 0 ) 3577 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3578 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 3579 CASE ( 1 ) 3580 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 3581 END SELECT 3582 ! 3583 ! ! Write Dirichlet lateral conditions 3584 ijhom = nlcj-jprecj 3585 ! 3586 SELECT CASE ( nbondj_bdy_b(ib_bdy) ) 3587 CASE ( -1 ) 3588 DO jl = 1, jprecj 3589 ptab(:,ijhom+jl) = zt2ns(:,jl,2) 3590 END DO 3591 CASE ( 0 ) 3592 DO jl = 1, jprecj 3593 ptab(:,jl ) = zt2sn(:,jl,2) 3594 ptab(:,ijhom+jl) = zt2ns(:,jl,2) 3595 END DO 3596 CASE ( 1 ) 3597 DO jl = 1, jprecj 3598 ptab(:,jl) = zt2sn(:,jl,2) 3599 END DO 3600 END SELECT 3601 3602 3603 ! 4. north fold treatment 3604 ! ----------------------- 3605 ! 3606 IF( npolj /= 0) THEN 3607 ! 3608 SELECT CASE ( jpni ) 3609 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 3610 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 3611 END SELECT 3612 ! 3613 ENDIF 3614 ! 3615 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 3616 ! 
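!!----------------------------------------------------------------------
!! Illustrative usage, not part of this changeset: a typical call to the
!! BDY link routine above, for a T-point field with no sign change at the
!! north fold.  The caller-side names zssh and ib_bdy are hypothetical.
!!----------------------------------------------------------------------
!!    CALL mpp_lnk_bdy_2d( zssh, 'T', 1._wp, ib_bdy )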
3617 END SUBROUTINE mpp_lnk_bdy_2d 3618 3619 3620 SUBROUTINE mpi_init_opa( ldtxt, ksft, code ) 1098 SUBROUTINE mpi_init_oce( ldtxt, ksft, code ) 3621 1099 !!--------------------------------------------------------------------- 3622 1100 !! *** routine mpp_init.opa *** … … 3649 1127 IF( .NOT. mpi_was_called ) THEN 3650 1128 CALL mpi_init( code ) 3651 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_o pa, code )1129 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_oce, code ) 3652 1130 IF ( code /= MPI_SUCCESS ) THEN 3653 1131 DO ji = 1, SIZE(ldtxt) … … 3675 1153 ENDIF 3676 1154 ! 3677 END SUBROUTINE mpi_init_opa 3678 3679 SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype) 1155 END SUBROUTINE mpi_init_oce 1156 1157 1158 SUBROUTINE DDPDD_MPI( ydda, yddb, ilen, itype ) 3680 1159 !!--------------------------------------------------------------------- 3681 1160 !! Routine DDPDD_MPI: used by reduction operator MPI_SUMDD … … 3684 1163 !! This subroutine computes yddb(i) = ydda(i)+yddb(i) 3685 1164 !!--------------------------------------------------------------------- 3686 INTEGER , INTENT(in) ::ilen, itype3687 COMPLEX(wp), DIMENSION(ilen), INTENT(in) ::ydda3688 COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::yddb1165 INTEGER , INTENT(in) :: ilen, itype 1166 COMPLEX(wp), DIMENSION(ilen), INTENT(in) :: ydda 1167 COMPLEX(wp), DIMENSION(ilen), INTENT(inout) :: yddb 3689 1168 ! 3690 1169 REAL(wp) :: zerr, zt1, zt2 ! local work variables 3691 INTEGER :: ji, ztmp ! local scalar 3692 1170 INTEGER :: ji, ztmp ! local scalar 1171 !!--------------------------------------------------------------------- 1172 ! 3693 1173 ztmp = itype ! avoid compilation warning 3694 1174 ! 3695 1175 DO ji=1,ilen 3696 1176 ! Compute ydda + yddb using Knuth's trick. … … 3703 1183 yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp ) 3704 1184 END DO 3705 1185 ! 3706 1186 END SUBROUTINE DDPDD_MPI 3707 1187 3708 1188 3709 SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, pr2dj)1189 SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, kextj) 3710 1190 !!--------------------------------------------------------------------- 3711 1191 !! *** routine mpp_lbc_north_icb *** … … 3717 1197 !! ** Method : North fold condition and mpp with more than one proc 3718 1198 !! in i-direction require a specific treatment. We gather 3719 !! the 4+ 2*jpr2dj northern lines of the global domain on 11199 !! the 4+kextj northern lines of the global domain on 1 3720 1200 !! processor and apply lbc north-fold on this sub array. 3721 1201 !! Then we scatter the north fold array back to the processors. 3722 !! This version accounts for an extra halo with icebergs. 1202 !! This routine accounts for an extra halo with icebergs 1203 !! and assumes ghost rows and columns have been suppressed. 3723 1204 !! 3724 1205 !!---------------------------------------------------------------------- … … 3728 1209 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 3729 1210 !! ! north fold, = 1. otherwise 3730 INTEGER , OPTIONAL , INTENT(in ) :: pr2dj1211 INTEGER , INTENT(in ) :: kextj ! Extra halo width at north fold 3731 1212 ! 3732 1213 INTEGER :: ji, jj, jr 3733 1214 INTEGER :: ierr, itaille, ildi, ilei, iilb 3734 INTEGER :: i jpj, ij, iproc, ipr2dj1215 INTEGER :: ipj, ij, iproc 3735 1216 ! 3736 1217 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e … … 3738 1219 !!---------------------------------------------------------------------- 3739 1220 ! 3740 ijpj=4 3741 IF( PRESENT(pr2dj) ) THEN ! 
use of additional halos 3742 ipr2dj = pr2dj 3743 ELSE 3744 ipr2dj = 0 3745 ENDIF 3746 ALLOCATE( ztab_e(jpiglo,4+2*ipr2dj), znorthloc_e(jpi,4+2*ipr2dj), znorthgloio_e(jpi,4+2*ipr2dj,jpni) ) 3747 ! 3748 ztab_e(:,:) = 0._wp 3749 ! 3750 ij = 0 3751 ! put in znorthloc_e the last 4 jlines of pt2d 3752 DO jj = nlcj - ijpj + 1 - ipr2dj, nlcj +ipr2dj 1221 ipj=4 1222 ALLOCATE( ztab_e(jpiglo, 1-kextj:ipj+kextj) , & 1223 & znorthloc_e(jpimax, 1-kextj:ipj+kextj) , & 1224 & znorthgloio_e(jpimax, 1-kextj:ipj+kextj,jpni) ) 1225 ! 1226 ztab_e(:,:) = 0._wp 1227 znorthloc_e(:,:) = 0._wp 1228 ! 1229 ij = 1 - kextj 1230 ! put the last ipj+2*kextj lines of pt2d into znorthloc_e 1231 DO jj = jpj - ipj + 1 - kextj , jpj + kextj 1232 znorthloc_e(1:jpi,ij)=pt2d(1:jpi,jj) 3753 1233 ij = ij + 1 3754 DO ji = 1, jpi3755 znorthloc_e(ji,ij)=pt2d(ji,jj)3756 END DO3757 1234 END DO 3758 1235 ! 3759 itaille = jpi * ( ijpj + 2 * ipr2dj ) 3760 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & 3761 & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 1236 itaille = jpimax * ( ipj + 2*kextj ) 1237 ! 1238 IF( ln_timing ) CALL tic_tac(.TRUE.) 1239 CALL MPI_ALLGATHER( znorthloc_e(1,1-kextj) , itaille, MPI_DOUBLE_PRECISION, & 1240 & znorthgloio_e(1,1-kextj,1), itaille, MPI_DOUBLE_PRECISION, & 1241 & ncomm_north, ierr ) 1242 ! 1243 IF( ln_timing ) CALL tic_tac(.FALSE.) 3762 1244 ! 3763 1245 DO jr = 1, ndim_rank_north ! recover the global north array … … 3766 1248 ilei = nleit (iproc) 3767 1249 iilb = nimppt(iproc) 3768 DO jj = 1 , ijpj+2*ipr2dj1250 DO jj = 1-kextj, ipj+kextj 3769 1251 DO ji = ildi, ilei 3770 1252 ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) … … 3773 1255 END DO 3774 1256 3775 3776 1257 ! 2. North-Fold boundary conditions 3777 1258 ! ---------------------------------- 3778 CALL lbc_nfd( ztab_e(:, :), cd_type, psgn, pr2dj = ipr2dj )3779 3780 ij = ipr2dj1259 CALL lbc_nfd( ztab_e(:,1-kextj:ipj+kextj), cd_type, psgn, kextj ) 1260 1261 ij = 1 - kextj 3781 1262 !! Scatter back to pt2d 3782 DO jj = nlcj - ijpj + 1 , nlcj +ipr2dj 3783 ij = ij +1 3784 DO ji= 1, nlci 1263 DO jj = jpj - ipj + 1 - kextj , jpj + kextj 1264 DO ji= 1, jpi 3785 1265 pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) 3786 1266 END DO 1267 ij = ij +1 3787 1268 END DO 3788 1269 ! … … 3792 1273 3793 1274 3794 SUBROUTINE mpp_lnk_2d_icb( pt2d, cd_type, psgn, jpri, jprj )1275 SUBROUTINE mpp_lnk_2d_icb( cdname, pt2d, cd_type, psgn, kexti, kextj ) 3795 1276 !!---------------------------------------------------------------------- 3796 1277 !! *** routine mpp_lnk_2d_icb *** 3797 1278 !! 3798 !! ** Purpose : Message passing manadgement for 2d array (with extra halo and icebergs) 1279 !! ** Purpose : Message passing management for 2d array (with extra halo for icebergs) 1280 !! This routine receives a (1-kexti:jpi+kexti,1-kexti:jpj+kextj) 1281 !! array (usually (0:jpi+1, 0:jpj+1)) from lbc_lnk_icb calls. 3799 1282 !! 3800 1283 !! ** Method : Use mppsend and mpprecv function for passing mask 3801 1284 !! between processors following neighboring subdomains. 3802 1285 !! domain parameters 3803 !! nlci: first dimension of the local subdomain3804 !! nlcj: second dimension of the local subdomain3805 !! jpri : number of rows for extra outer halo3806 !! jprj : number of columns for extra outer halo1286 !! jpi : first dimension of the local subdomain 1287 !! jpj : second dimension of the local subdomain 1288 !! kexti : number of columns for extra outer halo 1289 !! kextj : number of rows for extra outer halo 3807 1290 !! 
nbondi : mark for "east-west local boundary" 3808 1291 !! nbondj : mark for "north-south local boundary" … … 3812 1295 !! nono : number for local neighboring processors 3813 1296 !!---------------------------------------------------------------------- 3814 INTEGER , INTENT(in ) :: jpri3815 INTEGER , INTENT(in ) :: jprj3816 REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo3817 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points3818 ! ! = T , U , V , F , W and I points3819 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the3820 ! ! ! north boundary, = 1. otherwise1297 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 1298 REAL(wp), DIMENSION(1-kexti:jpi+kexti,1-kextj:jpj+kextj), INTENT(inout) :: pt2d ! 2D array with extra halo 1299 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points 1300 REAL(wp) , INTENT(in ) :: psgn ! sign used across the north fold 1301 INTEGER , INTENT(in ) :: kexti ! extra i-halo width 1302 INTEGER , INTENT(in ) :: kextj ! extra j-halo width 1303 ! 3821 1304 INTEGER :: jl ! dummy loop indices 3822 INTEGER :: imigr, iihom, ijhom ! temporaryintegers3823 INTEGER :: ipreci, iprecj ! temporary integers1305 INTEGER :: imigr, iihom, ijhom ! local integers 1306 INTEGER :: ipreci, iprecj ! - - 3824 1307 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 3825 1308 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 3826 1309 !! 3827 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns 3828 REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn 3829 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe 3830 REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew 3831 !!---------------------------------------------------------------------- 3832 3833 ipreci = jpreci + jpri ! take into account outer extra 2D overlap area 3834 iprecj = jprecj + jprj 3835 1310 REAL(wp), DIMENSION(1-kexti:jpi+kexti,nn_hls+kextj,2) :: r2dns, r2dsn 1311 REAL(wp), DIMENSION(1-kextj:jpj+kextj,nn_hls+kexti,2) :: r2dwe, r2dew 1312 !!---------------------------------------------------------------------- 1313 1314 ipreci = nn_hls + kexti ! take into account outer extra 2D overlap area 1315 iprecj = nn_hls + kextj 1316 1317 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, 1, 1, 1, ld_lbc = .TRUE. ) 3836 1318 3837 1319 ! 1. standard boundary treatment … … 3841 1323 ! ! East-West boundaries 3842 1324 ! !* Cyclic east-west 3843 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6)) THEN3844 pt2d(1- jpri: 1 ,:) = pt2d(jpim1-jpri:jpim1 ,:) ! east3845 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west1325 IF( l_Iperio ) THEN 1326 pt2d(1-kexti: 1 ,:) = pt2d(jpim1-kexti: jpim1 ,:) ! east 1327 pt2d( jpi :jpi+kexti,:) = pt2d( 2 :2+kexti,:) ! west 3846 1328 ! 3847 1329 ELSE !* closed 3848 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point 3849 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north 1330 IF( .NOT. cd_type == 'F' ) pt2d( 1-kexti :nn_hls ,:) = 0._wp ! east except at F-point 1331 pt2d(jpi-nn_hls+1:jpi+kexti,:) = 0._wp ! west 1332 ENDIF 1333 ! ! North-South boundaries 1334 IF( l_Jperio ) THEN !* cyclic (only with no mpp j-split) 1335 pt2d(:,1-kextj: 1 ) = pt2d(:,jpjm1-kextj: jpjm1) ! north 1336 pt2d(:, jpj :jpj+kextj) = pt2d(:, 2 :2+kextj) ! south 1337 ELSE !* closed 1338 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-kextj :nn_hls ) = 0._wp ! 
north except at F-point 1339 pt2d(:,jpj-nn_hls+1:jpj+kextj) = 0._wp ! south 3850 1340 ENDIF 3851 1341 ! … … 3856 1346 ! 3857 1347 SELECT CASE ( jpni ) 3858 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )3859 CASE DEFAULT ; CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj) , cd_type, psgn , pr2dj=jprj)1348 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj ) 1349 CASE DEFAULT ; CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj ) 3860 1350 END SELECT 3861 1351 ! … … 3868 1358 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 3869 1359 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 3870 iihom = nlci-nreci-jpri1360 iihom = jpi-nreci-kexti 3871 1361 DO jl = 1, ipreci 3872 r2dew(:,jl,1) = pt2d( jpreci+jl,:)1362 r2dew(:,jl,1) = pt2d(nn_hls+jl,:) 3873 1363 r2dwe(:,jl,1) = pt2d(iihom +jl,:) 3874 1364 END DO … … 3876 1366 ! 3877 1367 ! ! Migrations 3878 imigr = ipreci * ( jpj + 2*jprj) 1368 imigr = ipreci * ( jpj + 2*kextj ) 1369 ! 1370 IF( ln_timing ) CALL tic_tac(.TRUE.) 3879 1371 ! 3880 1372 SELECT CASE ( nbondi ) 3881 1373 CASE ( -1 ) 3882 CALL mppsend( 2, r2dwe(1- jprj,1,1), imigr, noea, ml_req1 )3883 CALL mpprecv( 1, r2dew(1- jprj,1,2), imigr, noea )1374 CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req1 ) 1375 CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea ) 3884 1376 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3885 1377 CASE ( 0 ) 3886 CALL mppsend( 1, r2dew(1- jprj,1,1), imigr, nowe, ml_req1 )3887 CALL mppsend( 2, r2dwe(1- jprj,1,1), imigr, noea, ml_req2 )3888 CALL mpprecv( 1, r2dew(1- jprj,1,2), imigr, noea )3889 CALL mpprecv( 2, r2dwe(1- jprj,1,2), imigr, nowe )1378 CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 ) 1379 CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req2 ) 1380 CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea ) 1381 CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe ) 3890 1382 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3891 1383 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 3892 1384 CASE ( 1 ) 3893 CALL mppsend( 1, r2dew(1- jprj,1,1), imigr, nowe, ml_req1 )3894 CALL mpprecv( 2, r2dwe(1- jprj,1,2), imigr, nowe )1385 CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 ) 1386 CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe ) 3895 1387 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3896 1388 END SELECT 3897 1389 ! 1390 IF( ln_timing ) CALL tic_tac(.FALSE.) 1391 ! 3898 1392 ! ! Write Dirichlet lateral conditions 3899 iihom = nlci - jpreci1393 iihom = jpi - nn_hls 3900 1394 ! 3901 1395 SELECT CASE ( nbondi ) … … 3906 1400 CASE ( 0 ) 3907 1401 DO jl = 1, ipreci 3908 pt2d(jl- jpri,:) = r2dwe(:,jl,2)3909 pt2d( 1402 pt2d(jl-kexti,:) = r2dwe(:,jl,2) 1403 pt2d(iihom+jl,:) = r2dew(:,jl,2) 3910 1404 END DO 3911 1405 CASE ( 1 ) 3912 1406 DO jl = 1, ipreci 3913 pt2d(jl- jpri,:) = r2dwe(:,jl,2)1407 pt2d(jl-kexti,:) = r2dwe(:,jl,2) 3914 1408 END DO 3915 1409 END SELECT … … 3921 1415 ! 3922 1416 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 3923 ijhom = nlcj-nrecj-jprj1417 ijhom = jpj-nrecj-kextj 3924 1418 DO jl = 1, iprecj 3925 1419 r2dsn(:,jl,1) = pt2d(:,ijhom +jl) 3926 r2dns(:,jl,1) = pt2d(:, jprecj+jl)1420 r2dns(:,jl,1) = pt2d(:,nn_hls+jl) 3927 1421 END DO 3928 1422 ENDIF 3929 1423 ! 3930 1424 ! ! Migrations 3931 imigr = iprecj * ( jpi + 2*jpri ) 1425 imigr = iprecj * ( jpi + 2*kexti ) 1426 ! 1427 IF( ln_timing ) CALL tic_tac(.TRUE.) 3932 1428 ! 
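!!----------------------------------------------------------------------
!! Illustrative sketch, not part of this changeset: the tic_tac calls
!! around the exchanges simply bracket each communication with MPI_WTIME
!! so that waiting time can be accumulated separately from compute time
!! (tic_tac itself is defined further down in this module).  A minimal
!! stand-alone version of that bracketing; comm_timer_sketch and zwait
!! are hypothetical names.
!!----------------------------------------------------------------------
SUBROUTINE comm_timer_sketch( zwait )
   USE mpi
   DOUBLE PRECISION, INTENT(inout) :: zwait   ! cumulated time spent waiting in communications
   DOUBLE PRECISION :: zt0
   !
   zt0 = MPI_WTIME()                          ! "tic" just before the exchange
   ! ... MPI send/recv calls would go here ...
   zwait = zwait + MPI_WTIME() - zt0          ! "tac" just after it: cumulate the elapsed time
END SUBROUTINE comm_timer_sketch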
3933 1429 SELECT CASE ( nbondj ) 3934 1430 CASE ( -1 ) 3935 CALL mppsend( 4, r2dsn(1- jpri,1,1), imigr, nono, ml_req1 )3936 CALL mpprecv( 3, r2dns(1- jpri,1,2), imigr, nono )1431 CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req1 ) 1432 CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono ) 3937 1433 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3938 1434 CASE ( 0 ) 3939 CALL mppsend( 3, r2dns(1- jpri,1,1), imigr, noso, ml_req1 )3940 CALL mppsend( 4, r2dsn(1- jpri,1,1), imigr, nono, ml_req2 )3941 CALL mpprecv( 3, r2dns(1- jpri,1,2), imigr, nono )3942 CALL mpprecv( 4, r2dsn(1- jpri,1,2), imigr, noso )1435 CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 ) 1436 CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req2 ) 1437 CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono ) 1438 CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso ) 3943 1439 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3944 1440 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 3945 1441 CASE ( 1 ) 3946 CALL mppsend( 3, r2dns(1- jpri,1,1), imigr, noso, ml_req1 )3947 CALL mpprecv( 4, r2dsn(1- jpri,1,2), imigr, noso )1442 CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 ) 1443 CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso ) 3948 1444 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 3949 1445 END SELECT 3950 1446 ! 1447 IF( ln_timing ) CALL tic_tac(.FALSE.) 1448 ! 3951 1449 ! ! Write Dirichlet lateral conditions 3952 ijhom = nlcj - jprecj1450 ijhom = jpj - nn_hls 3953 1451 ! 3954 1452 SELECT CASE ( nbondj ) … … 3959 1457 CASE ( 0 ) 3960 1458 DO jl = 1, iprecj 3961 pt2d(:,jl- jprj) = r2dsn(:,jl,2)3962 pt2d(:,ijhom+jl 1459 pt2d(:,jl-kextj) = r2dsn(:,jl,2) 1460 pt2d(:,ijhom+jl) = r2dns(:,jl,2) 3963 1461 END DO 3964 1462 CASE ( 1 ) 3965 1463 DO jl = 1, iprecj 3966 pt2d(:,jl- jprj) = r2dsn(:,jl,2)1464 pt2d(:,jl-kextj) = r2dsn(:,jl,2) 3967 1465 END DO 3968 1466 END SELECT 3969 1467 ! 3970 1468 END SUBROUTINE mpp_lnk_2d_icb 1469 1470 1471 SUBROUTINE mpp_report( cdname, kpk, kpl, kpf, ld_lbc, ld_glb, ld_dlg ) 1472 !!---------------------------------------------------------------------- 1473 !! *** routine mpp_report *** 1474 !! 1475 !! ** Purpose : report use of mpp routines per time-setp 1476 !! 1477 !!---------------------------------------------------------------------- 1478 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1479 INTEGER , OPTIONAL, INTENT(in ) :: kpk, kpl, kpf 1480 LOGICAL , OPTIONAL, INTENT(in ) :: ld_lbc, ld_glb, ld_dlg 1481 !! 1482 LOGICAL :: ll_lbc, ll_glb, ll_dlg 1483 INTEGER :: ji, jj, jk, jh, jf ! dummy loop indices 1484 !!---------------------------------------------------------------------- 1485 ! 1486 ll_lbc = .FALSE. 1487 IF( PRESENT(ld_lbc) ) ll_lbc = ld_lbc 1488 ll_glb = .FALSE. 1489 IF( PRESENT(ld_glb) ) ll_glb = ld_glb 1490 ll_dlg = .FALSE. 1491 IF( PRESENT(ld_dlg) ) ll_dlg = ld_dlg 1492 ! 1493 ! find the smallest common frequency: default = frequency product, if multiple, choose the larger of the 2 frequency 1494 IF( ncom_dttrc /= 1 ) CALL ctl_stop( 'STOP', 'mpp_report, ncom_dttrc /= 1 not coded...' ) 1495 ncom_freq = ncom_fsbc 1496 ! 1497 IF ( ncom_stp == nit000+ncom_freq ) THEN ! avoid to count extra communications in potential initializations at nit000 1498 IF( ll_lbc ) THEN 1499 IF( .NOT. ALLOCATED(ncomm_sequence) ) ALLOCATE( ncomm_sequence(ncom_rec_max,2) ) 1500 IF( .NOT. 
ALLOCATED( crname_lbc) ) ALLOCATE( crname_lbc(ncom_rec_max ) ) 1501 n_sequence_lbc = n_sequence_lbc + 1 1502 IF( n_sequence_lbc > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' ) ! deadlock 1503 crname_lbc(n_sequence_lbc) = cdname ! keep the name of the calling routine 1504 ncomm_sequence(n_sequence_lbc,1) = kpk*kpl ! size of 3rd and 4th dimensions 1505 ncomm_sequence(n_sequence_lbc,2) = kpf ! number of arrays to be treated (multi) 1506 ENDIF 1507 IF( ll_glb ) THEN 1508 IF( .NOT. ALLOCATED(crname_glb) ) ALLOCATE( crname_glb(ncom_rec_max) ) 1509 n_sequence_glb = n_sequence_glb + 1 1510 IF( n_sequence_glb > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' ) ! deadlock 1511 crname_glb(n_sequence_glb) = cdname ! keep the name of the calling routine 1512 ENDIF 1513 IF( ll_dlg ) THEN 1514 IF( .NOT. ALLOCATED(crname_dlg) ) ALLOCATE( crname_dlg(ncom_rec_max) ) 1515 n_sequence_dlg = n_sequence_dlg + 1 1516 IF( n_sequence_dlg > ncom_rec_max ) CALL ctl_stop( 'STOP', 'lib_mpp, increase ncom_rec_max' ) ! deadlock 1517 crname_dlg(n_sequence_dlg) = cdname ! keep the name of the calling routine 1518 ENDIF 1519 ELSE IF ( ncom_stp == nit000+2*ncom_freq ) THEN 1520 CALL ctl_opn( numcom, 'communication_report.txt', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea ) 1521 WRITE(numcom,*) ' ' 1522 WRITE(numcom,*) ' ------------------------------------------------------------' 1523 WRITE(numcom,*) ' Communication pattern report (second oce+sbc+top time step):' 1524 WRITE(numcom,*) ' ------------------------------------------------------------' 1525 WRITE(numcom,*) ' ' 1526 WRITE(numcom,'(A,I4)') ' Exchanged halos : ', n_sequence_lbc 1527 jj = 0; jk = 0; jf = 0; jh = 0 1528 DO ji = 1, n_sequence_lbc 1529 IF ( ncomm_sequence(ji,1) .GT. 1 ) jk = jk + 1 1530 IF ( ncomm_sequence(ji,2) .GT. 1 ) jf = jf + 1 1531 IF ( ncomm_sequence(ji,1) .GT. 1 .AND. ncomm_sequence(ji,2) .GT. 
1 ) jj = jj + 1 1532 jh = MAX (jh, ncomm_sequence(ji,1)*ncomm_sequence(ji,2)) 1533 END DO 1534 WRITE(numcom,'(A,I3)') ' 3D Exchanged halos : ', jk 1535 WRITE(numcom,'(A,I3)') ' Multi arrays exchanged halos : ', jf 1536 WRITE(numcom,'(A,I3)') ' from which 3D : ', jj 1537 WRITE(numcom,'(A,I10)') ' Array max size : ', jh*jpi*jpj 1538 WRITE(numcom,*) ' ' 1539 WRITE(numcom,*) ' lbc_lnk called' 1540 jj = 1 1541 DO ji = 2, n_sequence_lbc 1542 IF( crname_lbc(ji-1) /= crname_lbc(ji) ) THEN 1543 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(ji-1)) 1544 jj = 0 1545 END IF 1546 jj = jj + 1 1547 END DO 1548 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc)) 1549 WRITE(numcom,*) ' ' 1550 IF ( n_sequence_glb > 0 ) THEN 1551 WRITE(numcom,'(A,I4)') ' Global communications : ', n_sequence_glb 1552 jj = 1 1553 DO ji = 2, n_sequence_glb 1554 IF( crname_glb(ji-1) /= crname_glb(ji) ) THEN 1555 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(ji-1)) 1556 jj = 0 1557 END IF 1558 jj = jj + 1 1559 END DO 1560 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb)) 1561 DEALLOCATE(crname_glb) 1562 ELSE 1563 WRITE(numcom,*) ' No MPI global communication ' 1564 ENDIF 1565 WRITE(numcom,*) ' ' 1566 IF ( n_sequence_dlg > 0 ) THEN 1567 WRITE(numcom,'(A,I4)') ' Delayed global communications : ', n_sequence_dlg 1568 jj = 1 1569 DO ji = 2, n_sequence_dlg 1570 IF( crname_dlg(ji-1) /= crname_dlg(ji) ) THEN 1571 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_dlg(ji-1)) 1572 jj = 0 1573 END IF 1574 jj = jj + 1 1575 END DO 1576 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_dlg(n_sequence_dlg)) 1577 DEALLOCATE(crname_dlg) 1578 ELSE 1579 WRITE(numcom,*) ' No MPI delayed global communication ' 1580 ENDIF 1581 WRITE(numcom,*) ' ' 1582 WRITE(numcom,*) ' -----------------------------------------------' 1583 WRITE(numcom,*) ' ' 1584 DEALLOCATE(ncomm_sequence) 1585 DEALLOCATE(crname_lbc) 1586 ENDIF 1587 END SUBROUTINE mpp_report 1588 3971 1589 1590 SUBROUTINE tic_tac (ld_tic, ld_global) 1591 1592 LOGICAL, INTENT(IN) :: ld_tic 1593 LOGICAL, OPTIONAL, INTENT(IN) :: ld_global 1594 REAL(wp), DIMENSION(2), SAVE :: tic_wt 1595 REAL(wp), SAVE :: tic_ct = 0._wp 1596 INTEGER :: ii 1597 1598 IF( ncom_stp <= nit000 ) RETURN 1599 IF( ncom_stp == nitend ) RETURN 1600 ii = 1 1601 IF( PRESENT( ld_global ) ) THEN 1602 IF( ld_global ) ii = 2 1603 END IF 1604 1605 IF ( ld_tic ) THEN 1606 tic_wt(ii) = MPI_Wtime() ! start count tic->tac (waiting time) 1607 IF ( tic_ct > 0.0_wp ) compute_time = compute_time + MPI_Wtime() - tic_ct ! cumulate count tac->tic 1608 ELSE 1609 waiting_time(ii) = waiting_time(ii) + MPI_Wtime() - tic_wt(ii) ! cumulate count tic->tac 1610 tic_ct = MPI_Wtime() ! start count tac->tic (waiting time) 1611 ENDIF 1612 1613 END SUBROUTINE tic_tac 1614 1615 1616 #else 1617 !!---------------------------------------------------------------------- 1618 !! 
Default case: Dummy module share memory computing 1619 !!---------------------------------------------------------------------- 1620 USE in_out_manager 1621 1622 INTERFACE mpp_sum 1623 MODULE PROCEDURE mppsum_int, mppsum_a_int, mppsum_real, mppsum_a_real, mppsum_realdd, mppsum_a_realdd 1624 END INTERFACE 1625 INTERFACE mpp_max 1626 MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real 1627 END INTERFACE 1628 INTERFACE mpp_min 1629 MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real 1630 END INTERFACE 1631 INTERFACE mpp_minloc 1632 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d 1633 END INTERFACE 1634 INTERFACE mpp_maxloc 1635 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 1636 END INTERFACE 1637 1638 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .FALSE. !: mpp flag 1639 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used) 1640 INTEGER, PUBLIC :: mpi_comm_oce ! opa local communicator 1641 1642 INTEGER, PARAMETER, PUBLIC :: nbdelay = 0 ! make sure we don't enter loops: DO ji = 1, nbdelay 1643 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaylist = 'empty' 1644 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaycpnt = 'empty' 1645 LOGICAL, PUBLIC :: l_full_nf_update = .TRUE. 1646 TYPE :: DELAYARR 1647 REAL( wp), POINTER, DIMENSION(:) :: z1d => NULL() 1648 COMPLEX(wp), POINTER, DIMENSION(:) :: y1d => NULL() 1649 END TYPE DELAYARR 1650 TYPE( DELAYARR ), DIMENSION(1), PUBLIC :: todelay 1651 INTEGER, PUBLIC, DIMENSION(1) :: ndelayid = -1 1652 !!---------------------------------------------------------------------- 1653 CONTAINS 1654 1655 INTEGER FUNCTION lib_mpp_alloc(kumout) ! Dummy function 1656 INTEGER, INTENT(in) :: kumout 1657 lib_mpp_alloc = 0 1658 END FUNCTION lib_mpp_alloc 1659 1660 FUNCTION mynode( ldtxt, ldname, kumnam_ref, knumnam_cfg, kumond , kstop, localComm ) RESULT (function_value) 1661 INTEGER, OPTIONAL , INTENT(in ) :: localComm 1662 CHARACTER(len=*),DIMENSION(:) :: ldtxt 1663 CHARACTER(len=*) :: ldname 1664 INTEGER :: kumnam_ref, knumnam_cfg , kumond , kstop 1665 IF( PRESENT( localComm ) ) mpi_comm_oce = localComm 1666 function_value = 0 1667 IF( .FALSE. ) ldtxt(:) = 'never done' 1668 CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. , 1 ) 1669 END FUNCTION mynode 1670 1671 SUBROUTINE mppsync ! Dummy routine 1672 END SUBROUTINE mppsync 1673 1674 !!---------------------------------------------------------------------- 1675 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 1676 !! 1677 !!---------------------------------------------------------------------- 1678 !! 1679 # define OPERATION_MAX 1680 # define INTEGER_TYPE 1681 # define DIM_0d 1682 # define ROUTINE_ALLREDUCE mppmax_int 1683 # include "mpp_allreduce_generic.h90" 1684 # undef ROUTINE_ALLREDUCE 1685 # undef DIM_0d 1686 # define DIM_1d 1687 # define ROUTINE_ALLREDUCE mppmax_a_int 1688 # include "mpp_allreduce_generic.h90" 1689 # undef ROUTINE_ALLREDUCE 1690 # undef DIM_1d 1691 # undef INTEGER_TYPE 1692 ! 1693 # define REAL_TYPE 1694 # define DIM_0d 1695 # define ROUTINE_ALLREDUCE mppmax_real 1696 # include "mpp_allreduce_generic.h90" 1697 # undef ROUTINE_ALLREDUCE 1698 # undef DIM_0d 1699 # define DIM_1d 1700 # define ROUTINE_ALLREDUCE mppmax_a_real 1701 # include "mpp_allreduce_generic.h90" 1702 # undef ROUTINE_ALLREDUCE 1703 # undef DIM_1d 1704 # undef REAL_TYPE 1705 # undef OPERATION_MAX 1706 !!---------------------------------------------------------------------- 1707 !! 
*** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 1708 !! 1709 !!---------------------------------------------------------------------- 1710 !! 1711 # define OPERATION_MIN 1712 # define INTEGER_TYPE 1713 # define DIM_0d 1714 # define ROUTINE_ALLREDUCE mppmin_int 1715 # include "mpp_allreduce_generic.h90" 1716 # undef ROUTINE_ALLREDUCE 1717 # undef DIM_0d 1718 # define DIM_1d 1719 # define ROUTINE_ALLREDUCE mppmin_a_int 1720 # include "mpp_allreduce_generic.h90" 1721 # undef ROUTINE_ALLREDUCE 1722 # undef DIM_1d 1723 # undef INTEGER_TYPE 1724 ! 1725 # define REAL_TYPE 1726 # define DIM_0d 1727 # define ROUTINE_ALLREDUCE mppmin_real 1728 # include "mpp_allreduce_generic.h90" 1729 # undef ROUTINE_ALLREDUCE 1730 # undef DIM_0d 1731 # define DIM_1d 1732 # define ROUTINE_ALLREDUCE mppmin_a_real 1733 # include "mpp_allreduce_generic.h90" 1734 # undef ROUTINE_ALLREDUCE 1735 # undef DIM_1d 1736 # undef REAL_TYPE 1737 # undef OPERATION_MIN 1738 1739 !!---------------------------------------------------------------------- 1740 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 1741 !! 1742 !! Global sum of 1D array or a variable (integer, real or complex) 1743 !!---------------------------------------------------------------------- 1744 !! 1745 # define OPERATION_SUM 1746 # define INTEGER_TYPE 1747 # define DIM_0d 1748 # define ROUTINE_ALLREDUCE mppsum_int 1749 # include "mpp_allreduce_generic.h90" 1750 # undef ROUTINE_ALLREDUCE 1751 # undef DIM_0d 1752 # define DIM_1d 1753 # define ROUTINE_ALLREDUCE mppsum_a_int 1754 # include "mpp_allreduce_generic.h90" 1755 # undef ROUTINE_ALLREDUCE 1756 # undef DIM_1d 1757 # undef INTEGER_TYPE 1758 ! 1759 # define REAL_TYPE 1760 # define DIM_0d 1761 # define ROUTINE_ALLREDUCE mppsum_real 1762 # include "mpp_allreduce_generic.h90" 1763 # undef ROUTINE_ALLREDUCE 1764 # undef DIM_0d 1765 # define DIM_1d 1766 # define ROUTINE_ALLREDUCE mppsum_a_real 1767 # include "mpp_allreduce_generic.h90" 1768 # undef ROUTINE_ALLREDUCE 1769 # undef DIM_1d 1770 # undef REAL_TYPE 1771 # undef OPERATION_SUM 1772 1773 # define OPERATION_SUM_DD 1774 # define COMPLEX_TYPE 1775 # define DIM_0d 1776 # define ROUTINE_ALLREDUCE mppsum_realdd 1777 # include "mpp_allreduce_generic.h90" 1778 # undef ROUTINE_ALLREDUCE 1779 # undef DIM_0d 1780 # define DIM_1d 1781 # define ROUTINE_ALLREDUCE mppsum_a_realdd 1782 # include "mpp_allreduce_generic.h90" 1783 # undef ROUTINE_ALLREDUCE 1784 # undef DIM_1d 1785 # undef COMPLEX_TYPE 1786 # undef OPERATION_SUM_DD 1787 1788 !!---------------------------------------------------------------------- 1789 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 1790 !! 1791 !!---------------------------------------------------------------------- 1792 !! 
1793 # define OPERATION_MINLOC 1794 # define DIM_2d 1795 # define ROUTINE_LOC mpp_minloc2d 1796 # include "mpp_loc_generic.h90" 1797 # undef ROUTINE_LOC 1798 # undef DIM_2d 1799 # define DIM_3d 1800 # define ROUTINE_LOC mpp_minloc3d 1801 # include "mpp_loc_generic.h90" 1802 # undef ROUTINE_LOC 1803 # undef DIM_3d 1804 # undef OPERATION_MINLOC 1805 1806 # define OPERATION_MAXLOC 1807 # define DIM_2d 1808 # define ROUTINE_LOC mpp_maxloc2d 1809 # include "mpp_loc_generic.h90" 1810 # undef ROUTINE_LOC 1811 # undef DIM_2d 1812 # define DIM_3d 1813 # define ROUTINE_LOC mpp_maxloc3d 1814 # include "mpp_loc_generic.h90" 1815 # undef ROUTINE_LOC 1816 # undef DIM_3d 1817 # undef OPERATION_MAXLOC 1818 1819 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 1820 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1821 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1822 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 1823 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1824 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1825 INTEGER, INTENT(in ), OPTIONAL :: kcom 1826 ! 1827 pout(:) = REAL(y_in(:), wp) 1828 END SUBROUTINE mpp_delay_sum 1829 1830 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 1831 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1832 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1833 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in 1834 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1835 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1836 INTEGER, INTENT(in ), OPTIONAL :: kcom 1837 ! 1838 pout(:) = p_in(:) 1839 END SUBROUTINE mpp_delay_max 1840 1841 SUBROUTINE mpp_delay_rcv( kid ) 1842 INTEGER,INTENT(in ) :: kid 1843 WRITE(*,*) 'mpp_delay_rcv: You should not have seen this print! error?', kid 1844 END SUBROUTINE mpp_delay_rcv 1845 1846 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 1847 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 1848 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 1849 STOP ! non MPP case, just stop the run 1850 END SUBROUTINE mppstop 1851 1852 SUBROUTINE mpp_ini_znl( knum ) 1853 INTEGER :: knum 1854 WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum 1855 END SUBROUTINE mpp_ini_znl 1856 1857 SUBROUTINE mpp_comm_free( kcom ) 1858 INTEGER :: kcom 1859 WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom 1860 END SUBROUTINE mpp_comm_free 1861 1862 #endif 3972 1863 3973 1864 !!---------------------------------------------------------------------- … … 3988 1879 ! 3989 1880 nstop = nstop + 1 3990 IF(lwp) THEN 3991 WRITE(numout,cform_err) 3992 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd1 3993 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd2 3994 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd3 3995 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd4 3996 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd5 3997 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd6 3998 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd7 3999 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd8 4000 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd9 4001 IF( PRESENT(cd10) ) WRITE(numout,*) cd10 4002 ENDIF 1881 1882 ! force to open ocean.output file 1883 IF( numout == 6 ) CALL ctl_opn( numout, 'ocean.output', 'APPEND', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. 
) 1884 1885 WRITE(numout,cform_err) 1886 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1887 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1888 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1889 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1890 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1891 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1892 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1893 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1894 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1895 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 1896 4003 1897 CALL FLUSH(numout ) 4004 1898 IF( numstp /= -1 ) CALL FLUSH(numstp ) 4005 IF( num sol /= -1 ) CALL FLUSH(numsol)1899 IF( numrun /= -1 ) CALL FLUSH(numrun ) 4006 1900 IF( numevo_ice /= -1 ) CALL FLUSH(numevo_ice) 4007 1901 ! 4008 1902 IF( cd1 == 'STOP' ) THEN 4009 IF(lwp)WRITE(numout,*) 'huge E-R-R-O-R : immediate stop'4010 CALL mppstop( )1903 WRITE(numout,*) 'huge E-R-R-O-R : immediate stop' 1904 CALL mppstop(ld_force_abort = .true.) 4011 1905 ENDIF 4012 1906 ! … … 4029 1923 IF(lwp) THEN 4030 1924 WRITE(numout,cform_war) 4031 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd14032 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd24033 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd34034 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd44035 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd54036 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd64037 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd74038 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd84039 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd94040 IF( PRESENT(cd10) ) WRITE(numout,*) cd101925 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1926 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1927 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1928 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1929 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1930 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1931 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1932 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1933 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1934 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 4041 1935 ENDIF 4042 1936 CALL FLUSH(numout) … … 4073 1967 IF( karea > 1 ) WRITE(clfile, "(a,'_',i4.4)") TRIM(clfile), karea-1 4074 1968 ENDIF 1969 #if defined key_agrif 1970 IF( .NOT. Agrif_Root() ) clfile = TRIM(Agrif_CFixed())//'_'//TRIM(clfile) 1971 knum=Agrif_Get_Unit() 1972 #else 4075 1973 knum=get_unit() 1974 #endif 1975 IF( TRIM(cdfile) == '/dev/null' ) clfile = TRIM(cdfile) ! force the use of /dev/null 4076 1976 ! 4077 1977 iost=0 4078 IF( cdacce(1:6) == 'DIRECT' ) THEN 4079 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh, ERR=100, IOSTAT=iost ) 1978 IF( cdacce(1:6) == 'DIRECT' ) THEN ! cdacce has always more than 6 characters 1979 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh , ERR=100, IOSTAT=iost ) 1980 ELSE IF( TRIM(cdstat) == 'APPEND' ) THEN ! cdstat can have less than 6 characters 1981 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS='UNKNOWN', POSITION='APPEND', ERR=100, IOSTAT=iost ) 4080 1982 ELSE 4081 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 4082 ENDIF 1983 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1984 ENDIF 1985 IF( iost /= 0 .AND. TRIM(clfile) == '/dev/null' ) & ! 
for windows 1986 & OPEN(UNIT=knum,FILE='NUL', FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 4083 1987 IF( iost == 0 ) THEN 4084 1988 IF(ldwp) THEN 4085 WRITE(kout,*) ' file : ', clfile,' open ok'1989 WRITE(kout,*) ' file : ', TRIM(clfile),' open ok' 4086 1990 WRITE(kout,*) ' unit = ', knum 4087 1991 WRITE(kout,*) ' status = ', cdstat … … 4095 1999 IF(ldwp) THEN 4096 2000 WRITE(kout,*) 4097 WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile2001 WRITE(kout,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 4098 2002 WRITE(kout,*) ' ======= === ' 4099 2003 WRITE(kout,*) ' unit = ', knum … … 4104 2008 WRITE(kout,*) ' we stop. verify the file ' 4105 2009 WRITE(kout,*) 2010 ELSE !!! Force writing to make sure we get the information - at least once - in this violent STOP!! 2011 WRITE(*,*) 2012 WRITE(*,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 2013 WRITE(*,*) ' ======= === ' 2014 WRITE(*,*) ' unit = ', knum 2015 WRITE(*,*) ' status = ', cdstat 2016 WRITE(*,*) ' form = ', cdform 2017 WRITE(*,*) ' access = ', cdacce 2018 WRITE(*,*) ' iostat = ', iost 2019 WRITE(*,*) ' we stop. verify the file ' 2020 WRITE(*,*) 4106 2021 ENDIF 2022 CALL FLUSH( kout ) 4107 2023 STOP 'ctl_opn bad opening' 4108 2024 ENDIF … … 4121 2037 INTEGER , INTENT(inout) :: kios ! IO status after reading the namelist 4122 2038 CHARACTER(len=*), INTENT(in ) :: cdnam ! group name of namelist for which error occurs 4123 CHARACTER(len= 4) :: clios ! string to convert iostat in character for print2039 CHARACTER(len=5) :: clios ! string to convert iostat in character for print 4124 2040 LOGICAL , INTENT(in ) :: ldwp ! boolean term for print 4125 2041 !!---------------------------------------------------------------------- 4126 2042 ! 4127 WRITE (clios, '(I 4.0)') kios2043 WRITE (clios, '(I5.0)') kios 4128 2044 IF( kios < 0 ) THEN 4129 2045 CALL ctl_warn( 'end of record or file while reading namelist ' &
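The tic_tac routine introduced in this changeset alternates between two counters: a call with ld_tic = .TRUE. timestamps the start of a communication phase and folds the elapsed time since the previous tac into compute_time, while a call with ld_tic = .FALSE. folds the elapsed wait into waiting_time (index 1 for point-to-point, index 2 for global exchanges). The self-contained sketch below reproduces only that bookkeeping, using SYSTEM_CLOCK in place of MPI_Wtime so it runs without MPI; every name in it (demo_tic_tac, do_some_work, ...) is illustrative and not part of lib_mpp.

PROGRAM demo_tic_tac_pattern
   !! Sketch of the tic/tac bookkeeping performed by lib_mpp's tic_tac:
   !! "tic" marks the start of a communication phase, "tac" its end; time
   !! between a tac and the next tic is counted as compute time.  Uses
   !! SYSTEM_CLOCK instead of MPI_Wtime; all names are illustrative only.
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)
   REAL(wp) :: tic_wt = 0._wp        ! start of the current waiting interval
   REAL(wp) :: tic_ct = 0._wp        ! start of the current compute interval
   REAL(wp) :: waiting_time = 0._wp, compute_time = 0._wp
   INTEGER  :: istep
   !
   DO istep = 1, 3
      CALL demo_tic_tac( .TRUE.  )   ! tic : a (mock) exchange starts
      CALL do_some_work()            ! stands in for the MPI communication
      CALL demo_tic_tac( .FALSE. )   ! tac : the exchange is over
      CALL do_some_work()            ! stands in for real computation
   END DO
   PRINT '(A,F12.6,A,F12.6)', ' waiting_time =', waiting_time, '   compute_time =', compute_time
   !
CONTAINS
   !
   REAL(wp) FUNCTION wallclock()
      INTEGER :: icount, irate
      CALL SYSTEM_CLOCK( icount, irate )
      wallclock = REAL(icount,wp) / REAL(irate,wp)
   END FUNCTION wallclock
   !
   SUBROUTINE demo_tic_tac( ld_tic )
      LOGICAL, INTENT(in) :: ld_tic
      IF( ld_tic ) THEN                 ! start of a communication phase
         tic_wt = wallclock()                                          ! open the waiting interval
         IF( tic_ct > 0._wp ) compute_time = compute_time + wallclock() - tic_ct
      ELSE                              ! end of a communication phase
         waiting_time = waiting_time + wallclock() - tic_wt            ! close the waiting interval
         tic_ct = wallclock()                                          ! open the compute interval
      ENDIF
   END SUBROUTINE demo_tic_tac
   !
   SUBROUTINE do_some_work()
      INTEGER  :: ji
      REAL(wp) :: zs
      zs = 0._wp
      DO ji = 1, 1000000
         zs = zs + SQRT( REAL(ji,wp) )
      END DO
      IF( zs < 0._wp ) PRINT *, zs      ! keep the loop from being optimised away
   END SUBROUTINE do_some_work
   !
END PROGRAM demo_tic_tac_pattern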
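Each #define / #include / #undef cycle in the dummy branch instantiates one routine (operation x type x dimensionality) from the shared template mpp_allreduce_generic.h90, so a single template yields the whole mppmax / mppmin / mppsum family gathered by the generic interfaces at the top of that branch. The module below is not that template: it is only a hypothetical sketch of what two such instances (scalar and 1D maximum) can reduce to in the single-process, non-MPI case, where every "reduction" is the identity; routine names and argument lists are illustrative, not NEMO's.

MODULE demo_allreduce_dummy
   !! Hypothetical sketch of the kind of routines an OPERATION_MAX expansion of
   !! a generic include can boil down to in the single-process (non-MPI) case,
   !! where the "global" maximum of the local data is the local data itself.
   !! Routine names and interfaces are illustrative, not NEMO's.
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision, like NEMO's wp
   !
   INTERFACE demo_max                          ! one generic name gathering the specific instances,
      MODULE PROCEDURE demo_max_0d, demo_max_1d             ! mirroring the mpp_max interface above
   END INTERFACE
   !
CONTAINS
   !
   SUBROUTINE demo_max_0d( ptab )              ! "DIM_0d" instance : scalar argument
      REAL(wp), INTENT(inout) :: ptab
      ptab = ptab                              ! one process only : nothing to reduce
   END SUBROUTINE demo_max_0d
   !
   SUBROUTINE demo_max_1d( ptab )              ! "DIM_1d" instance : 1D array argument
      REAL(wp), INTENT(inout) :: ptab(:)
      ptab(:) = ptab(:)                        ! one process only : nothing to reduce
   END SUBROUTINE demo_max_1d
   !
END MODULE demo_allreduce_dummy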
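The new APPEND branch of ctl_opn relies on standard Fortran OPEN semantics: 'APPEND' is not a legal STATUS value, so the file is opened with STATUS='UNKNOWN' and POSITION='APPEND', which creates the file when it does not exist and otherwise positions the next write after the existing records (this is how ocean.output is reopened by ctl_stop in this changeset). A minimal standalone illustration, with an arbitrary file name and a NEWUNIT-chosen unit:

PROGRAM demo_append_open
   !! Minimal illustration of the OPEN options used by ctl_opn's new APPEND branch:
   !! STATUS='UNKNOWN' + POSITION='APPEND' creates the file when it is missing and
   !! otherwise appends after the existing records.  File name and unit are arbitrary.
   IMPLICIT NONE
   INTEGER :: iunit, iost
   !
   OPEN( NEWUNIT=iunit, FILE='demo_append.txt', FORM='FORMATTED', ACCESS='SEQUENTIAL', &
      &  STATUS='UNKNOWN', POSITION='APPEND', IOSTAT=iost )
   IF( iost /= 0 ) STOP 'demo_append_open : open failed'
   WRITE(iunit,*) 'one more line appended at the end of the file'
   CLOSE(iunit)
END PROGRAM demo_append_open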