Changeset 8738 for branches/UKMO/dev_r8183_ICEMODEL_svn_removed/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
- Timestamp: 2017-11-17T15:40:12+01:00
- File: 1 edited
branches/UKMO/dev_r8183_ICEMODEL_svn_removed/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r8733 r8738 8 8 !! 8.0 ! 1998 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 9 9 !! ! 1998 (J.M. Molines) Open boundary conditions 10 !! NEMO 1.0 ! 2003 (J. -M. Molines, G. Madec) F90, free form10 !! NEMO 1.0 ! 2003 (J.M. Molines, G. Madec) F90, free form 11 11 !! ! 2003 (J.M. Molines) add mpp_ini_north(_3d,_2d) 12 12 !! - ! 2004 (R. Bourdalle Badie) isend option in mpi … … 19 19 !! 3.2 ! 2009 (O. Marti) add mpp_ini_znl 20 20 !! 4.0 ! 2011 (G. Madec) move ctl_ routines from in_out_manager 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add 'mpp_lnk_bdy_3d', 'mpp_lnk_obc_3d', 22 !! 'mpp_lnk_bdy_2d' and 'mpp_lnk_obc_2d' routines and update 23 !! the mppobc routine to optimize the BDY and OBC communications 24 !! 3.5 ! 2013 ( C. Ethe, G. Madec ) message passing arrays as local variables 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add mpp_lnk_bdy_3d/2d routines to optimize the BDY comm. 22 !! 3.5 ! 2013 (C. Ethe, G. Madec) message passing arrays as local variables 25 23 !! 3.5 ! 2013 (S.Mocavero, I.Epicoco - CMCC) north fold optimizations 26 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added 'mpp_lnk_2d_multiple', 'mpp_lbc_north_2d_multiple', 'mpp_max_multiple' 24 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added '_multiple' case for 2D lbc and max 25 !! 4.0 ! 2017 (G. Madec) automatique allocation of array argument (use any 3rd dimension) 26 !! - ! 2017 (G. Madec) create generic.h90 files to generate all lbc and north fold routines 27 27 !!---------------------------------------------------------------------- 28 28 … … 41 41 !! mynode : indentify the processor unit 42 42 !! mpp_lnk : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d) 43 !! mpp_lnk_3d_gather : Message passing manadgement for two 3D arrays44 43 !! mpp_lnk_e : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e) 45 44 !! mpp_lnk_icb : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb) 46 45 !! mpprecv : 47 !! mppsend : SUBROUTINE mpp_ini_znl46 !! mppsend : 48 47 !! mppscatter : 49 48 !! mppgather : … … 56 55 !! mppstop : 57 56 !! mpp_ini_north : initialisation of north fold 58 !! mpp_lbc_north : north fold processors gathering57 !!gm !! mpp_lbc_north : north fold processors gathering 59 58 !! mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo 60 59 !! mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs … … 67 66 IMPLICIT NONE 68 67 PRIVATE 69 68 69 INTERFACE mpp_nfd 70 MODULE PROCEDURE mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 71 MODULE PROCEDURE mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 72 END INTERFACE 73 74 ! Interface associated to the mpp_lnk_... routines is defined in lbclnk 75 PUBLIC mpp_lnk_2d , mpp_lnk_3d , mpp_lnk_4d 76 PUBLIC mpp_lnk_2d_ptr, mpp_lnk_3d_ptr, mpp_lnk_4d_ptr 77 PUBLIC mpp_lnk_2d_e 78 ! 79 !!gm this should be useless 80 PUBLIC mpp_nfd_2d , mpp_nfd_3d , mpp_nfd_4d 81 PUBLIC mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr 82 !!gm end 83 ! 
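Editor's note: the new INTERFACE mpp_nfd block above maps one generic name onto rank-specific (and pointer-array) implementations generated later in the file. A minimal sketch of how such a generic resolves on argument rank (module name, reduced argument lists and the fold stand-in are invented for the example, not the module's actual signatures):

    MODULE nfd_demo                                   ! hypothetical demo module
       IMPLICIT NONE
       INTERFACE mpp_nfd                              ! one generic name ...
          MODULE PROCEDURE mpp_nfd_2d, mpp_nfd_3d     ! ... resolved on array rank
       END INTERFACE
    CONTAINS
       SUBROUTINE mpp_nfd_2d( ptab, cd_nat, psgn )
          REAL            , DIMENSION(:,:), INTENT(inout) :: ptab
          CHARACTER(len=1), INTENT(in   ) :: cd_nat
          REAL            , INTENT(in   ) :: psgn
          ptab(:,SIZE(ptab,2)) = psgn * ptab(:,SIZE(ptab,2))       ! stand-in for the 2D fold
       END SUBROUTINE mpp_nfd_2d
       SUBROUTINE mpp_nfd_3d( ptab, cd_nat, psgn )
          REAL            , DIMENSION(:,:,:), INTENT(inout) :: ptab
          CHARACTER(len=1), INTENT(in   ) :: cd_nat
          REAL            , INTENT(in   ) :: psgn
          ptab(:,SIZE(ptab,2),:) = psgn * ptab(:,SIZE(ptab,2),:)   ! stand-in for the 3D fold
       END SUBROUTINE mpp_nfd_3d
    END MODULE nfd_demo

A call with a 2D actual argument selects mpp_nfd_2d, a 3D actual selects mpp_nfd_3d, which is why callers of mpp_nfd never need to name the rank explicitly.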
70 84 PUBLIC ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam 71 85 PUBLIC mynode, mppstop, mppsync, mpp_comm_free 72 PUBLIC mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e 86 PUBLIC mpp_ini_north, mpp_lbc_north_e 87 !!gm PUBLIC mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e 88 PUBLIC mpp_lbc_north_icb, mpp_lnk_2d_icb 73 89 PUBLIC mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc 74 90 PUBLIC mpp_max_multiple 75 PUBLIC mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e 76 PUBLIC mpp_lnk_2d_9 , mpp_lnk_2d_multiple 77 PUBLIC mpp_lnk_sum_3d, mpp_lnk_sum_2d 91 !!gm PUBLIC mpp_lnk_2d_9 92 !!gm PUBLIC mpp_lnk_sum_3d, mpp_lnk_sum_2d 78 93 PUBLIC mppscatter, mppgather 79 94 PUBLIC mpp_ini_ice, mpp_ini_znl … … 81 96 PUBLIC mppsend, mpprecv ! needed by TAM and ICB routines 82 97 PUBLIC mpp_lnk_bdy_2d, mpp_lnk_bdy_3d 83 PUBLIC mpp_lbc_north_icb, mpp_lnk_2d_icb84 98 PUBLIC mpprank 85 86 TYPE arrayptr87 REAL , DIMENSION (:,:), POINTER :: pt2d88 END TYPE arrayptr89 PUBLIC arrayptr90 99 91 100 !! * Interfaces … … 101 110 INTERFACE mpp_sum 102 111 MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real, & 103 112 & mppsum_realdd, mppsum_a_realdd 104 113 END INTERFACE 105 INTERFACE mpp_lbc_north106 MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d107 END INTERFACE114 !!gm INTERFACE mpp_lbc_north 115 !!gm MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d 116 !!gm END INTERFACE 108 117 INTERFACE mpp_minloc 109 118 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d … … 112 121 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 113 122 END INTERFACE 114 115 123 INTERFACE mpp_max_multiple 116 124 MODULE PROCEDURE mppmax_real_multiple … … 137 145 138 146 ! variables used in case of sea-ice 139 INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)140 INTEGER :: ngrp_iworld ! group ID for the world processors (for rheology)141 INTEGER :: ngrp_ice ! group ID for the ice processors (for rheology)142 INTEGER :: ndim_rank_ice ! number of 'ice' processors143 INTEGER :: n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm147 INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice (public so that it can be freed in icethd) 148 INTEGER :: ngrp_iworld ! group ID for the world processors (for rheology) 149 INTEGER :: ngrp_ice ! group ID for the ice processors (for rheology) 150 INTEGER :: ndim_rank_ice ! number of 'ice' processors 151 INTEGER :: n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm 144 152 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_ice ! dimension ndim_rank_ice 145 153 146 154 ! variables used for zonal integration 147 155 INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average 148 LOGICAL, PUBLIC :: l_znl_root ! True on the 'left'most processor on the same row149 INTEGER :: ngrp_znl !group ID for the znl processors150 INTEGER :: ndim_rank_znl !number of processors on the same zonal average156 LOGICAL, PUBLIC :: l_znl_root !: True on the 'left'most processor on the same row 157 INTEGER :: ngrp_znl ! group ID for the znl processors 158 INTEGER :: ndim_rank_znl ! number of processors on the same zonal average 151 159 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_znl ! dimension ndim_rank_znl, number of the procs into the same znl domain 152 160 153 161 ! North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM) 154 INTEGER, PUBLIC :: ngrp_world ! group ID for the world processors155 INTEGER, PUBLIC :: ngrp_opa ! 
group ID for the opa processors156 INTEGER, PUBLIC :: ngrp_north ! group ID for the northern processors (to be fold)157 INTEGER, PUBLIC :: ncomm_north ! communicator made by the processors belonging to ngrp_north158 INTEGER, PUBLIC :: ndim_rank_north ! number of 'sea' processor in the northern line (can be /= jpni !)159 INTEGER, PUBLIC :: njmppmax ! value of njmpp for the processors of the northern line160 INTEGER, PUBLIC :: north_root ! number (in the comm_opa) of proc 0 in the northern comm161 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC :: nrank_north !dimension ndim_rank_north162 INTEGER, PUBLIC :: ngrp_world !: group ID for the world processors 163 INTEGER, PUBLIC :: ngrp_opa !: group ID for the opa processors 164 INTEGER, PUBLIC :: ngrp_north !: group ID for the northern processors (to be fold) 165 INTEGER, PUBLIC :: ncomm_north !: communicator made by the processors belonging to ngrp_north 166 INTEGER, PUBLIC :: ndim_rank_north !: number of 'sea' processor in the northern line (can be /= jpni !) 167 INTEGER, PUBLIC :: njmppmax !: value of njmpp for the processors of the northern line 168 INTEGER, PUBLIC :: north_root !: number (in the comm_opa) of proc 0 in the northern comm 169 INTEGER, PUBLIC, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_north !: dimension ndim_rank_north 162 170 163 171 ! Type of send : standard, buffered, immediate 164 CHARACTER(len=1), PUBLIC :: cn_mpi_send !type od mpi send/recieve (S=standard, B=bsend, I=isend)165 LOGICAL , PUBLIC :: l_isend = .FALSE. !isend use indicator (T if cn_mpi_send='I')166 INTEGER , PUBLIC :: nn_buffer !size of the buffer in case of mpi_bsend167 168 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon! buffer in case of bsend169 170 LOGICAL, PUBLIC :: ln_nnogather !namelist control of northfold comms171 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. !internal control of northfold comms172 INTEGER, PUBLIC :: ityp 173 !!---------------------------------------------------------------------- 174 !! NEMO/OPA 3.3 , NEMO Consortium (2010)172 CHARACTER(len=1), PUBLIC :: cn_mpi_send !: type od mpi send/recieve (S=standard, B=bsend, I=isend) 173 LOGICAL , PUBLIC :: l_isend = .FALSE. !: isend use indicator (T if cn_mpi_send='I') 174 INTEGER , PUBLIC :: nn_buffer !: size of the buffer in case of mpi_bsend 175 176 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 177 178 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms 179 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. !: internal control of northfold comms 180 181 !!---------------------------------------------------------------------- 182 !! NEMO/OPA 4.0 , NEMO Consortium (2017) 175 183 !! $Id$ 176 184 !! Software governed by the CeCILL licence (NEMOGCM/NEMO_CeCILL.txt) … … 178 186 CONTAINS 179 187 180 181 FUNCTION mynode( ldtxt, ldname, kumnam_ref , kumnam_cfg , kumond , kstop, localComm ) 188 FUNCTION mynode( ldtxt, ldname, kumnam_ref, kumnam_cfg, kumond, kstop, localComm ) 182 189 !!---------------------------------------------------------------------- 183 190 !! *** routine mynode *** … … 204 211 WRITE(ldtxt(ii),*) '~~~~~~ ' ; ii = ii + 1 205 212 ! 206 207 213 REWIND( kumnam_ref ) ! Namelist nammpp in reference namelist: mpi variables 208 214 READ ( kumnam_ref, nammpp, IOSTAT = ios, ERR = 901) 209 215 901 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp ) 210 216 ! 211 217 REWIND( kumnam_cfg ) ! 
Namelist nammpp in configuration namelist: mpi variables 212 218 READ ( kumnam_cfg, nammpp, IOSTAT = ios, ERR = 902 ) 213 219 902 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp ) 214 220 ! 215 221 ! ! control print 216 222 WRITE(ldtxt(ii),*) ' Namelist nammpp' ; ii = ii + 1 217 223 WRITE(ldtxt(ii),*) ' mpi send type cn_mpi_send = ', cn_mpi_send ; ii = ii + 1 218 224 WRITE(ldtxt(ii),*) ' size exported buffer nn_buffer = ', nn_buffer,' bytes'; ii = ii + 1 219 225 ! 220 226 #if defined key_agrif 221 227 IF( .NOT. Agrif_Root() ) THEN … … 225 231 ENDIF 226 232 #endif 227 228 IF(jpnij < 1)THEN 229 ! If jpnij is not specified in namelist then we calculate it - this 230 ! means there will be no land cutting out. 231 jpnij = jpni * jpnj 232 END IF 233 234 IF( (jpni < 1) .OR. (jpnj < 1) )THEN 233 ! 234 IF( jpnij < 1 ) THEN ! If jpnij is not specified in namelist then we calculate it 235 jpnij = jpni * jpnj ! this means there will be no land cutting out. 236 ENDIF 237 238 IF( jpni < 1 .OR. jpnj < 1 ) THEN 235 239 WRITE(ldtxt(ii),*) ' jpni, jpnj and jpnij will be calculated automatically' ; ii = ii + 1 236 240 ELSE … … 238 242 WRITE(ldtxt(ii),*) ' processor grid extent in j jpnj = ',jpnj ; ii = ii + 1 239 243 WRITE(ldtxt(ii),*) ' number of local domains jpnij = ',jpnij ; ii = ii + 1 240 END 244 ENDIF 241 245 242 246 WRITE(ldtxt(ii),*) ' avoid use of mpi_allgather at the north fold ln_nnogather = ', ln_nnogather ; ii = ii + 1 … … 268 272 kstop = kstop + 1 269 273 END SELECT 270 ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN 274 ! 275 ELSEIF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN 271 276 WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator ' ; ii = ii + 1 272 277 WRITE(ldtxt(ii),*) ' without calling MPI_Init before ! ' ; ii = ii + 1 … … 309 314 310 315 #if defined key_agrif 311 IF (Agrif_Root()) THEN316 IF( Agrif_Root() ) THEN 312 317 CALL Agrif_MPI_Init(mpi_comm_opa) 313 318 ELSE … … 329 334 END FUNCTION mynode 330 335 331 332 SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval ) 333 !!---------------------------------------------------------------------- 334 !! *** routine mpp_lnk_3d *** 335 !! 336 !! ** Purpose : Message passing manadgement 337 !! 338 !! ** Method : Use mppsend and mpprecv function for passing mask 339 !! between processors following neighboring subdomains. 340 !! domain parameters 341 !! nlci : first dimension of the local subdomain 342 !! nlcj : second dimension of the local subdomain 343 !! nbondi : mark for "east-west local boundary" 344 !! nbondj : mark for "north-south local boundary" 345 !! noea : number for local neighboring processors 346 !! nowe : number for local neighboring processors 347 !! noso : number for local neighboring processors 348 !! nono : number for local neighboring processors 349 !! 350 !! ** Action : ptab with update value at its periphery 351 !! 352 !!---------------------------------------------------------------------- 353 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 354 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 355 ! ! = T , U , V , F , W points 356 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 357 ! ! = 1. , the sign is kept 358 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 359 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 360 ! 
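Editor's note: mynode above reads the same nammpp group twice, first from the reference namelist and then from the configuration namelist, so site settings override the defaults; each READ is guarded by IOSTAT with failures handed to ctl_nam. A stripped-down, standalone sketch of that two-pass pattern (the plain STOP replaces ctl_nam here for brevity):

    PROGRAM nammpp_demo
       IMPLICIT NONE
       INTEGER          :: ios, inum
       CHARACTER(len=1) :: cn_mpi_send  = 'S'        ! defaults, as in lib_mpp
       INTEGER          :: nn_buffer    = 0
       LOGICAL          :: ln_nnogather = .FALSE.
       NAMELIST/nammpp/ cn_mpi_send, nn_buffer, ln_nnogather
       !
       OPEN ( NEWUNIT=inum, FILE='namelist_ref', STATUS='old', ACTION='read' )
       READ ( inum, nammpp, IOSTAT=ios )             ! pass 1: reference namelist
       IF( ios /= 0 )   STOP 'nammpp in reference namelist'
       CLOSE( inum )
       !
       OPEN ( NEWUNIT=inum, FILE='namelist_cfg', STATUS='old', ACTION='read' )
       READ ( inum, nammpp, IOSTAT=ios )             ! pass 2: configuration overrides
       IF( ios /= 0 )   STOP 'nammpp in configuration namelist'
       CLOSE( inum )
       PRINT *, 'cn_mpi_send=', cn_mpi_send, '  nn_buffer=', nn_buffer
    END PROGRAM nammpp_demo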
361 INTEGER :: ji, jj, jk, jl ! dummy loop indices 362 INTEGER :: imigr, iihom, ijhom ! temporary integers 363 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 364 REAL(wp) :: zland 365 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 366 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north 367 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east 368 !!---------------------------------------------------------------------- 369 370 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & 371 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) 372 373 ! 374 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 375 ELSE ; zland = 0._wp ! zero by default 376 ENDIF 377 378 ! 1. standard boundary treatment 379 ! ------------------------------ 380 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 381 ! 382 ! WARNING ptab is defined only between nld and nle 383 DO jk = 1, jpk 384 DO jj = nlcj+1, jpj ! added line(s) (inner only) 385 ptab(nldi :nlei , jj ,jk) = ptab(nldi:nlei, nlej,jk) 386 ptab(1 :nldi-1, jj ,jk) = ptab(nldi , nlej,jk) 387 ptab(nlei+1:nlci , jj ,jk) = ptab( nlei, nlej,jk) 388 END DO 389 DO ji = nlci+1, jpi ! added column(s) (full) 390 ptab(ji ,nldj :nlej ,jk) = ptab( nlei,nldj:nlej,jk) 391 ptab(ji ,1 :nldj-1,jk) = ptab( nlei,nldj ,jk) 392 ptab(ji ,nlej+1:jpj ,jk) = ptab( nlei, nlej,jk) 393 END DO 394 END DO 395 ! 396 ELSE ! standard close or cyclic treatment 397 ! 398 ! ! East-West boundaries 399 ! !* Cyclic east-west 400 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 401 ptab( 1 ,:,:) = ptab(jpim1,:,:) 402 ptab(jpi,:,:) = ptab( 2 ,:,:) 403 ELSE !* closed 404 IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point 405 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 406 ENDIF 407 ! North-south cyclic 408 IF ( nbondj == 2 .AND. jperio == 7 ) THEN !* cyclic north south only with no mpp split in latitude 409 ptab(:,1 , :) = ptab(:, jpjm1,:) 410 ptab(:,jpj,:) = ptab(:, 2,:) 411 ELSE ! ! North-South boundaries (closed) 412 IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point 413 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 414 ENDIF 415 ! 416 ENDIF 417 418 ! 2. East and west directions exchange 419 ! ------------------------------------ 420 ! we play with the neigbours AND the row number because of the periodicity 421 ! 422 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 423 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 424 iihom = nlci-nreci 425 DO jl = 1, jpreci 426 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 427 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 428 END DO 429 END SELECT 430 ! 431 ! ! Migrations 432 imigr = jpreci * jpj * jpk 433 ! 
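Editor's note: the east-west exchange in mpp_lnk_3d moves imigr = jpreci*jpj*jpk values per direction through mppsend/mpprecv, with mpi_wait needed only when immediate sends are selected (l_isend). A reduced sketch of one leg of that swap in plain MPI; MPI_SENDRECV is used for brevity, whereas the module itself posts sends and receives separately and supports standard, buffered and immediate modes:

    SUBROUTINE ew_halo( ptab, knoea, knowe, kpreci )
       USE mpi                                        ! sketch only, not mppsend/mpprecv
       IMPLICIT NONE
       REAL(KIND=8), DIMENSION(:,:,:), INTENT(inout) :: ptab
       INTEGER, INTENT(in) :: knoea, knowe            ! neighbour ranks (MPI_PROC_NULL if none)
       INTEGER, INTENT(in) :: kpreci                  ! halo width
       INTEGER :: ipi, ipj, ipk, icount, ierr
       REAL(KIND=8), ALLOCATABLE :: zsend(:,:,:), zrecv(:,:,:)
       ipi = SIZE(ptab,1)   ;   ipj = SIZE(ptab,2)   ;   ipk = SIZE(ptab,3)
       icount = kpreci * ipj * ipk                    ! same count as imigr above
       ALLOCATE( zsend(kpreci,ipj,ipk), zrecv(kpreci,ipj,ipk) )
       zsend(:,:,:) = ptab(ipi-2*kpreci+1:ipi-kpreci,:,:)         ! pack inner east columns
       CALL MPI_SENDRECV( zsend, icount, MPI_DOUBLE_PRECISION, knoea, 2,   &
          &               zrecv, icount, MPI_DOUBLE_PRECISION, knowe, 2,   &
          &               MPI_COMM_WORLD, MPI_STATUS_IGNORE, ierr )
       IF( knowe /= MPI_PROC_NULL )   ptab(1:kpreci,:,:) = zrecv(:,:,:)    ! unpack west halo
       DEALLOCATE( zsend, zrecv )
    END SUBROUTINE ew_halo

The real routine performs both legs (east and west) according to nbondi, and for cn_mpi_send='I' waits on the outstanding isend before reusing the buffers.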
434 SELECT CASE ( nbondi ) 435 CASE ( -1 ) 436 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) 437 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 438 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 439 CASE ( 0 ) 440 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 441 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) 442 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) 443 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 444 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 445 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 446 CASE ( 1 ) 447 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) 448 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) 449 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 450 END SELECT 451 ! 452 ! ! Write Dirichlet lateral conditions 453 iihom = nlci-jpreci 454 ! 455 SELECT CASE ( nbondi ) 456 CASE ( -1 ) 457 DO jl = 1, jpreci 458 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 459 END DO 460 CASE ( 0 ) 461 DO jl = 1, jpreci 462 ptab(jl ,:,:) = zt3we(:,jl,:,2) 463 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 464 END DO 465 CASE ( 1 ) 466 DO jl = 1, jpreci 467 ptab(jl ,:,:) = zt3we(:,jl,:,2) 468 END DO 469 END SELECT 470 471 ! 3. North and south directions 472 ! ----------------------------- 473 ! always closed : we play only with the neigbours 474 ! 475 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 476 ijhom = nlcj-nrecj 477 DO jl = 1, jprecj 478 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 479 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 480 END DO 481 ENDIF 482 ! 483 ! ! Migrations 484 imigr = jprecj * jpi * jpk 485 ! 486 SELECT CASE ( nbondj ) 487 CASE ( -1 ) 488 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) 489 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 490 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 491 CASE ( 0 ) 492 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 493 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) 494 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) 495 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 496 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 497 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 498 CASE ( 1 ) 499 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) 500 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) 501 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 502 END SELECT 503 ! 504 ! ! Write Dirichlet lateral conditions 505 ijhom = nlcj-jprecj 506 ! 507 SELECT CASE ( nbondj ) 508 CASE ( -1 ) 509 DO jl = 1, jprecj 510 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 511 END DO 512 CASE ( 0 ) 513 DO jl = 1, jprecj 514 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 515 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 516 END DO 517 CASE ( 1 ) 518 DO jl = 1, jprecj 519 ptab(:,jl,:) = zt3sn(:,jl,:,2) 520 END DO 521 END SELECT 522 523 ! 4. north fold treatment 524 ! ----------------------- 525 ! 526 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 527 ! 528 SELECT CASE ( jpni ) 529 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 530 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 531 END SELECT 532 ! 533 ENDIF 534 ! 535 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) 536 ! 537 END SUBROUTINE mpp_lnk_3d 538 539 540 SUBROUTINE mpp_lnk_2d_multiple( pt2d_array , type_array , psgn_array , num_fields , cd_mpp, pval ) 541 !!---------------------------------------------------------------------- 542 !! *** routine mpp_lnk_2d_multiple *** 543 !! 544 !! ** Purpose : Message passing management for multiple 2d arrays 545 !! 546 !! 
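Editor's note: mpp_lnk_3d above closes with the north-fold step, applied locally when the whole northern row lives on one process and through the dedicated northern communicator otherwise. Schematically (taken from the routine above, bodies elided):

    IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
       SELECT CASE ( jpni )
       CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! 1 proc on the row: purely local fold
       CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! row split over jpni procs: gather over ncomm_north
       END SELECT
    ENDIF

With ln_nnogather=.true. the collective gather is avoided in favour of point-to-point exchanges (the l_north_nogather flag), a case the new generic mpp_nfd_* routines introduced by this changeset are set up to handle.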
** Method : Use mppsend and mpprecv function for passing mask 547 !! between processors following neighboring subdomains. 548 !! domain parameters 549 !! nlci : first dimension of the local subdomain 550 !! nlcj : second dimension of the local subdomain 551 !! nbondi : mark for "east-west local boundary" 552 !! nbondj : mark for "north-south local boundary" 553 !! noea : number for local neighboring processors 554 !! nowe : number for local neighboring processors 555 !! noso : number for local neighboring processors 556 !! nono : number for local neighboring processors 557 !!---------------------------------------------------------------------- 558 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: type_array ! define the nature of ptab array grid-points 559 ! ! = T , U , V , F , W and I points 560 REAL(wp) , DIMENSION(:), INTENT(in ) :: psgn_array ! =-1 the sign change across the north fold boundary 561 ! ! = 1. , the sign is kept 562 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 563 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 564 !! 565 INTEGER :: ji, jj, jl ! dummy loop indices 566 INTEGER :: ii !!MULTI SEND DUMMY LOOP INDICES 567 INTEGER :: imigr, iihom, ijhom ! temporary integers 568 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 569 INTEGER :: num_fields 570 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array 571 REAL(wp) :: zland 572 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 573 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 574 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 575 576 !!---------------------------------------------------------------------- 577 ! 578 ALLOCATE( zt2ns(jpi,jprecj,2*num_fields), zt2sn(jpi,jprecj,2*num_fields), & 579 & zt2ew(jpj,jpreci,2*num_fields), zt2we(jpj,jpreci,2*num_fields) ) 580 ! 581 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 582 ELSE ; zland = 0._wp ! zero by default 583 ENDIF 584 585 ! 1. standard boundary treatment 586 ! ------------------------------ 587 ! 588 !First Array 589 DO ii = 1 , num_fields 590 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 591 ! 592 ! WARNING pt2d is defined only between nld and nle 593 DO jj = nlcj+1, jpj ! added line(s) (inner only) 594 pt2d_array(ii)%pt2d(nldi :nlei , jj) = pt2d_array(ii)%pt2d(nldi:nlei, nlej) 595 pt2d_array(ii)%pt2d(1 :nldi-1, jj) = pt2d_array(ii)%pt2d(nldi , nlej) 596 pt2d_array(ii)%pt2d(nlei+1:nlci , jj) = pt2d_array(ii)%pt2d( nlei, nlej) 597 END DO 598 DO ji = nlci+1, jpi ! added column(s) (full) 599 pt2d_array(ii)%pt2d(ji, nldj :nlej ) = pt2d_array(ii)%pt2d(nlei, nldj:nlej) 600 pt2d_array(ii)%pt2d(ji, 1 :nldj-1) = pt2d_array(ii)%pt2d(nlei, nldj ) 601 pt2d_array(ii)%pt2d(ji, nlej+1:jpj ) = pt2d_array(ii)%pt2d(nlei, nlej) 602 END DO 603 ! 604 ELSE ! standard close or cyclic treatment 605 ! 606 ! ! East-West boundaries 607 IF( nbondi == 2 .AND. & ! Cyclic east-west 608 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 609 pt2d_array(ii)%pt2d( 1 , : ) = pt2d_array(ii)%pt2d( jpim1, : ) ! west 610 pt2d_array(ii)%pt2d( jpi , : ) = pt2d_array(ii)%pt2d( 2 , : ) ! east 611 ELSE ! closed 612 IF( .NOT. type_array(ii) == 'F' ) pt2d_array(ii)%pt2d( 1 : jpreci,:) = zland ! south except F-point 613 pt2d_array(ii)%pt2d(nlci-jpreci+1 : jpi ,:) = zland ! north 614 ENDIF 615 ! Noth-South boundaries 616 IF ( nbondj == 2 .AND. 
jperio == 7 ) THEN !* cyclic north south 617 pt2d_array(ii)%pt2d(:, 1 ) = pt2d_array(ii)%pt2d(:, jpjm1 ) 618 pt2d_array(ii)%pt2d(:, jpj ) = pt2d_array(ii)%pt2d(:, 2 ) 619 ELSE ! 620 ! ! North-South boundaries (closed) 621 IF( .NOT. type_array(ii) == 'F' ) pt2d_array(ii)%pt2d(:, 1:jprecj ) = zland ! south except F-point 622 pt2d_array(ii)%pt2d(:, nlcj-jprecj+1:jpj ) = zland ! north 623 ! 624 ENDIF 625 ENDIF 626 END DO 627 628 ! 2. East and west directions exchange 629 ! ------------------------------------ 630 ! we play with the neigbours AND the row number because of the periodicity 631 ! 632 DO ii = 1 , num_fields 633 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 634 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 635 iihom = nlci-nreci 636 DO jl = 1, jpreci 637 zt2ew( : , jl , ii ) = pt2d_array(ii)%pt2d( jpreci+jl , : ) 638 zt2we( : , jl , ii ) = pt2d_array(ii)%pt2d( iihom +jl , : ) 639 END DO 640 END SELECT 641 END DO 642 ! 643 ! ! Migrations 644 imigr = jpreci * jpj 645 ! 646 SELECT CASE ( nbondi ) 647 CASE ( -1 ) 648 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req1 ) 649 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 650 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 651 CASE ( 0 ) 652 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 653 CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req2 ) 654 CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea ) 655 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 656 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 657 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 658 CASE ( 1 ) 659 CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 ) 660 CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe ) 661 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 662 END SELECT 663 ! 664 ! ! Write Dirichlet lateral conditions 665 iihom = nlci - jpreci 666 ! 667 668 DO ii = 1 , num_fields 669 SELECT CASE ( nbondi ) 670 CASE ( -1 ) 671 DO jl = 1, jpreci 672 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 673 END DO 674 CASE ( 0 ) 675 DO jl = 1, jpreci 676 pt2d_array(ii)%pt2d( jl , : ) = zt2we(:,jl,num_fields+ii) 677 pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii) 678 END DO 679 CASE ( 1 ) 680 DO jl = 1, jpreci 681 pt2d_array(ii)%pt2d( jl , : )= zt2we(:,jl,num_fields+ii) 682 END DO 683 END SELECT 684 END DO 685 686 ! 3. North and south directions 687 ! ----------------------------- 688 ! always closed : we play only with the neigbours 689 ! 690 !First Array 691 DO ii = 1 , num_fields 692 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 693 ijhom = nlcj-nrecj 694 DO jl = 1, jprecj 695 zt2sn(:,jl , ii) = pt2d_array(ii)%pt2d( : , ijhom +jl ) 696 zt2ns(:,jl , ii) = pt2d_array(ii)%pt2d( : , jprecj+jl ) 697 END DO 698 ENDIF 699 END DO 700 ! 701 ! ! Migrations 702 imigr = jprecj * jpi 703 ! 
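Editor's note: the _multiple variant amortises latency; the jl/ii loops above pack every field's halo strip into one buffer so each direction costs a single message of num_fields*imigr words rather than num_fields separate exchanges. A minimal sketch of the packing step (the pre-changeset arrayptr container is reproduced for self-containment; indexing simplified):

    MODULE pack_demo
       IMPLICIT NONE
       TYPE arrayptr                                   ! as in the pre-changeset module
          REAL(KIND=8), DIMENSION(:,:), POINTER :: pt2d
       END TYPE arrayptr
    CONTAINS
       SUBROUTINE pack_ew( pt2d_array, kfld, kpreci, pbuf )
          TYPE(arrayptr), DIMENSION(:)   , INTENT(in   ) :: pt2d_array
          INTEGER                        , INTENT(in   ) :: kfld, kpreci
          REAL(KIND=8), DIMENSION(:,:,:) , INTENT(  out) :: pbuf    ! (jpj,kpreci,kfld)
          INTEGER :: ji, jl
          DO ji = 1, kfld                 ! one slab per field: a single send
             DO jl = 1, kpreci            ! then moves kfld halo strips at once
                pbuf(:,jl,ji) = pt2d_array(ji)%pt2d(kpreci+jl,:)    ! inner west columns
             END DO
          END DO
       END SUBROUTINE pack_ew
    END MODULE pack_demo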
704 SELECT CASE ( nbondj ) 705 CASE ( -1 ) 706 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req1 ) 707 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 708 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 709 CASE ( 0 ) 710 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 711 CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req2 ) 712 CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono ) 713 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 714 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 715 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 716 CASE ( 1 ) 717 CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 ) 718 CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso ) 719 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 720 END SELECT 721 ! 722 ! ! Write Dirichlet lateral conditions 723 ijhom = nlcj - jprecj 724 ! 725 726 DO ii = 1 , num_fields 727 !First Array 728 SELECT CASE ( nbondj ) 729 CASE ( -1 ) 730 DO jl = 1, jprecj 731 pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii ) 732 END DO 733 CASE ( 0 ) 734 DO jl = 1, jprecj 735 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii) 736 pt2d_array(ii)%pt2d( : , ijhom + jl ) = zt2ns( : , jl , num_fields + ii ) 737 END DO 738 CASE ( 1 ) 739 DO jl = 1, jprecj 740 pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields + ii ) 741 END DO 742 END SELECT 743 END DO 744 745 ! 4. north fold treatment 746 ! ----------------------- 747 ! 748 !First Array 749 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 750 ! 751 SELECT CASE ( jpni ) 752 CASE ( 1 ) ; 753 DO ii = 1 , num_fields 754 CALL lbc_nfd ( pt2d_array(ii)%pt2d( : , : ), type_array(ii) , psgn_array(ii) ) ! only 1 northern proc, no mpp 755 END DO 756 CASE DEFAULT ; CALL mpp_lbc_north_2d_multiple( pt2d_array, type_array, psgn_array, num_fields ) ! for all northern procs. 757 END SELECT 758 ! 759 ENDIF 760 ! 761 ! 762 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 763 ! 764 END SUBROUTINE mpp_lnk_2d_multiple 765 766 767 SUBROUTINE load_array( pt2d, cd_type, psgn, pt2d_array, type_array, psgn_array, num_fields ) 768 !!--------------------------------------------------------------------- 769 REAL(wp), DIMENSION(jpi,jpj), TARGET, INTENT(inout) :: pt2d ! Second 2D array on which the boundary condition is applied 770 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 771 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 772 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 773 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 774 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 775 INTEGER , INTENT (inout) :: num_fields 776 !!--------------------------------------------------------------------- 777 num_fields = num_fields + 1 778 pt2d_array(num_fields)%pt2d => pt2d 779 type_array(num_fields) = cd_type 780 psgn_array(num_fields) = psgn 781 END SUBROUTINE load_array 336 !!---------------------------------------------------------------------- 337 !! *** routine mpp_lnk_(2,3,4)d *** 338 !! 339 !! * Argument : dummy argument use in mpp_lnk_... routines 340 !! ptab : array or pointer of arrays on which the boundary condition is applied 341 !! cd_nat : nature of array grid-points 342 !! psgn : sign used across the north fold boundary 343 !! kfld : optional, number of pt3d arrays 344 !! 
cd_mpp : optional, fill the overlap area only 345 !! pval : optional, background value (used at closed boundaries) 346 !!---------------------------------------------------------------------- 347 ! 348 ! !== 2D array and array of 2D pointer ==! 349 ! 350 # define DIM_2d 351 # define ROUTINE_LNK mpp_lnk_2d 352 # include "mpp_lnk_generic.h90" 353 # undef ROUTINE_LNK 354 # define MULTI 355 # define ROUTINE_LNK mpp_lnk_2d_ptr 356 # include "mpp_lnk_generic.h90" 357 # undef ROUTINE_LNK 358 # undef MULTI 359 # undef DIM_2d 360 ! 361 ! !== 3D array and array of 3D pointer ==! 362 ! 363 # define DIM_3d 364 # define ROUTINE_LNK mpp_lnk_3d 365 # include "mpp_lnk_generic.h90" 366 # undef ROUTINE_LNK 367 # define MULTI 368 # define ROUTINE_LNK mpp_lnk_3d_ptr 369 # include "mpp_lnk_generic.h90" 370 # undef ROUTINE_LNK 371 # undef MULTI 372 # undef DIM_3d 373 ! 374 ! !== 4D array and array of 4D pointer ==! 375 ! 376 # define DIM_4d 377 # define ROUTINE_LNK mpp_lnk_4d 378 # include "mpp_lnk_generic.h90" 379 # undef ROUTINE_LNK 380 # define MULTI 381 # define ROUTINE_LNK mpp_lnk_4d_ptr 382 # include "mpp_lnk_generic.h90" 383 # undef ROUTINE_LNK 384 # undef MULTI 385 # undef DIM_4d 386 387 !!---------------------------------------------------------------------- 388 !! *** routine mpp_nfd_(2,3,4)d *** 389 !! 390 !! * Argument : dummy argument use in mpp_nfd_... routines 391 !! ptab : array or pointer of arrays on which the boundary condition is applied 392 !! cd_nat : nature of array grid-points 393 !! psgn : sign used across the north fold boundary 394 !! kfld : optional, number of pt3d arrays 395 !! cd_mpp : optional, fill the overlap area only 396 !! pval : optional, background value (used at closed boundaries) 397 !!---------------------------------------------------------------------- 398 ! 399 ! !== 2D array and array of 2D pointer ==! 400 ! 401 # define DIM_2d 402 # define ROUTINE_NFD mpp_nfd_2d 403 # include "mpp_nfd_generic.h90" 404 # undef ROUTINE_NFD 405 # define MULTI 406 # define ROUTINE_NFD mpp_nfd_2d_ptr 407 # include "mpp_nfd_generic.h90" 408 # undef ROUTINE_NFD 409 # undef MULTI 410 # undef DIM_2d 411 ! 412 ! !== 3D array and array of 3D pointer ==! 413 ! 414 # define DIM_3d 415 # define ROUTINE_NFD mpp_nfd_3d 416 # include "mpp_nfd_generic.h90" 417 # undef ROUTINE_NFD 418 # define MULTI 419 # define ROUTINE_NFD mpp_nfd_3d_ptr 420 # include "mpp_nfd_generic.h90" 421 # undef ROUTINE_NFD 422 # undef MULTI 423 # undef DIM_3d 424 ! 425 ! !== 4D array and array of 4D pointer ==! 426 ! 427 # define DIM_4d 428 # define ROUTINE_NFD mpp_nfd_4d 429 # include "mpp_nfd_generic.h90" 430 # undef ROUTINE_NFD 431 # define MULTI 432 # define ROUTINE_NFD mpp_nfd_4d_ptr 433 # include "mpp_nfd_generic.h90" 434 # undef ROUTINE_NFD 435 # undef MULTI 436 # undef DIM_4d 437 438 439 !!---------------------------------------------------------------------- 440 !! *** routine mpp_lnk_bdy_(2,3,4)d *** 441 !! 442 !! * Argument : dummy argument use in mpp_lnk_... routines 443 !! ptab : array or pointer of arrays on which the boundary condition is applied 444 !! cd_nat : nature of array grid-points 445 !! psgn : sign used across the north fold boundary 446 !! kb_bdy : BDY boundary set 447 !! kfld : optional, number of pt3d arrays 448 !!---------------------------------------------------------------------- 449 ! 450 ! !== 2D array and array of 2D pointer ==! 451 ! 
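Editor's note: the # define / # include blocks above are the heart of this changeset. One template file (mpp_lnk_generic.h90, mpp_nfd_generic.h90 and, continuing below, mpp_bdy_generic.h90) is expanded several times, with DIM_2d/DIM_3d/DIM_4d and MULTI selecting the dummy-argument declarations and ROUTINE_LNK/NFD/BDY naming each instance. A reduced illustration of the mechanism with a hypothetical template (swap_generic.h90 and all names here are invented for the example):

    ! --- template file "swap_generic.h90" (hypothetical) ---
    #if defined DIM_2d
    #   define ARRAY_TYPE   REAL(KIND=8), DIMENSION(:,:)
    #endif
    #if defined DIM_3d
    #   define ARRAY_TYPE   REAL(KIND=8), DIMENSION(:,:,:)
    #endif
       SUBROUTINE ROUTINE_SWAP( ptab )
          ARRAY_TYPE, INTENT(inout) :: ptab
          ! ... rank-independent halo logic written once here ...
       END SUBROUTINE ROUTINE_SWAP
    #undef ARRAY_TYPE

    ! --- host file: two includes yield two specific routines ---
    #define DIM_2d
    #define ROUTINE_SWAP   swap_2d
    #include "swap_generic.h90"
    #undef  ROUTINE_SWAP
    #undef  DIM_2d
    #define DIM_3d
    #define ROUTINE_SWAP   swap_3d
    #include "swap_generic.h90"
    #undef  ROUTINE_SWAP
    #undef  DIM_3d

The pay-off is stated in the history header above: the assumed-shape dummies are declared once in the template ("use any 3rd dimension"), so every rank and pointer variant stays in sync instead of being maintained as hand-written near-duplicates like the removed mpp_lnk_3d/mpp_lnk_2d bodies.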
452 # define DIM_2d 453 # define ROUTINE_BDY mpp_lnk_bdy_2d 454 # include "mpp_bdy_generic.h90" 455 # undef ROUTINE_BDY 456 # define MULTI 457 # define ROUTINE_BDY mpp_lnk_bdy_2d_ptr 458 # include "mpp_bdy_generic.h90" 459 # undef ROUTINE_BDY 460 # undef MULTI 461 # undef DIM_2d 462 ! 463 ! !== 3D array and array of 3D pointer ==! 464 ! 465 # define DIM_3d 466 # define ROUTINE_BDY mpp_lnk_bdy_3d 467 # include "mpp_bdy_generic.h90" 468 # undef ROUTINE_BDY 469 # define MULTI 470 # define ROUTINE_BDY mpp_lnk_bdy_3d_ptr 471 # include "mpp_bdy_generic.h90" 472 # undef ROUTINE_BDY 473 # undef MULTI 474 # undef DIM_3d 475 ! 476 ! !== 4D array and array of 4D pointer ==! 477 ! 478 !!# define DIM_4d 479 !!# define ROUTINE_BDY mpp_lnk_bdy_4d 480 !!# include "mpp_bdy_generic.h90" 481 !!# undef ROUTINE_BDY 482 !!# define MULTI 483 !!# define ROUTINE_BDY mpp_lnk_bdy_4d_ptr 484 !!# include "mpp_bdy_generic.h90" 485 !!# undef ROUTINE_BDY 486 !!# undef MULTI 487 !!# undef DIM_4d 488 489 !!---------------------------------------------------------------------- 490 !! 491 !! load_array & mpp_lnk_2d_9 à generaliser a 3D et 4D 782 492 783 493 784 SUBROUTINE mpp_lnk_2d_9( pt2dA, cd_typeA, psgnA, pt2dB, cd_typeB, psgnB, pt2dC, cd_typeC, psgnC & 785 & , pt2dD, cd_typeD, psgnD, pt2dE, cd_typeE, psgnE, pt2dF, cd_typeF, psgnF & 786 & , pt2dG, cd_typeG, psgnG, pt2dH, cd_typeH, psgnH, pt2dI, cd_typeI, psgnI, cd_mpp, pval) 787 !!--------------------------------------------------------------------- 788 ! Second 2D array on which the boundary condition is applied 789 REAL(wp), DIMENSION(jpi,jpj), TARGET , INTENT(inout) :: pt2dA 790 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dB , pt2dC , pt2dD , pt2dE 791 REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) :: pt2dF , pt2dG , pt2dH , pt2dI 792 ! define the nature of ptab array grid-points 793 CHARACTER(len=1) , INTENT(in ) :: cd_typeA 794 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeB , cd_typeC , cd_typeD , cd_typeE 795 CHARACTER(len=1) , OPTIONAL, INTENT(in ) :: cd_typeF , cd_typeG , cd_typeH , cd_typeI 796 ! =-1 the sign change across the north fold boundary 797 REAL(wp) , INTENT(in ) :: psgnA 798 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnB , psgnC , psgnD , psgnE 799 REAL(wp) , OPTIONAL, INTENT(in ) :: psgnF , psgnG , psgnH , psgnI 800 CHARACTER(len=3) , OPTIONAL, INTENT(in ) :: cd_mpp ! fill the overlap area only 801 REAL(wp) , OPTIONAL, INTENT(in ) :: pval ! background value (used at closed boundaries) 802 !! 803 TYPE(arrayptr) , DIMENSION(9) :: pt2d_array 804 CHARACTER(len=1) , DIMENSION(9) :: type_array ! define the nature of ptab array grid-points 805 ! ! = T , U , V , F , W and I points 806 REAL(wp) , DIMENSION(9) :: psgn_array ! =-1 the sign change across the north fold boundary 807 INTEGER :: num_fields 808 !!--------------------------------------------------------------------- 809 ! 810 num_fields = 0 811 ! 812 ! Load the first array 813 CALL load_array( pt2dA, cd_typeA, psgnA, pt2d_array, type_array, psgn_array, num_fields ) 814 ! 815 ! 
Look if more arrays are added 816 IF( PRESENT(psgnB) ) CALL load_array(pt2dB,cd_typeB,psgnB,pt2d_array, type_array, psgn_array,num_fields) 817 IF( PRESENT(psgnC) ) CALL load_array(pt2dC,cd_typeC,psgnC,pt2d_array, type_array, psgn_array,num_fields) 818 IF( PRESENT(psgnD) ) CALL load_array(pt2dD,cd_typeD,psgnD,pt2d_array, type_array, psgn_array,num_fields) 819 IF( PRESENT(psgnE) ) CALL load_array(pt2dE,cd_typeE,psgnE,pt2d_array, type_array, psgn_array,num_fields) 820 IF( PRESENT(psgnF) ) CALL load_array(pt2dF,cd_typeF,psgnF,pt2d_array, type_array, psgn_array,num_fields) 821 IF( PRESENT(psgnG) ) CALL load_array(pt2dG,cd_typeG,psgnG,pt2d_array, type_array, psgn_array,num_fields) 822 IF( PRESENT(psgnH) ) CALL load_array(pt2dH,cd_typeH,psgnH,pt2d_array, type_array, psgn_array,num_fields) 823 IF( PRESENT(psgnI) ) CALL load_array(pt2dI,cd_typeI,psgnI,pt2d_array, type_array, psgn_array,num_fields) 824 ! 825 CALL mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp,pval ) 826 ! 827 END SUBROUTINE mpp_lnk_2d_9 828 829 830 SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval ) 831 !!---------------------------------------------------------------------- 832 !! *** routine mpp_lnk_2d *** 833 !! 834 !! ** Purpose : Message passing manadgement for 2d array 835 !! 836 !! ** Method : Use mppsend and mpprecv function for passing mask 837 !! between processors following neighboring subdomains. 838 !! domain parameters 839 !! nlci : first dimension of the local subdomain 840 !! nlcj : second dimension of the local subdomain 841 !! nbondi : mark for "east-west local boundary" 842 !! nbondj : mark for "north-south local boundary" 843 !! noea : number for local neighboring processors 844 !! nowe : number for local neighboring processors 845 !! noso : number for local neighboring processors 846 !! nono : number for local neighboring processors 847 !! 848 !!---------------------------------------------------------------------- 849 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 850 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 851 ! ! = T , U , V , F , W and I points 852 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 853 ! ! = 1. , the sign is kept 854 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 855 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 856 !! 857 INTEGER :: ji, jj, jl ! dummy loop indices 858 INTEGER :: imigr, iihom, ijhom ! temporary integers 859 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 860 REAL(wp) :: zland 861 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 862 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north 863 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east 864 !!---------------------------------------------------------------------- 865 ! 866 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & 867 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) 868 ! 869 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 870 ELSE ; zland = 0._wp ! zero by default 871 ENDIF 872 873 ! 1. standard boundary treatment 874 ! ------------------------------ 875 ! 876 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with existing values 877 ! 878 ! WARNING pt2d is defined only between nld and nle 879 DO jj = nlcj+1, jpj ! 
added line(s) (inner only) 880 pt2d(nldi :nlei , jj ) = pt2d(nldi:nlei, nlej) 881 pt2d(1 :nldi-1, jj ) = pt2d(nldi , nlej) 882 pt2d(nlei+1:nlci , jj ) = pt2d( nlei, nlej) 883 END DO 884 DO ji = nlci+1, jpi ! added column(s) (full) 885 pt2d(ji ,nldj :nlej ) = pt2d( nlei,nldj:nlej) 886 pt2d(ji ,1 :nldj-1) = pt2d( nlei,nldj ) 887 pt2d(ji ,nlej+1:jpj ) = pt2d( nlei, nlej) 888 END DO 889 ! 890 ELSE ! standard close or cyclic treatment 891 ! 892 ! ! East-West boundaries 893 IF( nbondi == 2 .AND. & ! Cyclic east-west 894 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 895 pt2d( 1 ,:) = pt2d(jpim1,:) ! west 896 pt2d(jpi,:) = pt2d( 2 ,:) ! east 897 ELSE ! closed 898 IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point 899 pt2d(nlci-jpreci+1:jpi ,:) = zland ! north 900 ENDIF 901 ! North-South boudaries 902 IF ( nbondj == 2 .AND. jperio == 7 ) THEN !* cyclic north south 903 pt2d(:, 1 ) = pt2d(:,jpjm1) 904 pt2d(:, jpj) = pt2d(:, 2) 905 ELSE 906 ! ! North-South boundaries (closed) 907 IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point 908 pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north 909 ENDIF 910 ENDIF 911 912 ! 2. East and west directions exchange 913 ! ------------------------------------ 914 ! we play with the neigbours AND the row number because of the periodicity 915 ! 916 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 917 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 918 iihom = nlci-nreci 919 DO jl = 1, jpreci 920 zt2ew(:,jl,1) = pt2d(jpreci+jl,:) 921 zt2we(:,jl,1) = pt2d(iihom +jl,:) 922 END DO 923 END SELECT 924 ! 925 ! ! Migrations 926 imigr = jpreci * jpj 927 ! 928 SELECT CASE ( nbondi ) 929 CASE ( -1 ) 930 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) 931 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 932 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 933 CASE ( 0 ) 934 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 935 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) 936 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) 937 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 938 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 939 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 940 CASE ( 1 ) 941 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) 942 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) 943 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 944 END SELECT 945 ! 946 ! ! Write Dirichlet lateral conditions 947 iihom = nlci - jpreci 948 ! 949 SELECT CASE ( nbondi ) 950 CASE ( -1 ) 951 DO jl = 1, jpreci 952 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 953 END DO 954 CASE ( 0 ) 955 DO jl = 1, jpreci 956 pt2d(jl ,:) = zt2we(:,jl,2) 957 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 958 END DO 959 CASE ( 1 ) 960 DO jl = 1, jpreci 961 pt2d(jl ,:) = zt2we(:,jl,2) 962 END DO 963 END SELECT 964 965 966 ! 3. North and south directions 967 ! ----------------------------- 968 ! always closed : we play only with the neigbours 969 ! 970 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 971 ijhom = nlcj-nrecj 972 DO jl = 1, jprecj 973 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) 974 zt2ns(:,jl,1) = pt2d(:,jprecj+jl) 975 END DO 976 ENDIF 977 ! 978 ! ! Migrations 979 imigr = jprecj * jpi 980 ! 
981 SELECT CASE ( nbondj ) 982 CASE ( -1 ) 983 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) 984 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 985 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 986 CASE ( 0 ) 987 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 988 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) 989 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) 990 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 991 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 992 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 993 CASE ( 1 ) 994 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) 995 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) 996 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 997 END SELECT 998 ! 999 ! ! Write Dirichlet lateral conditions 1000 ijhom = nlcj - jprecj 1001 ! 1002 SELECT CASE ( nbondj ) 1003 CASE ( -1 ) 1004 DO jl = 1, jprecj 1005 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 1006 END DO 1007 CASE ( 0 ) 1008 DO jl = 1, jprecj 1009 pt2d(:,jl ) = zt2sn(:,jl,2) 1010 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 1011 END DO 1012 CASE ( 1 ) 1013 DO jl = 1, jprecj 1014 pt2d(:,jl ) = zt2sn(:,jl,2) 1015 END DO 1016 END SELECT 1017 1018 1019 ! 4. north fold treatment 1020 ! ----------------------- 1021 ! 1022 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 1023 ! 1024 SELECT CASE ( jpni ) 1025 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp 1026 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 1027 END SELECT 1028 ! 1029 ENDIF 1030 ! 1031 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) 1032 ! 1033 END SUBROUTINE mpp_lnk_2d 1034 1035 1036 SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn ) 1037 !!---------------------------------------------------------------------- 1038 !! *** routine mpp_lnk_3d_gather *** 1039 !! 1040 !! ** Purpose : Message passing manadgement for two 3D arrays 1041 !! 1042 !! ** Method : Use mppsend and mpprecv function for passing mask 1043 !! between processors following neighboring subdomains. 1044 !! domain parameters 1045 !! nlci : first dimension of the local subdomain 1046 !! nlcj : second dimension of the local subdomain 1047 !! nbondi : mark for "east-west local boundary" 1048 !! nbondj : mark for "north-south local boundary" 1049 !! noea : number for local neighboring processors 1050 !! nowe : number for local neighboring processors 1051 !! noso : number for local neighboring processors 1052 !! nono : number for local neighboring processors 1053 !! 1054 !! ** Action : ptab1 and ptab2 with update value at its periphery 1055 !! 1056 !!---------------------------------------------------------------------- 1057 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which 1058 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied 1059 CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays 1060 CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points 1061 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 1062 !! ! = 1. , the sign is kept 1063 INTEGER :: jl ! dummy loop indices 1064 INTEGER :: imigr, iihom, ijhom ! temporary integers 1065 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1066 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1067 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ns, zt4sn ! 
2 x 3d for north-south & south-north 1068 REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ew, zt4we ! 2 x 3d for east-west & west-east 1069 !!---------------------------------------------------------------------- 1070 ! 1071 ALLOCATE( zt4ns(jpi,jprecj,jpk,2,2), zt4sn(jpi,jprecj,jpk,2,2) , & 1072 & zt4ew(jpj,jpreci,jpk,2,2), zt4we(jpj,jpreci,jpk,2,2) ) 1073 ! 1074 ! 1. standard boundary treatment 1075 ! ------------------------------ 1076 ! ! East-West boundaries 1077 ! !* Cyclic east-west 1078 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1079 ptab1( 1 ,:,:) = ptab1(jpim1,:,:) 1080 ptab1(jpi,:,:) = ptab1( 2 ,:,:) 1081 ptab2( 1 ,:,:) = ptab2(jpim1,:,:) 1082 ptab2(jpi,:,:) = ptab2( 2 ,:,:) 1083 ELSE !* closed 1084 IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point 1085 IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0 1086 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north 1087 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1088 ENDIF 1089 ! North-South boundaries 1090 IF ( nbondj == 2 .AND. jperio == 7 ) THEN !* cyclic north south 1091 ptab1(:, 1 ,:) = ptab1(: , jpjm1 , :) 1092 ptab1(:, jpj ,:) = ptab1(: , 2 , :) 1093 ptab2(:, 1 ,:) = ptab2(: , jpjm1 , :) 1094 ptab2(:, jpj ,:) = ptab2(: , 2 , :) 1095 ELSE 1096 ! ! North-South boundaries closed 1097 IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point 1098 IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0 1099 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north 1100 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1101 ENDIF 1102 1103 ! 2. East and west directions exchange 1104 ! ------------------------------------ 1105 ! we play with the neigbours AND the row number because of the periodicity 1106 ! 1107 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 1108 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1109 iihom = nlci-nreci 1110 DO jl = 1, jpreci 1111 zt4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:) 1112 zt4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:) 1113 zt4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:) 1114 zt4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:) 1115 END DO 1116 END SELECT 1117 ! 1118 ! ! Migrations 1119 imigr = jpreci * jpj * jpk *2 1120 ! 1121 SELECT CASE ( nbondi ) 1122 CASE ( -1 ) 1123 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req1 ) 1124 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1125 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1126 CASE ( 0 ) 1127 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1128 CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req2 ) 1129 CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) 1130 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1131 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1132 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1133 CASE ( 1 ) 1134 CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 1135 CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) 1136 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1137 END SELECT 1138 ! 1139 ! ! Write Dirichlet lateral conditions 1140 iihom = nlci - jpreci 1141 ! 
1142 SELECT CASE ( nbondi ) 1143 CASE ( -1 ) 1144 DO jl = 1, jpreci 1145 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1146 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1147 END DO 1148 CASE ( 0 ) 1149 DO jl = 1, jpreci 1150 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1151 ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) 1152 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1153 ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) 1154 END DO 1155 CASE ( 1 ) 1156 DO jl = 1, jpreci 1157 ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) 1158 ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) 1159 END DO 1160 END SELECT 1161 1162 1163 ! 3. North and south directions 1164 ! ----------------------------- 1165 ! always closed : we play only with the neigbours 1166 ! 1167 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1168 ijhom = nlcj - nrecj 1169 DO jl = 1, jprecj 1170 zt4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:) 1171 zt4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:) 1172 zt4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:) 1173 zt4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:) 1174 END DO 1175 ENDIF 1176 ! 1177 ! ! Migrations 1178 imigr = jprecj * jpi * jpk * 2 1179 ! 1180 SELECT CASE ( nbondj ) 1181 CASE ( -1 ) 1182 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req1 ) 1183 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1184 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1185 CASE ( 0 ) 1186 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1187 CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req2 ) 1188 CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) 1189 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1190 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1191 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 1192 CASE ( 1 ) 1193 CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 1194 CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) 1195 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1196 END SELECT 1197 ! 1198 ! ! Write Dirichlet lateral conditions 1199 ijhom = nlcj - jprecj 1200 ! 1201 SELECT CASE ( nbondj ) 1202 CASE ( -1 ) 1203 DO jl = 1, jprecj 1204 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1205 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1206 END DO 1207 CASE ( 0 ) 1208 DO jl = 1, jprecj 1209 ptab1(:,jl ,:) = zt4sn(:,jl,:,1,2) 1210 ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) 1211 ptab2(:,jl ,:) = zt4sn(:,jl,:,2,2) 1212 ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) 1213 END DO 1214 CASE ( 1 ) 1215 DO jl = 1, jprecj 1216 ptab1(:,jl,:) = zt4sn(:,jl,:,1,2) 1217 ptab2(:,jl,:) = zt4sn(:,jl,:,2,2) 1218 END DO 1219 END SELECT 1220 1221 1222 ! 4. north fold treatment 1223 ! ----------------------- 1224 IF( npolj /= 0 ) THEN 1225 ! 1226 SELECT CASE ( jpni ) 1227 CASE ( 1 ) 1228 CALL lbc_nfd ( ptab1, cd_type1, psgn ) ! only for northern procs. 1229 CALL lbc_nfd ( ptab2, cd_type2, psgn ) 1230 CASE DEFAULT 1231 CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs. 1232 CALL mpp_lbc_north (ptab2, cd_type2, psgn) 1233 END SELECT 1234 ! 1235 ENDIF 1236 ! 1237 DEALLOCATE( zt4ns, zt4sn, zt4ew, zt4we ) 1238 ! 1239 END SUBROUTINE mpp_lnk_3d_gather 494 !! mpp_lnk_2d_e utilisé dans ICB 495 496 497 !! mpp_lnk_sum_2d et 3D ====>>>>>> à virer du code !!!! 498 499 500 !!---------------------------------------------------------------------- 1240 501 1241 502 … … 1284 545 1285 546 1286 ! 1. standard boundary treatment 547 ! 1. standard boundary treatment (CAUTION: the order matters Here !!!! ) 1287 548 ! ------------------------------ 1288 ! Order matters Here !!!! 1289 ! 1290 ! North-South cyclic 1291 IF ( nbondj == 2 .AND. 
jperio == 7 ) THEN !* cyclic north south 1292 pt2d(:, 1-jprj: 1 ) = pt2d ( :, jpjm1-jprj:jpjm1) 549 ! !== North-South boundaries 550 ! !* cyclic 551 IF( nbondj == 2 .AND. jperio == 7 ) THEN 552 pt2d(:, 1-jprj: 1 ) = pt2d ( :, jpjm1-jprj:jpjm1 ) 1293 553 pt2d(:, jpj :jpj+jprj) = pt2d ( :, 2 :2+jprj) 1294 ELSE 1295 1296 ! !* North-South boundaries (closed) 1297 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jprj : jprecj ) = 0.e0 ! south except at F-point 1298 pt2d(:,nlcj-jprecj+1:jpj+jprj) = 0.e0 ! north 1299 ENDIF 1300 1301 ! ! East-West boundaries 1302 ! !* Cyclic east-west 554 ELSE !* closed 555 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jprj : jprecj ) = 0._wp ! south except at F-point 556 pt2d(:,nlcj-jprecj+1:jpj+jprj) = 0._wp ! north 557 ENDIF 558 ! !== East-West boundaries 559 ! !* Cyclic east-west 1303 560 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1304 pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east 1305 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west 1306 ! 1307 ELSE !* closed 1308 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point 1309 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north 1310 ENDIF 1311 ! 1312 561 pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east 562 pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west 563 ELSE !* closed 564 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0._wp ! south except at F-point 565 pt2d(nlci-jpreci+1:jpi+jpri,:) = 0._wp ! north 566 ENDIF 567 ! 1313 568 ! north fold treatment 1314 ! -------------------- ---569 ! -------------------- 1315 570 IF( npolj /= 0 ) THEN 1316 571 ! 1317 572 SELECT CASE ( jpni ) 1318 1319 CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn)573 !!gm ERROR CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj ) 574 !!gm ERROR CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn ) 1320 575 END SELECT 1321 576 ! … … 1375 630 END SELECT 1376 631 1377 1378 632 ! 3. North and south directions 1379 633 ! ----------------------------- … … 1430 684 END SUBROUTINE mpp_lnk_2d_e 1431 685 1432 SUBROUTINE mpp_lnk_sum_3d( ptab, cd_type, psgn, cd_mpp, pval )1433 !!----------------------------------------------------------------------1434 !! *** routine mpp_lnk_sum_3d ***1435 !!1436 !! ** Purpose : Message passing manadgement (sum the overlap region)1437 !!1438 !! ** Method : Use mppsend and mpprecv function for passing mask1439 !! between processors following neighboring subdomains.1440 !! domain parameters1441 !! nlci : first dimension of the local subdomain1442 !! nlcj : second dimension of the local subdomain1443 !! nbondi : mark for "east-west local boundary"1444 !! nbondj : mark for "north-south local boundary"1445 !! noea : number for local neighboring processors1446 !! nowe : number for local neighboring processors1447 !! noso : number for local neighboring processors1448 !! nono : number for local neighboring processors1449 !!1450 !! ** Action : ptab with update value at its periphery1451 !!1452 !!----------------------------------------------------------------------1453 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied1454 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points1455 ! ! = T , U , V , F , W points1456 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary1457 ! ! = 1. , the sign is kept1458 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! 
fill the overlap area only1459 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries)1460 !!1461 INTEGER :: ji, jj, jk, jl ! dummy loop indices1462 INTEGER :: imigr, iihom, ijhom ! temporary integers1463 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend1464 REAL(wp) :: zland1465 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend1466 !1467 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north1468 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east1469 1470 !!----------------------------------------------------------------------1471 1472 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), &1473 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) )1474 1475 !1476 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value1477 ELSE ; zland = 0.e0 ! zero by default1478 ENDIF1479 1480 ! 1. standard boundary treatment1481 ! ------------------------------1482 ! 2. East and west directions exchange1483 ! ------------------------------------1484 ! we play with the neigbours AND the row number because of the periodicity1485 !1486 SELECT CASE ( nbondi ) ! Read lateral conditions1487 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case)1488 iihom = nlci-jpreci1489 DO jl = 1, jpreci1490 zt3ew(:,jl,:,1) = ptab(jl ,:,:) ; ptab(jl ,:,:) = 0.0_wp1491 zt3we(:,jl,:,1) = ptab(iihom+jl,:,:) ; ptab(iihom+jl,:,:) = 0.0_wp1492 END DO1493 END SELECT1494 !1495 ! ! Migrations1496 imigr = jpreci * jpj * jpk1497 !1498 SELECT CASE ( nbondi )1499 CASE ( -1 )1500 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 )1501 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )1502 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1503 CASE ( 0 )1504 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )1505 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 )1506 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )1507 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )1508 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1509 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)1510 CASE ( 1 )1511 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )1512 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )1513 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1514 END SELECT1515 !1516 ! ! Write lateral conditions1517 iihom = nlci-nreci1518 !1519 SELECT CASE ( nbondi )1520 CASE ( -1 )1521 DO jl = 1, jpreci1522 ptab(iihom+jl,:,:) = ptab(iihom+jl,:,:) + zt3ew(:,jl,:,2)1523 END DO1524 CASE ( 0 )1525 DO jl = 1, jpreci1526 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2)1527 ptab(iihom +jl,:,:) = ptab(iihom +jl,:,:) + zt3ew(:,jl,:,2)1528 END DO1529 CASE ( 1 )1530 DO jl = 1, jpreci1531 ptab(jpreci+jl,:,:) = ptab(jpreci+jl,:,:) + zt3we(:,jl,:,2)1532 END DO1533 END SELECT1534 1535 1536 ! 3. North and south directions1537 ! -----------------------------1538 ! always closed : we play only with the neigbours1539 !1540 IF( nbondj /= 2 ) THEN ! Read lateral conditions1541 ijhom = nlcj-jprecj1542 DO jl = 1, jprecj1543 zt3sn(:,jl,:,1) = ptab(:,ijhom+jl,:) ; ptab(:,ijhom+jl,:) = 0.0_wp1544 zt3ns(:,jl,:,1) = ptab(:,jl ,:) ; ptab(:,jl ,:) = 0.0_wp1545 END DO1546 ENDIF1547 !1548 ! ! 
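Editor's note: mpp_lnk_sum_3d above inverts the usual halo rule. Each process exports its own halo contribution, zeroes it locally, then adds what the neighbour sends into its inner rows, so a quantity accumulated across the overlap is counted exactly once. A serial toy illustration of that read-zero-accumulate step (the MPI exchange is stubbed out):

    PROGRAM sum_overlap_demo
       IMPLICIT NONE
       INTEGER , PARAMETER :: jpn = 8, jprec = 1, ihom = jpn - 2*jprec
       REAL(KIND=8) :: ptab(jpn), zsend(jprec), zrecv(jprec)
       ptab(:)  = 1.0_8                    ! every point holds a contribution
       zsend(:) = ptab(1:jprec)            ! export my halo contribution ...
       ptab(1:jprec) = 0.0_8               ! ... and zero it so it is counted once
       zrecv(:) = zsend(:)                 ! stand-in for mppsend/mpprecv
       ptab(ihom+1:ihom+jprec) = ptab(ihom+1:ihom+jprec) + zrecv(:)   ! accumulate
       PRINT *, 'sum before =', REAL(jpn,8), '  sum after =', SUM(ptab)   ! both 8.0
    END PROGRAM sum_overlap_demo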
Migrations1549 imigr = jprecj * jpi * jpk1550 !1551 SELECT CASE ( nbondj )1552 CASE ( -1 )1553 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 )1554 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )1555 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1556 CASE ( 0 )1557 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )1558 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 )1559 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )1560 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )1561 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1562 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)1563 CASE ( 1 )1564 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )1565 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )1566 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)1567 END SELECT1568 !1569 ! ! Write lateral conditions1570 ijhom = nlcj-nrecj1571 !1572 SELECT CASE ( nbondj )1573 CASE ( -1 )1574 DO jl = 1, jprecj1575 ptab(:,ijhom+jl,:) = ptab(:,ijhom+jl,:) + zt3ns(:,jl,:,2)1576 END DO1577 CASE ( 0 )1578 DO jl = 1, jprecj1579 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl,:,2)1580 ptab(:,ijhom +jl,:) = ptab(:,ijhom +jl,:) + zt3ns(:,jl,:,2)1581 END DO1582 CASE ( 1 )1583 DO jl = 1, jprecj1584 ptab(:,jprecj+jl,:) = ptab(:,jprecj+jl,:) + zt3sn(:,jl ,:,2)1585 END DO1586 END SELECT1587 1588 1589 ! 4. north fold treatment1590 ! -----------------------1591 !1592 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN1593 !1594 SELECT CASE ( jpni )1595 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp1596 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs.1597 END SELECT1598 !1599 ENDIF1600 !1601 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we )1602 !1603 END SUBROUTINE mpp_lnk_sum_3d1604 1605 SUBROUTINE mpp_lnk_sum_2d( pt2d, cd_type, psgn, cd_mpp, pval )1606 !!----------------------------------------------------------------------1607 !! *** routine mpp_lnk_sum_2d ***1608 !!1609 !! ** Purpose : Message passing manadgement for 2d array (sum the overlap region)1610 !!1611 !! ** Method : Use mppsend and mpprecv function for passing mask1612 !! between processors following neighboring subdomains.1613 !! domain parameters1614 !! nlci : first dimension of the local subdomain1615 !! nlcj : second dimension of the local subdomain1616 !! nbondi : mark for "east-west local boundary"1617 !! nbondj : mark for "north-south local boundary"1618 !! noea : number for local neighboring processors1619 !! nowe : number for local neighboring processors1620 !! noso : number for local neighboring processors1621 !! nono : number for local neighboring processors1622 !!1623 !!----------------------------------------------------------------------1624 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied1625 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points1626 ! ! = T , U , V , F , W and I points1627 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary1628 ! ! = 1. , the sign is kept1629 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only1630 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries)1631 !!1632 INTEGER :: ji, jj, jl ! dummy loop indices1633 INTEGER :: imigr, iihom, ijhom ! temporary integers1634 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend1635 REAL(wp) :: zland1636 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! 
for key_mpi_isend1637 !1638 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north1639 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east1640 1641 !!----------------------------------------------------------------------1642 1643 ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), &1644 & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) )1645 1646 !1647 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value1648 ELSE ; zland = 0.e0 ! zero by default1649 ENDIF1650 1651 ! 1. standard boundary treatment1652 ! ------------------------------1653 ! 2. East and west directions exchange1654 ! ------------------------------------1655 ! we play with the neigbours AND the row number because of the periodicity1656 !1657 SELECT CASE ( nbondi ) ! Read lateral conditions1658 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case)1659 iihom = nlci - jpreci1660 DO jl = 1, jpreci1661 zt2ew(:,jl,1) = pt2d(jl ,:) ; pt2d(jl ,:) = 0.0_wp1662 zt2we(:,jl,1) = pt2d(iihom +jl,:) ; pt2d(iihom +jl,:) = 0.0_wp1663 END DO1664 END SELECT1665 !1666 ! ! Migrations1667 imigr = jpreci * jpj1668 !1669 SELECT CASE ( nbondi )1670 CASE ( -1 )1671 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 )1672 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )1673 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1674 CASE ( 0 )1675 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )1676 CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 )1677 CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )1678 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )1679 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1680 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)1681 CASE ( 1 )1682 CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )1683 CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )1684 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1685 END SELECT1686 !1687 ! ! Write lateral conditions1688 iihom = nlci-nreci1689 !1690 SELECT CASE ( nbondi )1691 CASE ( -1 )1692 DO jl = 1, jpreci1693 pt2d(iihom+jl,:) = pt2d(iihom+jl,:) + zt2ew(:,jl,2)1694 END DO1695 CASE ( 0 )1696 DO jl = 1, jpreci1697 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2)1698 pt2d(iihom +jl,:) = pt2d(iihom +jl,:) + zt2ew(:,jl,2)1699 END DO1700 CASE ( 1 )1701 DO jl = 1, jpreci1702 pt2d(jpreci+jl,:) = pt2d(jpreci+jl,:) + zt2we(:,jl,2)1703 END DO1704 END SELECT1705 1706 1707 ! 3. North and south directions1708 ! -----------------------------1709 ! always closed : we play only with the neigbours1710 !1711 IF( nbondj /= 2 ) THEN ! Read lateral conditions1712 ijhom = nlcj - jprecj1713 DO jl = 1, jprecj1714 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) ; pt2d(:,ijhom +jl) = 0.0_wp1715 zt2ns(:,jl,1) = pt2d(:,jl ) ; pt2d(:,jl ) = 0.0_wp1716 END DO1717 ENDIF1718 !1719 ! ! Migrations1720 imigr = jprecj * jpi1721 !1722 SELECT CASE ( nbondj )1723 CASE ( -1 )1724 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 )1725 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )1726 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1727 CASE ( 0 )1728 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )1729 CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 )1730 CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )1731 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )1732 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1733 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err)1734 CASE ( 1 )1735 CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )1736 CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )1737 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err)1738 END SELECT1739 !1740 ! ! 
Write lateral conditions1741 ijhom = nlcj-nrecj1742 !1743 SELECT CASE ( nbondj )1744 CASE ( -1 )1745 DO jl = 1, jprecj1746 pt2d(:,ijhom+jl) = pt2d(:,ijhom+jl) + zt2ns(:,jl,2)1747 END DO1748 CASE ( 0 )1749 DO jl = 1, jprecj1750 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2)1751 pt2d(:,ijhom +jl) = pt2d(:,ijhom +jl) + zt2ns(:,jl,2)1752 END DO1753 CASE ( 1 )1754 DO jl = 1, jprecj1755 pt2d(:,jprecj+jl) = pt2d(:,jprecj+jl) + zt2sn(:,jl,2)1756 END DO1757 END SELECT1758 1759 1760 ! 4. north fold treatment1761 ! -----------------------1762 !1763 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN1764 !1765 SELECT CASE ( jpni )1766 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp1767 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs.1768 END SELECT1769 !1770 ENDIF1771 !1772 DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )1773 !1774 END SUBROUTINE mpp_lnk_sum_2d1775 686 1776 687 SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req ) … … 1874 785 END SUBROUTINE mppscatter 1875 786 1876 787 !!---------------------------------------------------------------------- 788 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 789 !! 790 !!---------------------------------------------------------------------- 791 !! 1877 792 SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) 1878 !!----------------------------------------------------------------------1879 !! *** routine mppmax_a_int ***1880 !!1881 !! ** Purpose : Find maximum value in an integer layout array1882 !!1883 793 !!---------------------------------------------------------------------- 1884 794 INTEGER , INTENT(in ) :: kdim ! size of array 1885 795 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1886 796 INTEGER , INTENT(in ), OPTIONAL :: kcom ! 1887 ! 1888 INTEGER :: ierror, localcomm ! temporary integer 797 INTEGER :: ierror, ilocalcomm ! temporary integer 1889 798 INTEGER, DIMENSION(kdim) :: iwork 1890 799 !!---------------------------------------------------------------------- 1891 ! 1892 localcomm = mpi_comm_opa 1893 IF( PRESENT(kcom) ) localcomm = kcom 1894 ! 1895 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) 1896 ! 800 ilocalcomm = mpi_comm_opa 801 IF( PRESENT(kcom) ) ilocalcomm = kcom 802 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, ilocalcomm, ierror ) 1897 803 ktab(:) = iwork(:) 1898 !1899 804 END SUBROUTINE mppmax_a_int 1900 1901 805 !! 1902 806 SUBROUTINE mppmax_int( ktab, kcom ) 1903 !!----------------------------------------------------------------------1904 !! *** routine mppmax_int ***1905 !!1906 !! ** Purpose : Find maximum value in an integer layout array1907 !!1908 807 !!---------------------------------------------------------------------- 1909 808 INTEGER, INTENT(inout) :: ktab ! ??? 1910 809 INTEGER, INTENT(in ), OPTIONAL :: kcom ! ??? 1911 ! 1912 INTEGER :: ierror, iwork, localcomm ! temporary integer 1913 !!---------------------------------------------------------------------- 1914 ! 1915 localcomm = mpi_comm_opa 1916 IF( PRESENT(kcom) ) localcomm = kcom 1917 ! 1918 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror ) 1919 ! 810 INTEGER :: ierror, iwork, ilocalcomm ! 
temporary integer 811 !!---------------------------------------------------------------------- 812 ilocalcomm = mpi_comm_opa 813 IF( PRESENT(kcom) ) ilocalcomm = kcom 814 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, ilocalcomm, ierror ) 1920 815 ktab = iwork 1921 ! 1922 END SUBROUTINE mppmax_int 1923 1924 817 !! 818 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 819 !!---------------------------------------------------------------------- 820 REAL(wp), DIMENSION(kdim), INTENT(inout) :: ptab 821 INTEGER , INTENT(in ) :: kdim 822 INTEGER , OPTIONAL , INTENT(in ) :: kcom 823 INTEGER :: ierror, ilocalcomm 824 REAL(wp), DIMENSION(kdim) :: zwork 825 !!---------------------------------------------------------------------- 826 ilocalcomm = mpi_comm_opa 827 IF( PRESENT(kcom) ) ilocalcomm = kcom 828 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, ilocalcomm, ierror ) 829 ptab(:) = zwork(:) 830 END SUBROUTINE mppmax_a_real 831 !! 832 SUBROUTINE mppmax_real( ptab, kcom ) 833 !!---------------------------------------------------------------------- 834 REAL(wp), INTENT(inout) :: ptab ! ??? 835 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 836 INTEGER :: ierror, ilocalcomm 837 REAL(wp) :: zwork 838 !!---------------------------------------------------------------------- 839 ilocalcomm = mpi_comm_opa 840 IF( PRESENT(kcom) ) ilocalcomm = kcom 841 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, ilocalcomm, ierror ) 842 ptab = zwork 843 END SUBROUTINE mppmax_real 844 845 846 !!---------------------------------------------------------------------- 847 !! *** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 848 !! 849 !!---------------------------------------------------------------------- 850 !! 1925 851 SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) 1926 !!---------------------------------------------------------------------- 1927 !! *** routine mppmin_a_int *** 1928 !! 1929 !! ** Purpose : Find minimum value in an integer layout array 1930 !! 1931 852 !!---------------------------------------------------------------------- 1932 853 INTEGER , INTENT( in ) :: kdim ! size of array … 1934 855 INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator 1935 856 !! 1936 INTEGER :: ierror, localcomm ! temporary integer 857 INTEGER :: ierror, ilocalcomm ! temporary integer 1937 858 INTEGER, DIMENSION(kdim) :: iwork 1938 859 !!---------------------------------------------------------------------- 1939 ! 1940 localcomm = mpi_comm_opa 1941 IF( PRESENT(kcom) ) localcomm = kcom 1942 ! 1943 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) 1944 ! 860 ilocalcomm = mpi_comm_opa 861 IF( PRESENT(kcom) ) ilocalcomm = kcom 862 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, ilocalcomm, ierror ) 1945 863 ktab(:) = iwork(:) 1946 ! 1947 864 END SUBROUTINE mppmin_a_int 1948 1949 865 !! 1950 866 SUBROUTINE mppmin_int( ktab, kcom ) 1951 !!---------------------------------------------------------------------- 1952 !! *** routine mppmin_int *** 1953 !! 1954 !! ** Purpose : Find minimum value in an integer layout array 1955 !! 1956 867 !!---------------------------------------------------------------------- 1957 868 INTEGER, INTENT(inout) :: ktab ! ??? 1958 869 INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator 1959 870 !! 1960 INTEGER :: ierror, iwork, localcomm 1961 !!---------------------------------------------------------------------- 1962 ! 1963 localcomm = mpi_comm_opa 1964 IF( PRESENT(kcom) ) localcomm = kcom 1965 ! 
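      !!
      !! All of the min/max wrappers in this section share one pattern: choose the
      !! communicator (mpi_comm_opa unless the optional kcom argument is present),
      !! MPI_ALLREDUCE into a scratch variable, then copy the result back.  A
      !! typical call site, reached through the generic mpp_max interface, would
      !! look like the sketch below (zmax is a hypothetical local variable, not a
      !! name taken from this module):
      !!
      !!      zmax = MAXVAL( ptab(:,:) )              ! local maximum on this subdomain
      !!      CALL mpp_max( zmax )                    ! global maximum over mpi_comm_opa
      !!      CALL mpp_max( zmax, kcom = ncomm_znl )  ! or over a zonal sub-communicator
      !!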
1966 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror ) 1967 ! 871 INTEGER :: ierror, iwork, ilocalcomm 872 !!---------------------------------------------------------------------- 873 ilocalcomm = mpi_comm_opa 874 IF( PRESENT(kcom) ) ilocalcomm = kcom 875 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, ilocalcomm, ierror ) 1968 876 ktab = iwork 1969 !1970 877 END SUBROUTINE mppmin_int 1971 1972 1973 SUBROUTINE mppsum_a_int( ktab, kdim ) 1974 !!---------------------------------------------------------------------- 1975 !! *** routine mppsum_a_int *** 1976 !! 1977 !! ** Purpose : Global integer sum, 1D array case 1978 !! 1979 !!---------------------------------------------------------------------- 1980 INTEGER, INTENT(in ) :: kdim ! ??? 1981 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 1982 ! 1983 INTEGER :: ierror 1984 INTEGER, DIMENSION (kdim) :: iwork 1985 !!---------------------------------------------------------------------- 1986 ! 1987 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1988 ! 1989 ktab(:) = iwork(:) 1990 ! 1991 END SUBROUTINE mppsum_a_int 1992 1993 1994 SUBROUTINE mppsum_int( ktab ) 1995 !!---------------------------------------------------------------------- 1996 !! *** routine mppsum_int *** 1997 !! 1998 !! ** Purpose : Global integer sum 1999 !! 2000 !!---------------------------------------------------------------------- 2001 INTEGER, INTENT(inout) :: ktab 2002 !! 2003 INTEGER :: ierror, iwork 2004 !!---------------------------------------------------------------------- 2005 ! 2006 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 2007 ! 2008 ktab = iwork 2009 ! 2010 END SUBROUTINE mppsum_int 2011 2012 2013 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 2014 !!---------------------------------------------------------------------- 2015 !! *** routine mppmax_a_real *** 2016 !! 2017 !! ** Purpose : Maximum 2018 !! 878 !! 879 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 2019 880 !!---------------------------------------------------------------------- 2020 881 INTEGER , INTENT(in ) :: kdim 2021 882 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 2022 883 INTEGER , INTENT(in ), OPTIONAL :: kcom 2023 ! 2024 INTEGER :: ierror, localcomm 2025 REAL(wp), DIMENSION(kdim) :: zwork 2026 !!---------------------------------------------------------------------- 2027 ! 2028 localcomm = mpi_comm_opa 2029 IF( PRESENT(kcom) ) localcomm = kcom 2030 ! 2031 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror ) 2032 ptab(:) = zwork(:) 2033 ! 2034 END SUBROUTINE mppmax_a_real 2035 2036 2037 SUBROUTINE mppmax_real( ptab, kcom ) 2038 !!---------------------------------------------------------------------- 2039 !! *** routine mppmax_real *** 2040 !! 2041 !! ** Purpose : Maximum 2042 !! 2043 !!---------------------------------------------------------------------- 2044 REAL(wp), INTENT(inout) :: ptab ! ??? 2045 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2046 !! 2047 INTEGER :: ierror, localcomm 2048 REAL(wp) :: zwork 2049 !!---------------------------------------------------------------------- 2050 ! 2051 localcomm = mpi_comm_opa 2052 IF( PRESENT(kcom) ) localcomm = kcom 2053 ! 2054 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror ) 2055 ptab = zwork 2056 ! 
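      !!
      !! The zwork scratch variable used by these wrappers is needed because this
      !! form of MPI_ALLREDUCE requires distinct send and receive buffers.  An
      !! equivalent in-place form exists in MPI-2; it is shown here only as an
      !! illustrative alternative and is not used in this module:
      !!
      !!      CALL mpi_allreduce( MPI_IN_PLACE, ptab, 1, mpi_double_precision,   &
      !!         &                mpi_max, ilocalcomm, ierror )
      !!
      !! with MPI_IN_PLACE passed as the send buffer on every rank.
      !!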
2057 END SUBROUTINE mppmax_real 2058 2059 SUBROUTINE mppmax_real_multiple( ptab, NUM , kcom ) 2060 !!---------------------------------------------------------------------- 2061 !! *** routine mppmax_real *** 2062 !! 2063 !! ** Purpose : Maximum 2064 !! 2065 !!---------------------------------------------------------------------- 2066 REAL(wp), DIMENSION(:) , INTENT(inout) :: ptab ! ??? 2067 INTEGER , INTENT(in ) :: NUM 2068 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 2069 !! 2070 INTEGER :: ierror, localcomm 2071 REAL(wp) , POINTER , DIMENSION(:) :: zwork 2072 !!---------------------------------------------------------------------- 2073 ! 2074 CALL wrk_alloc(NUM , zwork) 2075 localcomm = mpi_comm_opa 2076 IF( PRESENT(kcom) ) localcomm = kcom 2077 ! 2078 CALL mpi_allreduce( ptab, zwork, NUM, mpi_double_precision, mpi_max, localcomm, ierror ) 2079 ptab = zwork 2080 CALL wrk_dealloc(NUM , zwork) 2081 ! 2082 END SUBROUTINE mppmax_real_multiple 2083 2084 2085 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 2086 !!---------------------------------------------------------------------- 2087 !! *** routine mppmin_a_real *** 2088 !! 2089 !! ** Purpose : Minimum of REAL, array case 2090 !! 2091 !!----------------------------------------------------------------------- 2092 INTEGER , INTENT(in ) :: kdim 2093 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 2094 INTEGER , INTENT(in ), OPTIONAL :: kcom 2095 !! 2096 INTEGER :: ierror, localcomm 884 INTEGER :: ierror, ilocalcomm 2097 885 REAL(wp), DIMENSION(kdim) :: zwork 2098 886 !!----------------------------------------------------------------------- 2099 ! 2100 localcomm = mpi_comm_opa 2101 IF( PRESENT(kcom) ) localcomm = kcom 2102 ! 2103 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror ) 887 ilocalcomm = mpi_comm_opa 888 IF( PRESENT(kcom) ) ilocalcomm = kcom 889 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, ilocalcomm, ierror ) 2104 890 ptab(:) = zwork(:) 2105 !2106 891 END SUBROUTINE mppmin_a_real 2107 2108 892 !! 2109 893 SUBROUTINE mppmin_real( ptab, kcom ) 2110 !!----------------------------------------------------------------------2111 !! *** routine mppmin_real ***2112 !!2113 !! ** Purpose : minimum of REAL, scalar case2114 !!2115 894 !!----------------------------------------------------------------------- 2116 895 REAL(wp), INTENT(inout) :: ptab ! 2117 896 INTEGER , INTENT(in ), OPTIONAL :: kcom 2118 !! 2119 INTEGER :: ierror 2120 REAL(wp) :: zwork 2121 INTEGER :: localcomm 2122 !!----------------------------------------------------------------------- 2123 ! 2124 localcomm = mpi_comm_opa 2125 IF( PRESENT(kcom) ) localcomm = kcom 2126 ! 2127 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror ) 2128 ptab = zwork 2129 ! 2130 END SUBROUTINE mppmin_real 2131 2132 2133 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 2134 !!---------------------------------------------------------------------- 2135 !! *** routine mppsum_a_real *** 2136 !! 2137 !! ** Purpose : global sum, REAL ARRAY argument case 2138 !! 2139 !!----------------------------------------------------------------------- 2140 INTEGER , INTENT( in ) :: kdim ! size of ptab 2141 REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array 2142 INTEGER , INTENT( in ), OPTIONAL :: kcom 2143 !! 2144 INTEGER :: ierror ! temporary integer 2145 INTEGER :: localcomm 2146 REAL(wp), DIMENSION(kdim) :: zwork ! 
temporary workspace 2147 !!----------------------------------------------------------------------- 2148 ! 2149 localcomm = mpi_comm_opa 2150 IF( PRESENT(kcom) ) localcomm = kcom 2151 ! 2152 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror ) 2153 ptab(:) = zwork(:) 2154 ! 2155 END SUBROUTINE mppsum_a_real 2156 2157 2158 SUBROUTINE mppsum_real( ptab, kcom ) 2159 !!---------------------------------------------------------------------- 2160 !! *** routine mppsum_real *** 2161 !! 2162 !! ** Purpose : global sum, SCALAR argument case 2163 !! 2164 !!----------------------------------------------------------------------- 2165 REAL(wp), INTENT(inout) :: ptab ! input scalar 2166 INTEGER , INTENT(in ), OPTIONAL :: kcom 2167 !! 2168 INTEGER :: ierror, localcomm 897 INTEGER :: ierror, ilocalcomm 2169 898 REAL(wp) :: zwork 2170 899 !!----------------------------------------------------------------------- 2171 ! 2172 localcomm = mpi_comm_opa 2173 IF( PRESENT(kcom) ) localcomm = kcom 2174 ! 2175 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror ) 900 ilocalcomm = mpi_comm_opa 901 IF( PRESENT(kcom) ) ilocalcomm = kcom 902 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, ilocalcomm, ierror ) 2176 903 ptab = zwork 2177 ! 904 END SUBROUTINE mppmin_real 905 906 907 !!---------------------------------------------------------------------- 908 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 909 !! 910 !! Global sum of 1D array or a variable (integer, real or complex) 911 !!---------------------------------------------------------------------- 912 !! 913 SUBROUTINE mppsum_a_int( ktab, kdim ) 914 !!---------------------------------------------------------------------- 915 INTEGER, INTENT(in ) :: kdim ! ??? 916 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 917 INTEGER :: ierror 918 INTEGER, DIMENSION (kdim) :: iwork 919 !!---------------------------------------------------------------------- 920 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 921 ktab(:) = iwork(:) 922 END SUBROUTINE mppsum_a_int 923 !! 924 SUBROUTINE mppsum_int( ktab ) 925 !!---------------------------------------------------------------------- 926 INTEGER, INTENT(inout) :: ktab 927 INTEGER :: ierror, iwork 928 !!---------------------------------------------------------------------- 929 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 930 ktab = iwork 931 END SUBROUTINE mppsum_int 932 !! 933 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 934 !!----------------------------------------------------------------------- 935 INTEGER , INTENT(in ) :: kdim ! size of ptab 936 REAL(wp), DIMENSION(kdim), INTENT(inout) :: ptab ! input array 937 INTEGER , OPTIONAL , INTENT(in ) :: kcom ! specific communicator 938 INTEGER :: ierror, ilocalcomm ! local integer 939 REAL(wp) :: zwork(kdim) ! local workspace 940 !!----------------------------------------------------------------------- 941 ilocalcomm = mpi_comm_opa 942 IF( PRESENT(kcom) ) ilocalcomm = kcom 943 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, ilocalcomm, ierror ) 944 ptab(:) = zwork(:) 945 END SUBROUTINE mppsum_a_real 946 !! 947 SUBROUTINE mppsum_real( ptab, kcom ) 948 !!----------------------------------------------------------------------- 949 REAL(wp) , INTENT(inout) :: ptab ! 
input scalar 950 INTEGER , OPTIONAL, INTENT(in ) :: kcom 951 INTEGER :: ierror, ilocalcomm 952 REAL(wp) :: zwork 953 !!----------------------------------------------------------------------- 954 ilocalcomm = mpi_comm_opa 955 IF( PRESENT(kcom) ) ilocalcomm = kcom 956 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, ilocalcomm, ierror ) 957 ptab = zwork 2178 958 END SUBROUTINE mppsum_real 2179 2180 959 !! 2181 960 SUBROUTINE mppsum_realdd( ytab, kcom ) 2182 !!----------------------------------------------------------------------2183 !! *** routine mppsum_realdd ***2184 !!2185 !! ** Purpose : global sum in Massively Parallel Processing2186 !! SCALAR argument case for double-double precision2187 !!2188 961 !!----------------------------------------------------------------------- 2189 COMPLEX(wp), INTENT(inout) :: ytab ! input scalar 2190 INTEGER , INTENT(in ), OPTIONAL :: kcom 2191 ! 2192 INTEGER :: ierror 2193 INTEGER :: localcomm 962 COMPLEX(wp) , INTENT(inout) :: ytab ! input scalar 963 INTEGER , OPTIONAL, INTENT(in ) :: kcom 964 INTEGER :: ierror, ilocalcomm 2194 965 COMPLEX(wp) :: zwork 2195 966 !!----------------------------------------------------------------------- 2196 ! 2197 localcomm = mpi_comm_opa 2198 IF( PRESENT(kcom) ) localcomm = kcom 2199 ! 2200 ! reduce local sums into global sum 2201 CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 967 ilocalcomm = mpi_comm_opa 968 IF( PRESENT(kcom) ) ilocalcomm = kcom 969 CALL MPI_ALLREDUCE( ytab, zwork, 1, MPI_DOUBLE_COMPLEX, MPI_SUMDD, ilocalcomm, ierror ) 2202 970 ytab = zwork 2203 !2204 971 END SUBROUTINE mppsum_realdd 2205 2206 972 !! 2207 973 SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom ) 2208 974 !!---------------------------------------------------------------------- 2209 !! *** routine mppsum_a_realdd ***2210 !!2211 !! ** Purpose : global sum in Massively Parallel Processing2212 !! COMPLEX ARRAY case for double-double precision2213 !!2214 !!-----------------------------------------------------------------------2215 975 INTEGER , INTENT(in ) :: kdim ! size of ytab 2216 976 COMPLEX(wp), DIMENSION(kdim), INTENT(inout) :: ytab ! input array 2217 977 INTEGER , OPTIONAL , INTENT(in ) :: kcom 2218 ! 2219 INTEGER:: ierror, localcomm ! local integer 978 INTEGER:: ierror, ilocalcomm ! local integer 2220 979 COMPLEX(wp), DIMENSION(kdim) :: zwork ! temporary workspace 2221 980 !!----------------------------------------------------------------------- 2222 ! 2223 localcomm = mpi_comm_opa 2224 IF( PRESENT(kcom) ) localcomm = kcom 2225 ! 2226 CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, MPI_SUMDD, localcomm, ierror ) 981 ilocalcomm = mpi_comm_opa 982 IF( PRESENT(kcom) ) ilocalcomm = kcom 983 CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, MPI_SUMDD, ilocalcomm, ierror ) 2227 984 ytab(:) = zwork(:) 2228 !2229 985 END SUBROUTINE mppsum_a_realdd 986 987 988 SUBROUTINE mppmax_real_multiple( pt1d, kdim, kcom ) 989 !!---------------------------------------------------------------------- 990 !! *** routine mppmax_real *** 991 !! 992 !! ** Purpose : Maximum across processor of each element of a 1D arrays 993 !! 994 !!---------------------------------------------------------------------- 995 REAL(wp), DIMENSION(kdim), INTENT(inout) :: pt1d ! 1D arrays 996 INTEGER , INTENT(in ) :: kdim 997 INTEGER , OPTIONAL , INTENT(in ) :: kcom ! local communicator 998 !! 
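      !!
      !! mppmax_real_multiple reduces a whole 1D array in a single MPI_ALLREDUCE,
      !! returning the element-wise global maxima in one call rather than kdim
      !! separate reductions.  A sketch of a call site through the generic
      !! mpp_max_multiple interface (zmax2, zfld1 and zfld2 are hypothetical
      !! local names, not taken from this module):
      !!
      !!      zmax2(1) = MAXVAL( zfld1(:,:) )     ! local maxima of two fields
      !!      zmax2(2) = MAXVAL( zfld2(:,:) )
      !!      CALL mpp_max_multiple( zmax2, 2 )   ! both global maxima in one reduction
      !!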
999 INTEGER :: ierror, ilocalcomm 1000 REAL(wp), DIMENSION(kdim) :: zwork 1001 !!---------------------------------------------------------------------- 1002 ilocalcomm = mpi_comm_opa 1003 IF( PRESENT(kcom) ) ilocalcomm = kcom 1004 ! 1005 CALL mpi_allreduce( pt1d, zwork, kdim, mpi_double_precision, mpi_max, ilocalcomm, ierror ) 1006 pt1d(:) = zwork(:) 1007 ! 1008 END SUBROUTINE mppmax_real_multiple 2230 1009 2231 1010 … … 2243 1022 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 2244 1023 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 2245 INTEGER , INTENT( out) :: ki, kj 1024 INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame 2246 1025 ! 2247 1026 INTEGER :: ierror … … 2251 1030 !!----------------------------------------------------------------------- 2252 1031 ! 2253 zmin = MINVAL( ptab(:,:) , mask= pmask == 1. e0)2254 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1. e0)1032 zmin = MINVAL( ptab(:,:) , mask= pmask == 1._wp ) 1033 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1._wp ) 2255 1034 ! 2256 1035 ki = ilocs(1) + nimpp - 1 … … 2279 1058 !! 2280 1059 !!-------------------------------------------------------------------------- 2281 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array2282 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask2283 REAL(wp) 2284 INTEGER 2285 ! !1060 REAL(wp), DIMENSION(:,:,:), INTENT(in ) :: ptab ! Local 2D array 1061 REAL(wp), DIMENSION(:,:,:), INTENT(in ) :: pmask ! Local mask 1062 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 1063 INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame 1064 ! 2286 1065 INTEGER :: ierror 2287 1066 REAL(wp) :: zmin ! local minimum … … 2290 1069 !!----------------------------------------------------------------------- 2291 1070 ! 2292 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1. e0)2293 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1. e0)1071 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1._wp ) 1072 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1._wp ) 2294 1073 ! 2295 1074 ki = ilocs(1) + nimpp - 1 … … 2297 1076 kk = ilocs(3) 2298 1077 ! 2299 zain(1,:) =zmin2300 zain(2,:) =ki+10000.*kj+100000000.*kk1078 zain(1,:) = zmin 1079 zain(2,:) = ki + 10000.*kj + 100000000.*kk 2301 1080 ! 2302 1081 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) … … 2331 1110 !!----------------------------------------------------------------------- 2332 1111 ! 2333 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1. e0)2334 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1. e0)1112 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1._wp ) 1113 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1._wp ) 2335 1114 ! 2336 1115 ki = ilocs(1) + nimpp - 1 … … 2359 1138 !! 2360 1139 !!-------------------------------------------------------------------------- 2361 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 2362 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 2363 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 2364 INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame 2365 !! 2366 REAL(wp) :: zmax ! local maximum 1140 REAL(wp), DIMENSION (:,:,:), INTENT(in ) :: ptab ! Local 2D array 1141 REAL(wp), DIMENSION (:,:,:), INTENT(in ) :: pmask ! Local mask 1142 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 1143 INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame 1144 ! 1145 INTEGER :: ierror ! local integer 1146 REAL(wp) :: zmax ! 
local maximum 2367 1147 REAL(wp), DIMENSION(2,1) :: zain, zaout 2368 1148 INTEGER , DIMENSION(3) :: ilocs 2369 INTEGER :: ierror2370 1149 !!----------------------------------------------------------------------- 2371 1150 ! 2372 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1. e0)2373 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1. e0)1151 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1._wp ) 1152 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1._wp ) 2374 1153 ! 2375 1154 ki = ilocs(1) + nimpp - 1 … … 2377 1156 kk = ilocs(3) 2378 1157 ! 2379 zain(1,:) =zmax2380 zain(2,:) =ki+10000.*kj+100000000.*kk1158 zain(1,:) = zmax 1159 zain(2,:) = ki + 10000.*kj + 100000000.*kk 2381 1160 ! 2382 1161 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) … … 2422 1201 2423 1202 SUBROUTINE mpp_comm_free( kcom ) 2424 !!----------------------------------------------------------------------2425 1203 !!---------------------------------------------------------------------- 2426 1204 INTEGER, INTENT(in) :: kcom … … 2680 1458 2681 1459 2682 SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn )2683 !!---------------------------------------------------------------------2684 !! *** routine mpp_lbc_north_3d ***2685 !!2686 !! ** Purpose : Ensure proper north fold horizontal bondary condition2687 !! in mpp configuration in case of jpn1 > 12688 !!2689 !! ** Method : North fold condition and mpp with more than one proc2690 !! in i-direction require a specific treatment. We gather2691 !! the 4 northern lines of the global domain on 1 processor2692 !! and apply lbc north-fold on this sub array. Then we2693 !! scatter the north fold array back to the processors.2694 !!2695 !!----------------------------------------------------------------------2696 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied2697 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points2698 ! ! = T , U , V , F or W gridpoints2699 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold2700 !! ! = 1. , the sign is kept2701 INTEGER :: ji, jj, jr, jk2702 INTEGER :: ierr, itaille, ildi, ilei, iilb2703 INTEGER :: ijpj, ijpjm1, ij, iproc2704 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather2705 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather2706 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather2707 ! ! Workspace for message transfers avoiding mpi_allgather2708 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab2709 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk2710 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio2711 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr2712 2713 INTEGER :: istatus(mpi_status_size)2714 INTEGER :: iflag2715 !!----------------------------------------------------------------------2716 !2717 ALLOCATE( ztab(jpiglo,4,jpk) , znorthloc(jpi,4,jpk), zfoldwk(jpi,4,jpk), znorthgloio(jpi,4,jpk,jpni) )2718 ALLOCATE( ztabl(jpi,4,jpk), ztabr(jpi*jpmaxngh, 4, jpk) )2719 2720 ijpj = 42721 ijpjm1 = 32722 !2723 znorthloc(:,:,:) = 02724 DO jk = 1, jpk2725 DO jj = nlcj - ijpj +1, nlcj ! put in xnorthloc the last 4 jlines of pt3d2726 ij = jj - nlcj + ijpj2727 znorthloc(:,ij,jk) = pt3d(:,jj,jk)2728 END DO2729 END DO2730 !2731 ! ! 
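      !
      ! What follows implements the two alternative communication strategies of
      ! the north-fold treatment, selected by l_north_nogather:
      !    .TRUE.  : point-to-point mppsend/mpprecv with only the neighbours
      !              identified in nemo_northcomms, filling ztabl/ztabr before
      !              the fold is applied by mpp_lbc_nfd ;
      !    .FALSE. : collective MPI_ALLGATHER of the 4 northern rows over the
      !              ncomm_north communicator, followed by lbc_nfd on the
      !              full-width array.
      ! Either way the folded rows are copied back into pt3d afterwards.
      !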
Build in procs of ncomm_north the znorthgloio2732 itaille = jpi * jpk * ijpj2733 2734 IF ( l_north_nogather ) THEN2735 !2736 ztabr(:,:,:) = 02737 ztabl(:,:,:) = 02738 2739 DO jk = 1, jpk2740 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array2741 ij = jj - nlcj + ijpj2742 DO ji = nfsloop, nfeloop2743 ztabl(ji,ij,jk) = pt3d(ji,jj,jk)2744 END DO2745 END DO2746 END DO2747 2748 DO jr = 1,nsndto2749 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN2750 CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) )2751 ENDIF2752 END DO2753 DO jr = 1,nsndto2754 iproc = nfipproc(isendto(jr),jpnj)2755 IF(iproc .ne. -1) THEN2756 ilei = nleit (iproc+1)2757 ildi = nldit (iproc+1)2758 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)2759 ENDIF2760 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN2761 CALL mpprecv(5, zfoldwk, itaille, iproc)2762 DO jk = 1, jpk2763 DO jj = 1, ijpj2764 DO ji = ildi, ilei2765 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk)2766 END DO2767 END DO2768 END DO2769 ELSE IF (iproc .eq. (narea-1)) THEN2770 DO jk = 1, jpk2771 DO jj = 1, ijpj2772 DO ji = ildi, ilei2773 ztabr(iilb+ji,jj,jk) = pt3d(ji,nlcj-ijpj+jj,jk)2774 END DO2775 END DO2776 END DO2777 ENDIF2778 END DO2779 IF (l_isend) THEN2780 DO jr = 1,nsndto2781 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN2782 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err)2783 ENDIF2784 END DO2785 ENDIF2786 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition2787 DO jk = 1, jpk2788 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d2789 ij = jj - nlcj + ijpj2790 DO ji= 1, nlci2791 pt3d(ji,jj,jk) = ztabl(ji,ij,jk)2792 END DO2793 END DO2794 END DO2795 !2796 2797 ELSE2798 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, &2799 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )2800 !2801 ztab(:,:,:) = 0.e02802 DO jr = 1, ndim_rank_north ! recover the global north array2803 iproc = nrank_north(jr) + 12804 ildi = nldit (iproc)2805 ilei = nleit (iproc)2806 iilb = nimppt(iproc)2807 DO jk = 1, jpk2808 DO jj = 1, ijpj2809 DO ji = ildi, ilei2810 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr)2811 END DO2812 END DO2813 END DO2814 END DO2815 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition2816 !2817 DO jk = 1, jpk2818 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d2819 ij = jj - nlcj + ijpj2820 DO ji= 1, nlci2821 pt3d(ji,jj,jk) = ztab(ji+nimpp-1,ij,jk)2822 END DO2823 END DO2824 END DO2825 !2826 ENDIF2827 !2828 ! The ztab array has been either:2829 ! a. Fully populated by the mpi_allgather operation or2830 ! b. Had the active points for this domain and northern neighbours populated2831 ! by peer to peer exchanges2832 ! Either way the array may be folded by lbc_nfd and the result for the span of2833 ! this domain will be identical.2834 !2835 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )2836 DEALLOCATE( ztabl, ztabr )2837 !2838 END SUBROUTINE mpp_lbc_north_3d2839 2840 2841 SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn)2842 !!---------------------------------------------------------------------2843 !! *** routine mpp_lbc_north_2d ***2844 !!2845 !! ** Purpose : Ensure proper north fold horizontal bondary condition2846 !! in mpp configuration in case of jpn1 > 1 (for 2d array )2847 !!2848 !! ** Method : North fold condition and mpp with more than one proc2849 !! in i-direction require a specific treatment. We gather2850 !! 
the 4 northern lines of the global domain on 1 processor2851 !! and apply lbc north-fold on this sub array. Then we2852 !! scatter the north fold array back to the processors.2853 !!2854 !!----------------------------------------------------------------------2855 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the b.c. is applied2856 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points2857 ! ! = T , U , V , F or W gridpoints2858 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold2859 !! ! = 1. , the sign is kept2860 INTEGER :: ji, jj, jr2861 INTEGER :: ierr, itaille, ildi, ilei, iilb2862 INTEGER :: ijpj, ijpjm1, ij, iproc2863 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather2864 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather2865 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather2866 ! ! Workspace for message transfers avoiding mpi_allgather2867 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab2868 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: znorthloc, zfoldwk2869 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio2870 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztabl, ztabr2871 INTEGER :: istatus(mpi_status_size)2872 INTEGER :: iflag2873 !!----------------------------------------------------------------------2874 !2875 ALLOCATE( ztab(jpiglo,4), znorthloc(jpi,4), zfoldwk(jpi,4), znorthgloio(jpi,4,jpni) )2876 ALLOCATE( ztabl(jpi,4), ztabr(jpi*jpmaxngh, 4) )2877 !2878 ijpj = 42879 ijpjm1 = 32880 !2881 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d2882 ij = jj - nlcj + ijpj2883 znorthloc(:,ij) = pt2d(:,jj)2884 END DO2885 2886 ! ! Build in procs of ncomm_north the znorthgloio2887 itaille = jpi * ijpj2888 IF ( l_north_nogather ) THEN2889 !2890 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified2891 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange2892 !2893 ztabr(:,:) = 02894 ztabl(:,:) = 02895 2896 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array2897 ij = jj - nlcj + ijpj2898 DO ji = nfsloop, nfeloop2899 ztabl(ji,ij) = pt2d(ji,jj)2900 END DO2901 END DO2902 2903 DO jr = 1,nsndto2904 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN2905 CALL mppsend(5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr))2906 ENDIF2907 END DO2908 DO jr = 1,nsndto2909 iproc = nfipproc(isendto(jr),jpnj)2910 IF(iproc .ne. -1) THEN2911 ilei = nleit (iproc+1)2912 ildi = nldit (iproc+1)2913 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)2914 ENDIF2915 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN2916 CALL mpprecv(5, zfoldwk, itaille, iproc)2917 DO jj = 1, ijpj2918 DO ji = ildi, ilei2919 ztabr(iilb+ji,jj) = zfoldwk(ji,jj)2920 END DO2921 END DO2922 ELSE IF (iproc .eq. (narea-1)) THEN2923 DO jj = 1, ijpj2924 DO ji = ildi, ilei2925 ztabr(iilb+ji,jj) = pt2d(ji,nlcj-ijpj+jj)2926 END DO2927 END DO2928 ENDIF2929 END DO2930 IF (l_isend) THEN2931 DO jr = 1,nsndto2932 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN2933 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err)2934 ENDIF2935 END DO2936 ENDIF2937 CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition2938 !2939 DO jj = nlcj-ijpj+1, nlcj ! 
Scatter back to pt2d2940 ij = jj - nlcj + ijpj2941 DO ji = 1, nlci2942 pt2d(ji,jj) = ztabl(ji,ij)2943 END DO2944 END DO2945 !2946 ELSE2947 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, &2948 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr )2949 !2950 ztab(:,:) = 0.e02951 DO jr = 1, ndim_rank_north ! recover the global north array2952 iproc = nrank_north(jr) + 12953 ildi = nldit (iproc)2954 ilei = nleit (iproc)2955 iilb = nimppt(iproc)2956 DO jj = 1, ijpj2957 DO ji = ildi, ilei2958 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr)2959 END DO2960 END DO2961 END DO2962 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition2963 !2964 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d2965 ij = jj - nlcj + ijpj2966 DO ji = 1, nlci2967 pt2d(ji,jj) = ztab(ji+nimpp-1,ij)2968 END DO2969 END DO2970 !2971 ENDIF2972 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )2973 DEALLOCATE( ztabl, ztabr )2974 !2975 END SUBROUTINE mpp_lbc_north_2d2976 2977 SUBROUTINE mpp_lbc_north_2d_multiple( pt2d_array, cd_type, psgn, num_fields)2978 !!---------------------------------------------------------------------2979 !! *** routine mpp_lbc_north_2d ***2980 !!2981 !! ** Purpose : Ensure proper north fold horizontal bondary condition2982 !! in mpp configuration in case of jpn1 > 12983 !! (for multiple 2d arrays )2984 !!2985 !! ** Method : North fold condition and mpp with more than one proc2986 !! in i-direction require a specific treatment. We gather2987 !! the 4 northern lines of the global domain on 1 processor2988 !! and apply lbc north-fold on this sub array. Then we2989 !! scatter the north fold array back to the processors.2990 !!2991 !!----------------------------------------------------------------------2992 INTEGER , INTENT (in ) :: num_fields ! number of variables contained in pt2d2993 TYPE( arrayptr ), DIMENSION(:) :: pt2d_array2994 CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: cd_type ! nature of pt2d grid-points2995 ! ! = T , U , V , F or W gridpoints2996 REAL(wp), DIMENSION(:), INTENT(in ) :: psgn ! = -1. the sign change across the north fold2997 !! ! = 1. , the sign is kept2998 INTEGER :: ji, jj, jr, jk2999 INTEGER :: ierr, itaille, ildi, ilei, iilb3000 INTEGER :: ijpj, ijpjm1, ij, iproc3001 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather3002 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather3003 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather3004 ! ! Workspace for message transfers avoiding mpi_allgather3005 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab3006 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk3007 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio3008 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr3009 INTEGER :: istatus(mpi_status_size)3010 INTEGER :: iflag3011 !!----------------------------------------------------------------------3012 !3013 ALLOCATE( ztab(jpiglo,4,num_fields), znorthloc(jpi,4,num_fields), zfoldwk(jpi,4,num_fields), &3014 & znorthgloio(jpi,4,num_fields,jpni) ) ! expanded to 3 dimensions3015 ALLOCATE( ztabl(jpi,4,num_fields), ztabr(jpi*jpmaxngh, 4,num_fields) )3016 !3017 ijpj = 43018 ijpjm1 = 33019 !3020 3021 DO jk = 1, num_fields3022 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d (for every variable)3023 ij = jj - nlcj + ijpj3024 znorthloc(:,ij,jk) = pt2d_array(jk)%pt2d(:,jj)3025 END DO3026 END DO3027 ! ! 
Build in procs of ncomm_north the znorthgloio3028 itaille = jpi * ijpj3029 3030 IF ( l_north_nogather ) THEN3031 !3032 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified3033 ! (in nemo_northcomms) as being involved in this process' northern boundary exchange3034 !3035 ztabr(:,:,:) = 03036 ztabl(:,:,:) = 03037 3038 DO jk = 1, num_fields3039 DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array3040 ij = jj - nlcj + ijpj3041 DO ji = nfsloop, nfeloop3042 ztabl(ji,ij,jk) = pt2d_array(jk)%pt2d(ji,jj)3043 END DO3044 END DO3045 END DO3046 3047 DO jr = 1,nsndto3048 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN3049 CALL mppsend(5, znorthloc, itaille*num_fields, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) ! Buffer expanded "num_fields" times3050 ENDIF3051 END DO3052 DO jr = 1,nsndto3053 iproc = nfipproc(isendto(jr),jpnj)3054 IF(iproc .ne. -1) THEN3055 ilei = nleit (iproc+1)3056 ildi = nldit (iproc+1)3057 iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj)3058 ENDIF3059 IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN3060 CALL mpprecv(5, zfoldwk, itaille*num_fields, iproc) ! Buffer expanded "num_fields" times3061 DO jk = 1 , num_fields3062 DO jj = 1, ijpj3063 DO ji = ildi, ilei3064 ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) ! Modified to 3D3065 END DO3066 END DO3067 END DO3068 ELSE IF (iproc .eq. (narea-1)) THEN3069 DO jk = 1, num_fields3070 DO jj = 1, ijpj3071 DO ji = ildi, ilei3072 ztabr(iilb+ji,jj,jk) = pt2d_array(jk)%pt2d(ji,nlcj-ijpj+jj) ! Modified to 3D3073 END DO3074 END DO3075 END DO3076 ENDIF3077 END DO3078 IF (l_isend) THEN3079 DO jr = 1,nsndto3080 IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN3081 CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err)3082 ENDIF3083 END DO3084 ENDIF3085 !3086 DO ji = 1, num_fields ! Loop to manage 3D variables3087 CALL mpp_lbc_nfd( ztabl(:,:,ji), ztabr(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition3088 END DO3089 !3090 DO jk = 1, num_fields3091 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d3092 ij = jj - nlcj + ijpj3093 DO ji = 1, nlci3094 pt2d_array(jk)%pt2d(ji,jj) = ztabl(ji,ij,jk) ! Modified to 3D3095 END DO3096 END DO3097 END DO3098 3099 !3100 ELSE3101 !3102 CALL MPI_ALLGATHER( znorthloc , itaille*num_fields, MPI_DOUBLE_PRECISION, &3103 & znorthgloio, itaille*num_fields, MPI_DOUBLE_PRECISION, ncomm_north, ierr )3104 !3105 ztab(:,:,:) = 0.e03106 DO jk = 1, num_fields3107 DO jr = 1, ndim_rank_north ! recover the global north array3108 iproc = nrank_north(jr) + 13109 ildi = nldit (iproc)3110 ilei = nleit (iproc)3111 iilb = nimppt(iproc)3112 DO jj = 1, ijpj3113 DO ji = ildi, ilei3114 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr)3115 END DO3116 END DO3117 END DO3118 END DO3119 3120 DO ji = 1, num_fields3121 CALL lbc_nfd( ztab(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition3122 END DO3123 !3124 DO jk = 1, num_fields3125 DO jj = nlcj-ijpj+1, nlcj ! 
Scatter back to pt2d3126 ij = jj - nlcj + ijpj3127 DO ji = 1, nlci3128 pt2d_array(jk)%pt2d(ji,jj) = ztab(ji+nimpp-1,ij,jk)3129 END DO3130 END DO3131 END DO3132 !3133 !3134 ENDIF3135 DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio )3136 DEALLOCATE( ztabl, ztabr )3137 !3138 END SUBROUTINE mpp_lbc_north_2d_multiple3139 3140 1460 SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn) 3141 1461 !!--------------------------------------------------------------------- … … 3155 1475 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo 3156 1476 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 3157 ! ! = T , U , V , F or W -points 3158 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 3159 !! ! north fold, = 1. otherwise 1477 REAL(wp) , INTENT(in ) :: psgn ! sign used across the north fold 1478 ! 3160 1479 INTEGER :: ji, jj, jr 3161 1480 INTEGER :: ierr, itaille, ildi, ilei, iilb 3162 1481 INTEGER :: ijpj, ij, iproc 3163 !3164 1482 REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e 3165 1483 REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio_e 3166 3167 1484 !!---------------------------------------------------------------------- 3168 1485 ! 3169 1486 ALLOCATE( ztab_e(jpiglo,4+2*jpr2dj), znorthloc_e(jpi,4+2*jpr2dj), znorthgloio_e(jpi,4+2*jpr2dj,jpni) ) 3170 3171 1487 ! 3172 1488 ijpj=4 3173 ztab_e(:,:) = 0. e03174 3175 ij =01489 ztab_e(:,:) = 0._wp 1490 1491 ij = 0 3176 1492 ! put in znorthloc_e the last 4 jlines of pt2d 3177 1493 DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj 3178 1494 ij = ij + 1 3179 1495 DO ji = 1, jpi 3180 znorthloc_e(ji,ij) =pt2d(ji,jj)1496 znorthloc_e(ji,ij) = pt2d(ji,jj) 3181 1497 END DO 3182 1498 END DO 3183 1499 ! 3184 1500 itaille = jpi * ( ijpj + 2 * jpr2dj ) 3185 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, &1501 CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & 3186 1502 & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 3187 1503 ! 3188 1504 DO jr = 1, ndim_rank_north ! recover the global north array 3189 1505 iproc = nrank_north(jr) + 1 3190 ildi = nldit (iproc)3191 ilei = nleit (iproc)3192 iilb = nimppt(iproc)1506 ildi = nldit (iproc) 1507 ilei = nleit (iproc) 1508 iilb = nimppt(iproc) 3193 1509 DO jj = 1, ijpj+2*jpr2dj 3194 1510 DO ji = ildi, ilei … … 3198 1514 END DO 3199 1515 3200 3201 1516 ! 2. North-Fold boundary conditions 3202 1517 ! ---------------------------------- 3203 CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj )1518 !!gm ERROR CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj ) 3204 1519 3205 1520 ij = jpr2dj … … 3215 1530 ! 3216 1531 END SUBROUTINE mpp_lbc_north_e 3217 3218 3219 SUBROUTINE mpp_lnk_bdy_3d( ptab, cd_type, psgn, ib_bdy )3220 !!----------------------------------------------------------------------3221 !! *** routine mpp_lnk_bdy_3d ***3222 !!3223 !! ** Purpose : Message passing management3224 !!3225 !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries3226 !! between processors following neighboring subdomains.3227 !! domain parameters3228 !! nlci : first dimension of the local subdomain3229 !! nlcj : second dimension of the local subdomain3230 !! nbondi_bdy : mark for "east-west local boundary"3231 !! nbondj_bdy : mark for "north-south local boundary"3232 !! noea : number for local neighboring processors3233 !! nowe : number for local neighboring processors3234 !! noso : number for local neighboring processors3235 !! 
nono : number for local neighboring processors3236 !!3237 !! ** Action : ptab with update value at its periphery3238 !!3239 !!----------------------------------------------------------------------3240 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied3241 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points3242 ! ! = T , U , V , F , W points3243 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary3244 ! ! = 1. , the sign is kept3245 INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set3246 !3247 INTEGER :: ji, jj, jk, jl ! dummy loop indices3248 INTEGER :: imigr, iihom, ijhom ! local integers3249 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend3250 REAL(wp) :: zland ! local scalar3251 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend3252 !3253 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north3254 REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east3255 !!----------------------------------------------------------------------3256 !3257 ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), &3258 & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) )3259 3260 zland = 0._wp3261 3262 ! 1. standard boundary treatment3263 ! ------------------------------3264 ! ! East-West boundaries3265 ! !* Cyclic east-west3266 IF( nbondi == 2) THEN3267 IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN3268 ptab( 1 ,:,:) = ptab(jpim1,:,:)3269 ptab(jpi,:,:) = ptab( 2 ,:,:)3270 ELSE3271 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point3272 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north3273 ENDIF3274 ELSEIF(nbondi == -1) THEN3275 IF( .NOT. cd_type == 'F' ) ptab(1:jpreci,:,:) = zland ! south except F-point3276 ELSEIF(nbondi == 1) THEN3277 ptab(nlci-jpreci+1:jpi,:,:) = zland ! north3278 ENDIF !* closed3279 3280 IF (nbondj == 2 .OR. nbondj == -1) THEN3281 IF( .NOT. cd_type == 'F' ) ptab(:,1:jprecj,:) = zland ! south except F-point3282 ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN3283 ptab(:,nlcj-jprecj+1:jpj,:) = zland ! north3284 ENDIF3285 !3286 ! 2. East and west directions exchange3287 ! ------------------------------------3288 ! we play with the neigbours AND the row number because of the periodicity3289 !3290 SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions3291 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case)3292 iihom = nlci-nreci3293 DO jl = 1, jpreci3294 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)3295 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:)3296 END DO3297 END SELECT3298 !3299 ! ! 
Migrations3300 imigr = jpreci * jpj * jpk3301 !3302 SELECT CASE ( nbondi_bdy(ib_bdy) )3303 CASE ( -1 )3304 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 )3305 CASE ( 0 )3306 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )3307 CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 )3308 CASE ( 1 )3309 CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )3310 END SELECT3311 !3312 SELECT CASE ( nbondi_bdy_b(ib_bdy) )3313 CASE ( -1 )3314 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )3315 CASE ( 0 )3316 CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )3317 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )3318 CASE ( 1 )3319 CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )3320 END SELECT3321 !3322 SELECT CASE ( nbondi_bdy(ib_bdy) )3323 CASE ( -1 )3324 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3325 CASE ( 0 )3326 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3327 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)3328 CASE ( 1 )3329 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3330 END SELECT3331 !3332 ! ! Write Dirichlet lateral conditions3333 iihom = nlci-jpreci3334 !3335 SELECT CASE ( nbondi_bdy_b(ib_bdy) )3336 CASE ( -1 )3337 DO jl = 1, jpreci3338 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)3339 END DO3340 CASE ( 0 )3341 DO jl = 1, jpreci3342 ptab( jl,:,:) = zt3we(:,jl,:,2)3343 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)3344 END DO3345 CASE ( 1 )3346 DO jl = 1, jpreci3347 ptab( jl,:,:) = zt3we(:,jl,:,2)3348 END DO3349 END SELECT3350 3351 3352 ! 3. North and south directions3353 ! -----------------------------3354 ! always closed : we play only with the neigbours3355 !3356 IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions3357 ijhom = nlcj-nrecj3358 DO jl = 1, jprecj3359 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)3360 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)3361 END DO3362 ENDIF3363 !3364 ! ! Migrations3365 imigr = jprecj * jpi * jpk3366 !3367 SELECT CASE ( nbondj_bdy(ib_bdy) )3368 CASE ( -1 )3369 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 )3370 CASE ( 0 )3371 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )3372 CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 )3373 CASE ( 1 )3374 CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )3375 END SELECT3376 !3377 SELECT CASE ( nbondj_bdy_b(ib_bdy) )3378 CASE ( -1 )3379 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )3380 CASE ( 0 )3381 CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )3382 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )3383 CASE ( 1 )3384 CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )3385 END SELECT3386 !3387 SELECT CASE ( nbondj_bdy(ib_bdy) )3388 CASE ( -1 )3389 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3390 CASE ( 0 )3391 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3392 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)3393 CASE ( 1 )3394 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)3395 END SELECT3396 !3397 ! ! Write Dirichlet lateral conditions3398 ijhom = nlcj-jprecj3399 !3400 SELECT CASE ( nbondj_bdy_b(ib_bdy) )3401 CASE ( -1 )3402 DO jl = 1, jprecj3403 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)3404 END DO3405 CASE ( 0 )3406 DO jl = 1, jprecj3407 ptab(:,jl ,:) = zt3sn(:,jl,:,2)3408 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)3409 END DO3410 CASE ( 1 )3411 DO jl = 1, jprecj3412 ptab(:,jl,:) = zt3sn(:,jl,:,2)3413 END DO3414 END SELECT3415 3416 3417 ! 4. north fold treatment3418 ! -----------------------3419 !3420 IF( npolj /= 0) THEN3421 !3422 SELECT CASE ( jpni )3423 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! 
3434      SUBROUTINE mpp_lnk_bdy_2d( ptab, cd_type, psgn, ib_bdy )
3435      !!----------------------------------------------------------------------
3436      !!                  ***  routine mpp_lnk_bdy_2d  ***
3437      !!
3438      !! ** Purpose :   Message passing management
3439      !!
3440      !! ** Method  :   Use mppsend and mpprecv function for passing BDY boundaries
3441      !!      between processors following neighboring subdomains.
3442      !!      domain parameters
3443      !!          nlci   : first dimension of the local subdomain
3444      !!          nlcj   : second dimension of the local subdomain
3445      !!          nbondi_bdy : mark for "east-west local boundary"
3446      !!          nbondj_bdy : mark for "north-south local boundary"
3447      !!          noea   : number for local neighboring processors
3448      !!          nowe   : number for local neighboring processors
3449      !!          noso   : number for local neighboring processors
3450      !!          nono   : number for local neighboring processors
3451      !!
3452      !! ** Action  :   ptab with updated value at its periphery
3453      !!
3454      !!----------------------------------------------------------------------
3455      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   ptab      ! 2D array on which the boundary condition is applied
3456      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
3457      !                                                          ! = T , U , V , F , W points
3458      REAL(wp)                    , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
3459      !                                                          ! = 1. , the sign is kept
3460      INTEGER                     , INTENT(in   ) ::   ib_bdy    ! BDY boundary set
3461      !
3462      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
3463      INTEGER  ::   imigr, iihom, ijhom        ! local integers
3464      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
3465      REAL(wp) ::   zland                      ! local scalar
3466      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
3467      !
3468      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ns, zt2sn   ! 2d for north-south & south-north
3469      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ew, zt2we   ! 2d for east-west & west-east
3470      !!----------------------------------------------------------------------
3471
3472      ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2),   &
3473         &      zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2)  )
3474
3475      zland = 0._wp
3476
3477      ! 1. standard boundary treatment
3478      ! ------------------------------
3479      !                                   ! East-West boundaries
3480      !                                        !* Cyclic east-west
3481      IF( nbondi == 2 ) THEN
3482         IF( nperio == 1 .OR. nperio == 4 .OR. nperio == 6 ) THEN
3483            ptab( 1 ,:) = ptab(jpim1,:)
3484            ptab(jpi,:) = ptab(  2  ,:)
3485         ELSE
3486            IF( .NOT. cd_type == 'F' )   ptab(1:jpreci,:) = zland    ! south except F-point
3487            ptab(nlci-jpreci+1:jpi,:) = zland                        ! north
3488         ENDIF
3489      ELSEIF( nbondi == -1 ) THEN
3490         IF( .NOT. cd_type == 'F' )   ptab(1:jpreci,:) = zland       ! south except F-point
3491      ELSEIF( nbondi == 1 ) THEN
3492         ptab(nlci-jpreci+1:jpi,:) = zland                           ! north
3493      ENDIF
3494      !                                        !* closed
3495      IF( nbondj == 2 .OR. nbondj == -1 ) THEN
3496         IF( .NOT. cd_type == 'F' )   ptab(:,1:jprecj) = zland       ! south except F-point
3497      ENDIF
3498      IF( nbondj == 2 .OR. nbondj ==  1 ) THEN   ;   ptab(:,nlcj-jprecj+1:jpj) = zland   ! north (nbondj == 2 closes both sides)
3499      ENDIF
3500      !
3501      ! 2. East and west directions exchange
3502      ! ------------------------------------
3503      ! we play with the neighbours AND the row number because of the periodicity
3504      !
3505      SELECT CASE ( nbondi_bdy(ib_bdy) )      ! Read Dirichlet lateral conditions
3506      CASE ( -1, 0, 1 )                       ! all except 2 (i.e. close case)
3507         iihom = nlci-nreci
3508         DO jl = 1, jpreci
3509            zt2ew(:,jl,1) = ptab(jpreci+jl,:)
3510            zt2we(:,jl,1) = ptab(iihom +jl,:)
3511         END DO
3512      END SELECT
3513      !
3514      !                           ! Migrations
3515      imigr = jpreci * jpj
3516      !
3517      SELECT CASE ( nbondi_bdy(ib_bdy) )
3518      CASE ( -1 )
3519         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 )
3520      CASE ( 0 )
3521         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
3522         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 )
3523      CASE ( 1 )
3524         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
3525      END SELECT
3526      !
3527      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
3528      CASE ( -1 )
3529         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
3530      CASE ( 0 )
3531         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
3532         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
3533      CASE ( 1 )
3534         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
3535      END SELECT
3536      !
3537      SELECT CASE ( nbondi_bdy(ib_bdy) )
3538      CASE ( -1 )
3539         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3540      CASE ( 0 )
3541         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3542         IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
3543      CASE ( 1 )
3544         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3545      END SELECT
3546      !
3547      !                           ! Write Dirichlet lateral conditions
3548      iihom = nlci-jpreci
3549      !
3550      SELECT CASE ( nbondi_bdy_b(ib_bdy) )
3551      CASE ( -1 )
3552         DO jl = 1, jpreci
3553            ptab(iihom+jl,:) = zt2ew(:,jl,2)
3554         END DO
3555      CASE ( 0 )
3556         DO jl = 1, jpreci
3557            ptab(      jl,:) = zt2we(:,jl,2)
3558            ptab(iihom+jl,:) = zt2ew(:,jl,2)
3559         END DO
3560      CASE ( 1 )
3561         DO jl = 1, jpreci
3562            ptab(      jl,:) = zt2we(:,jl,2)
3563         END DO
3564      END SELECT
3565
3566
3567      ! 3. North and south directions
3568      ! -----------------------------
3569      ! always closed : we play only with the neighbours
3570      !
3571      IF( nbondj_bdy(ib_bdy) /= 2 ) THEN      ! Read Dirichlet lateral conditions
3572         ijhom = nlcj-nrecj
3573         DO jl = 1, jprecj
3574            zt2sn(:,jl,1) = ptab(:,ijhom +jl)
3575            zt2ns(:,jl,1) = ptab(:,jprecj+jl)
3576         END DO
3577      ENDIF
3578      !
3579      !                           ! Migrations
3580      imigr = jprecj * jpi
3581      !
3582      SELECT CASE ( nbondj_bdy(ib_bdy) )
3583      CASE ( -1 )
3584         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 )
3585      CASE ( 0 )
3586         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
3587         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 )
3588      CASE ( 1 )
3589         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
3590      END SELECT
3591      !
3592      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
3593      CASE ( -1 )
3594         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
3595      CASE ( 0 )
3596         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
3597         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
3598      CASE ( 1 )
3599         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
3600      END SELECT
3601      !
3602      SELECT CASE ( nbondj_bdy(ib_bdy) )
3603      CASE ( -1 )
3604         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3605      CASE ( 0 )
3606         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3607         IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
3608      CASE ( 1 )
3609         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
3610      END SELECT
3611      !
3612      !                           ! Write Dirichlet lateral conditions
3613      ijhom = nlcj-jprecj
3614      !
3615      SELECT CASE ( nbondj_bdy_b(ib_bdy) )
3616      CASE ( -1 )
3617         DO jl = 1, jprecj
3618            ptab(:,ijhom+jl) = zt2ns(:,jl,2)
3619         END DO
3620      CASE ( 0 )
3621         DO jl = 1, jprecj
3622            ptab(:,      jl) = zt2sn(:,jl,2)
3623            ptab(:,ijhom+jl) = zt2ns(:,jl,2)
3624         END DO
3625      CASE ( 1 )
3626         DO jl = 1, jprecj
3627            ptab(:,      jl) = zt2sn(:,jl,2)
3628         END DO
3629      END SELECT
3630
3631
3632      ! 4. north fold treatment
3633      ! -----------------------
3634      !
3635      IF( npolj /= 0 ) THEN
3636         !
3637         SELECT CASE ( jpni )
3638         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
3639         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
3640         END SELECT
3641         !
3642      ENDIF
3643      !
3644      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
3645      !
3646      END SUBROUTINE mpp_lnk_bdy_2d
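The cyclic east-west branch at the top of both routines is easy to check in isolation: the western halo column receives the last interior column (jpim1 = jpi-1) and the eastern halo receives the first interior column, so a one-point halo wraps around periodically. A self-contained toy, with jpi, jpj and the column values invented here for illustration only:

   PROGRAM cyclic_ew_sketch
      IMPLICIT NONE
      INTEGER, PARAMETER ::   jpi = 6, jpj = 3, jpim1 = jpi-1
      REAL(8), DIMENSION(jpi,jpj) ::   ptab
      INTEGER ::   ji
      !
      DO ji = 1, jpi                   ! mark every column with its own index
         ptab(ji,:) = REAL( ji, 8 )
      END DO
      ptab( 1 ,:) = ptab(jpim1,:)      ! west halo <- last  interior column
      ptab(jpi,:) = ptab(  2  ,:)      ! east halo <- first interior column
      !
      PRINT *, 'west halo =', ptab(1,1)     ! prints 5: the field wraps around
      PRINT *, 'east halo =', ptab(jpi,1)   ! prints 2
   END PROGRAM cyclic_ew_sketch

Typical call sites (illustrative names, not taken from this changeset) would be CALL mpp_lnk_bdy_2d( zssh, 'T', 1._wp, ib_bdy ) for a tracer-like field, and CALL mpp_lnk_bdy_3d( zu, 'U', -1._wp, ib_bdy ) for a velocity component whose sign flips across the north fold.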
3647 1532
3648 1533
…
3706 1591      END SUBROUTINE mpi_init_opa
3707 1592
3708           SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype)
     1593
     1594      SUBROUTINE DDPDD_MPI( ydda, yddb, ilen, itype )
3709 1595      !!---------------------------------------------------------------------
3710 1596      !!   Routine DDPDD_MPI: used by reduction operator MPI_SUMDD
…
3713 1599      !!   This subroutine computes yddb(i) = ydda(i)+yddb(i)
3714 1600      !!---------------------------------------------------------------------
3715           INTEGER                     , INTENT(in   ) ::   ilen, itype
3716           COMPLEX(wp), DIMENSION(ilen), INTENT(in   ) ::   ydda
3717           COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::   yddb
     1601      INTEGER                     , INTENT(in   ) ::   ilen, itype
     1602      COMPLEX(wp), DIMENSION(ilen), INTENT(in   ) ::   ydda
     1603      COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::   yddb
3718 1604      !
3719 1605      REAL(wp) ::   zerr, zt1, zt2   ! local work variables
3720           INTEGER  ::   ji, ztmp         ! local scalar
3721
     1606      INTEGER  ::   ji, ztmp          ! local scalar
     1607      !!---------------------------------------------------------------------
     1608      !
3722 1609      ztmp = itype   ! avoid compilation warning
3723 1610      !
3724 1611      DO ji=1,ilen
3725 1612         ! Compute ydda + yddb using Knuth's trick.
…
3732 1619         yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1), wp )
3733 1620      END DO
3734 1621      !
3735 1622      END SUBROUTINE DDPDD_MPI
3736 1623
…
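For readers unfamiliar with compensated summation, the kernel elided above ("…") is Knuth's exact two-sum: the rounded sum and its exact rounding error are carried separately, here in the two halves of a COMPLEX value. A self-contained sketch of the trick, using standard Fortran only and values invented for illustration:

   PROGRAM two_sum_sketch
      IMPLICIT NONE
      INTEGER, PARAMETER ::   wp = KIND(1.d0)
      REAL(wp) ::   za, zb, zt1, zerr
      !
      za = 1.0_wp
      zb = 1.0e-17_wp                  ! entirely lost by a naive addition
      zt1  = za + zb                   ! rounded sum: equals 1.0 exactly
      zerr = zb - ( zt1 - za )         ! exact error of that addition
      !
      PRINT *, 'rounded sum        :', zt1
      PRINT *, 'recovered low part :', zerr   ! ~1.0e-17, re-injected into later terms
   END PROGRAM two_sum_sketch

This kernel is what makes the MPI_SUMDD reduction reproducible: mpi_init_opa registers DDPDD_MPI as a user-defined operator (via MPI_OP_CREATE), so global sums accumulate a (value, error) pair instead of a bare real.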
3802 1689      END DO
3803 1690
3804
3805 1691      ! 2. North-Fold boundary conditions
3806 1692      ! ----------------------------------
3807           CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = ipr2dj )
     1693      !!gm ERROR      CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = ipr2dj )
3808 1694
3809 1695      ij = ipr2dj
…
3841 1727      !!          nono   : number for local neighboring processors
3842 1728      !!----------------------------------------------------------------------
     1729      REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
     1730      CHARACTER(len=1)                                    , INTENT(in   ) ::   cd_type   ! nature of ptab array grid-points
     1731      REAL(wp)                                            , INTENT(in   ) ::   psgn      ! sign used across the north fold
3843 1732      INTEGER                                             , INTENT(in   ) ::   jpri
3844 1733      INTEGER                                             , INTENT(in   ) ::   jprj
3845           REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
3846           CHARACTER(len=1)                                    , INTENT(in   ) ::   cd_type   ! nature of ptab array grid-points
3847           !                                                                                  ! = T , U , V , F , W and I points
3848           REAL(wp)                                            , INTENT(in   ) ::   psgn      ! =-1 the sign change across the
3849           !!                                                                                 ! north boundary, = 1. otherwise
     1734      !
3850 1735      INTEGER  ::   jl   ! dummy loop indices
3851           INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
3852           INTEGER  ::   ipreci, iprecj             ! temporary integers
     1736      INTEGER  ::   imigr, iihom, ijhom        ! local integers
     1737      INTEGER  ::   ipreci, iprecj             !   -       -
3853 1738      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
3854 1739      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
3855 1740      !!
3856           REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dns
3857           REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dsn
3858           REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dwe
3859           REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dew
     1741      REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) ::   r2dns, r2dsn
     1742      REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) ::   r2dwe, r2dew
3860 1743      !!----------------------------------------------------------------------
…
3875 1758      !
3876 1759      ELSE                                     !* closed
3877           IF( .NOT. cd_type == 'F' )   pt2d( 1-jpri :jpreci ,:) = 0.e0    ! south except at F-point
3878           pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0    ! north
     1760      IF( .NOT. cd_type == 'F' )   pt2d( 1-jpri :jpreci ,:) = 0._wp   ! south except at F-point
     1761      pt2d(nlci-jpreci+1:jpi+jpri,:) = 0._wp   ! north
3879 1762      ENDIF
3880 1763      !
…
3885 1768      !
3886 1769      SELECT CASE ( jpni )
3887           CASE ( 1 )     ;   CALL lbc_nfd          ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
3888           CASE DEFAULT   ;   CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
     1770      !!gm ERROR      CASE ( 1 )     ;   CALL lbc_nfd          ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
     1771      !!gm ERROR      CASE DEFAULT   ;   CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj )
3889 1772      END SELECT
3890 1773      !
…
3996 1879      END DO
3997 1880      END SELECT
3998 1881
3999 1882      !
4000 1883      END SUBROUTINE mpp_lnk_2d_icb
…
4020 1903      MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d
4021 1904      END INTERFACE
     1905      INTERFACE mpp_max_multiple
     1906         MODULE PROCEDURE mppmax_real_multiple
     1907      END INTERFACE
4022 1908
4023 1909      LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.   !: mpp flag
…
4191 2077      WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom
4192 2078      END SUBROUTINE mpp_comm_free
     2079
     2080      SUBROUTINE mppmax_real_multiple( ptab, kdim , kcom )
     2081         REAL   , DIMENSION(:) ::   ptab   !
     2082         INTEGER               ::   kdim   !
     2083         INTEGER, OPTIONAL     ::   kcom   !
     2084         WRITE(*,*) 'mppmax_real_multiple: You should not have seen this print! error?', ptab(1), kdim
     2085      END SUBROUTINE mppmax_real_multiple
     2086
4193 2087      #endif
…
4225 2119      CALL FLUSH(numout    )
4226 2120      IF( numstp     /= -1 )   CALL FLUSH(numstp    )
4227           IF( numsol     /= -1 )   CALL FLUSH(numsol    )
     2121      IF( numrun     /= -1 )   CALL FLUSH(numrun    )
4228 2122      IF( numevo_ice /= -1 )   CALL FLUSH(numevo_ice)
4229 2123      !
…
4332 2226      WRITE(kout,*)
4333 2227      ENDIF
4334           CALL FLUSH( kout)
     2228      CALL FLUSH( kout )
4335 2229      STOP 'ctl_opn bad opening'
4336 2230      ENDIF
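The mppmax_real_multiple stub added at new lines 2080-2085 follows the module's usual no-MPI convention: every MPI entry point also exists in the monoprocessor branch, so calling code compiles unchanged whether or not key_mpp_mpi is defined, and a stub that should be unreachable merely warns if it is ever executed. A minimal sketch of the same pattern, with all names invented here and not part of this changeset:

   MODULE mpp_stub_sketch
      IMPLICIT NONE
   CONTAINS
   #if defined key_mpp_mpi
      ! ... the real MPI implementation would be compiled here ...
   #else
      SUBROUTINE mpp_sketch_max( ptab, kdim )
         REAL   , DIMENSION(:) ::   ptab   ! array argument (unused in the stub)
         INTEGER               ::   kdim   ! its size       (unused in the stub)
         WRITE(*,*) 'mpp_sketch_max: You should not have seen this print! error?', ptab(1), kdim
      END SUBROUTINE mpp_sketch_max
   #endif
   END MODULE mpp_stub_sketch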