Changeset 1344 for trunk/NEMO/OPA_SRC/lib_mpp.F90
- Timestamp: 2009-03-27T15:02:19+01:00
- File: 1 edited
trunk/NEMO/OPA_SRC/lib_mpp.F90
r1304 r1344 2 2 !!====================================================================== 3 3 !! *** MODULE lib_mpp *** 4 !! Ocean numerics: massively parallel processing libra iry4 !! Ocean numerics: massively parallel processing library 5 5 !!===================================================================== 6 #if defined key_mpp_mpi || defined key_mpp_shmem 6 !! History : OPA ! 1994 (M. Guyon, J. Escobar, M. Imbard) Original code 7 !! 7.0 ! 1997 (A.M. Treguier) SHMEM additions 8 !! 8.0 ! 1998 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 9 !! ! 1998 (J.M. Molines) Open boundary conditions 10 !! NEMO 1.0 ! 2003 (J.-M. Molines, G. Madec) F90, free form 11 !! ! 2003 (J.M. Molines) add mpp_ini_north(_3d,_2d) 12 !! - ! 2004 (R. Bourdalle Badie) isend option in mpi 13 !! ! 2004 (J.M. Molines) minloc, maxloc 14 !! - ! 2005 (G. Madec, S. Masson) npolj=5,6 F-point & ice cases 15 !! - ! 2005 (R. Redler) Replacement of MPI_COMM_WORLD except for MPI_Abort 16 !! - ! 2005 (R. Benshila, G. Madec) add extra halo case 17 !! - ! 2008 (R. Benshila) add mpp_ini_ice 18 !! 3.2 ! 2009 (R. Benshila) SHMEM suppression, north fold in lbc_nfd 7 19 !!---------------------------------------------------------------------- 8 !! 'key_mpp_mpi' OR MPI massively parallel processing library 9 !! 'key_mpp_shmem' SHMEM massively parallel processing library 20 #if defined key_mpp_mpi 10 21 !!---------------------------------------------------------------------- 11 !! mynode 12 !! mpparent 13 !! mppshmem 14 !! mpp_lnk : generic interface (defined in lbclnk) for : 15 !! mpp_lnk_2d, mpp_lnk_3d 22 !! 'key_mpp_mpi' MPI massively parallel processing library 23 !!---------------------------------------------------------------------- 24 !! mynode : indentify the processor unit 25 !! mpp_lnk : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d) 16 26 !! mpp_lnk_3d_gather : Message passing manadgement for two 3D arrays 17 !! mpp_lnk_e : interface defined in lbclnk 18 !! mpplnks 19 !! mpprecv 20 !! mppsend 21 !! mppscatter 22 !! mppgather 23 !! mpp_isl : generic inteface for : 24 !! mppisl_int , mppisl_a_int , mppisl_real, mppisl_a_real 25 !! mpp_min : generic interface for : 26 !! mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real 27 !! mpp_max : generic interface for : 28 !! mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real 29 !! mpp_sum : generic interface for : 30 !! mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real 31 !! mpp_minloc 32 !! mpp_maxloc 33 !! mppsync 34 !! mppstop 35 !! mppobc : variant of mpp_lnk for open boundaries 36 !! mpp_ini_north 37 !! mpp_lbc_north 38 !! mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo (nsolv=4) 27 !! mpp_lnk_e : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e) 28 !! mpprecv : 29 !! mppsend : 30 !! mppscatter : 31 !! mppgather : 32 !! mpp_isl : generic inteface for mppisl_int , mppisl_a_int , mppisl_real, mppisl_a_real 33 !! mpp_min : generic interface for mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real 34 !! mpp_max : generic interface for mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real 35 !! mpp_sum : generic interface for mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real 36 !! mpp_minloc : 37 !! mpp_maxloc : 38 !! mppsync : 39 !! mppstop : 40 !! mppobc : variant of mpp_lnk for open boundary condition 41 !! mpp_ini_north : initialisation of north fold 42 !! mpp_lbc_north : north fold processors gathering 43 !! 
mpp_lbc_north_e : variant of mpp_lbc_north for extra outer halo 39 44 !!---------------------------------------------------------------------- 40 45 !! History : … … 46 51 !! ! 05 (G. Madec, S. Masson) npolj=5,6 F-point & ice cases 47 52 !! ! 05 (R. Redler) Replacement of MPI_COMM_WORLD except for MPI_Abort 53 !! ! 09 (R. Benshila) SHMEM suppression, north fold in lbc_nfd 48 54 !!---------------------------------------------------------------------- 49 55 !! OPA 9.0 , LOCEAN-IPSL (2005) … … 54 60 USE dom_oce ! ocean space and time domain 55 61 USE in_out_manager ! I/O manager 62 USE lbcnfd ! north fold treatment 56 63 57 64 IMPLICIT NONE 58 59 65 PRIVATE 60 PUBLIC mynode, mpparent, mpp_isl, mpp_min, mpp_max, mpp_sum, mpp_lbc_north 61 PUBLIC mpp_lbc_north_e, mpp_minloc, mpp_maxloc, mpp_lnk_3d, mpp_lnk_2d, mpp_lnk_3d_gather, mpp_lnk_2d_e, mpplnks 62 PUBLIC mpprecv, mppsend, mppscatter, mppgather, mppobc, mpp_ini_north, mppstop, mppsync, mpp_ini_ice, mpp_comm_free 66 67 PUBLIC mynode, mppstop, mppsync, mpp_comm_free 68 PUBLIC mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e 69 PUBLIC mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc 70 PUBLIC mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e 71 PUBLIC mpprecv, mppsend, mppscatter, mppgather 72 PUBLIC mppobc, mpp_ini_ice, mpp_isl 63 73 #if defined key_oasis3 || defined key_oasis4 64 PUBLIC mppsize, mpprank74 PUBLIC mppsize, mpprank 65 75 #endif 66 76 67 77 !! * Interfaces 68 78 !! define generic interface for these routine as they are called sometimes 69 !! with scalar arguments instead of array arguments, which causes problems 70 !! for the compilation on AIX system as well as NEC and SGI. Ok on COMPACQ 71 79 !! with scalar arguments instead of array arguments, which causes problems 80 !! for the compilation on AIX system as well as NEC and SGI. Ok on COMPACQ 72 81 INTERFACE mpp_isl 73 82 MODULE PROCEDURE mppisl_a_int, mppisl_int, mppisl_a_real, mppisl_real … … 85 94 MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d 86 95 END INTERFACE 87 INTERFACE mpp_minloc 88 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d 89 END INTERFACE 90 INTERFACE mpp_maxloc 91 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 92 END INTERFACE 93 94 95 !! * Share module variables 96 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .TRUE. !: mpp flag 97 98 !! The processor number is a required power of two : 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,... 99 INTEGER, PARAMETER :: & 100 nprocmax = 2**10 ! maximun dimension 101 102 #if defined key_mpp_mpi 96 INTERFACE mpp_minloc 97 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d 98 END INTERFACE 99 INTERFACE mpp_maxloc 100 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 101 END INTERFACE 102 103 103 104 !! ========================= !! 104 105 !! MPI variable definition !! 105 106 !! ========================= !! 106 !$AGRIF_DO_NOT_TREAT107 !$AGRIF_DO_NOT_TREAT 107 108 # include <mpif.h> 108 !$AGRIF_END_DO_NOT_TREAT 109 110 INTEGER :: & 111 mppsize, & ! number of process 112 mpprank, & ! process number [ 0 - size-1 ] 113 mpi_comm_opa ! opa local communicator 114 109 !$AGRIF_END_DO_NOT_TREAT 110 111 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .TRUE. !: mpp flag 112 113 INTEGER, PARAMETER :: nprocmax = 2**10 ! maximun dimension (required to be a power of 2) 114 115 INTEGER :: mppsize ! number of process 116 INTEGER :: mpprank ! process number [ 0 - size-1 ] 117 INTEGER :: mpi_comm_opa ! opa local communicator 118 119 !!gm question : Pourquoi toutes les variables ice sont public??? 115 120 ! variables used in case of sea-ice 116 INTEGER, PUBLIC :: & ! 
117 ngrp_ice, & ! group ID for the ice processors (to compute rheology) 118 ncomm_ice, & ! communicator made by the processors with sea-ice 119 ndim_rank_ice, & ! number of 'ice' processors 120 n_ice_root ! number (in the comm_ice) of proc 0 in the ice comm 121 INTEGER, DIMENSION(:), ALLOCATABLE :: & 122 nrank_ice ! dimension ndim_rank_north, number of the procs belonging to ncomm_north 123 ! variables used in case of north fold condition in mpp_mpi with jpni > 1 124 INTEGER :: & ! 125 ngrp_world, & ! group ID for the world processors 126 ngrp_north, & ! group ID for the northern processors (to be fold) 127 ncomm_north, & ! communicator made by the processors belonging to ngrp_north 128 ndim_rank_north, & ! number of 'sea' processor in the northern line (can be /= jpni !) 129 njmppmax ! value of njmpp for the processors of the northern line 130 INTEGER :: & ! 131 north_root ! number (in the comm_opa) of proc 0 in the northern comm 132 INTEGER, DIMENSION(:), ALLOCATABLE :: & 133 nrank_north ! dimension ndim_rank_north, number of the procs belonging to ncomm_north 134 CHARACTER (len=1) :: & 135 c_mpi_send = 'S' ! type od mpi send/recieve (S=standard, B=bsend, I=isend) 136 LOGICAL :: & 137 l_isend = .FALSE. ! isend use indicator (T if c_mpi_send='I') 138 INTEGER :: & ! size of the buffer in case of mpi_bsend 139 nn_buffer = 0 140 REAL(kind=wp), ALLOCATABLE, DIMENSION(:) :: tampon ! buffer in case of bsend 141 142 #elif defined key_mpp_shmem 143 !! ========================= !! 144 !! SHMEM variable definition !! 145 !! ========================= !! 146 # include <fpvm3.h> 147 # include <mpp/shmem.fh> 148 149 CHARACTER (len=80), PARAMETER :: simfile = 'pvm3_ndim' ! file name 150 CHARACTER (len=47), PARAMETER :: executable = 'opa' ! executable name 151 CHARACTER, PARAMETER :: opaall = "" ! group name (old def opaall*(*)) 152 153 INTEGER, PARAMETER :: & !! SHMEM control print 154 mynode_print = 0, & ! flag for print, mynode routine 155 mpprecv_print = 0, & ! flag for print, mpprecv routine 156 mppsend_print = 0, & ! flag for print, mppsend routine 157 mppsync_print = 0, & ! flag for print, mppsync routine 158 mppsum_print = 0, & ! flag for print, mpp_sum routine 159 mppisl_print = 0, & ! flag for print, mpp_isl routine 160 mppmin_print = 0, & ! flag for print, mpp_min routine 161 mppmax_print = 0, & ! flag for print, mpp_max routine 162 mpparent_print = 0 ! flag for print, mpparent routine 163 164 INTEGER, PARAMETER :: & !! Variable definition 165 jpvmint = 21 ! ??? 166 167 INTEGER, PARAMETER :: & !! Maximum dimension of array to sum on the processors 168 jpmsec = 50000, & ! ??? 169 jpmpplat = 30, & ! ??? 170 jpmppsum = MAX( jpisl*jpisl, jpmpplat*jpk, jpmsec ) ! ??? 171 172 INTEGER :: & 173 npvm_ipas , & ! pvm initialization flag 174 npvm_mytid, & ! pvm tid 175 npvm_me , & ! node number [ 0 - nproc-1 ] 176 npvm_nproc, & ! real number of nodes 177 npvm_inum ! ??? 178 INTEGER, DIMENSION(0:nprocmax-1) :: & 179 npvm_tids ! tids array [ 0 - nproc-1 ] 180 181 INTEGER :: & 182 nt3d_ipas , & ! pvm initialization flag 183 nt3d_mytid, & ! pvm tid 184 nt3d_me , & ! node number [ 0 - nproc-1 ] 185 nt3d_nproc ! real number of nodes 186 INTEGER, DIMENSION(0:nprocmax-1) :: & 187 nt3d_tids ! tids array [ 0 - nproc-1 ] 188 189 !! real sum reduction 190 INTEGER, DIMENSION(SHMEM_REDUCE_SYNC_SIZE) :: & 191 nrs1sync_shmem, & ! 192 nrs2sync_shmem 193 REAL(wp), DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 194 wrs1wrk_shmem, & ! 195 wrs2wrk_shmem ! 
196 REAL(wp), DIMENSION(jpmppsum) :: & 197 wrstab_shmem ! 198 199 !! minimum and maximum reduction 200 INTEGER, DIMENSION(SHMEM_REDUCE_SYNC_SIZE) :: & 201 ni1sync_shmem, & ! 202 ni2sync_shmem ! 203 REAL(wp), DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 204 wi1wrk_shmem, & ! 205 wi2wrk_shmem 206 REAL(wp), DIMENSION(jpmppsum) :: & 207 wintab_shmem, & ! 208 wi1tab_shmem, & ! 209 wi2tab_shmem ! 210 211 !! value not equal zero for barotropic stream function around islands 212 INTEGER, DIMENSION(SHMEM_REDUCE_SYNC_SIZE) :: & 213 ni11sync_shmem, & ! 214 ni12sync_shmem, & ! 215 ni21sync_shmem, & ! 216 ni22sync_shmem ! 217 REAL(wp), DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 218 wi11wrk_shmem, & ! 219 wi12wrk_shmem, & ! 220 wi21wrk_shmem, & ! 221 wi22wrk_shmem ! 222 REAL(wp), DIMENSION(jpmppsum) :: & 223 wiltab_shmem , & ! 224 wi11tab_shmem, & ! 225 wi12tab_shmem, & ! 226 wi21tab_shmem, & ! 227 wi22tab_shmem 228 229 INTEGER, DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 230 ni11wrk_shmem, & ! 231 ni12wrk_shmem, & ! 232 ni21wrk_shmem, & ! 233 ni22wrk_shmem ! 234 INTEGER, DIMENSION(jpmppsum) :: & 235 niitab_shmem , & ! 236 ni11tab_shmem, & ! 237 ni12tab_shmem ! 238 INTEGER, DIMENSION(SHMEM_REDUCE_SYNC_SIZE) :: & 239 nis1sync_shmem, & ! 240 nis2sync_shmem ! 241 INTEGER, DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 242 nis1wrk_shmem, & ! 243 nis2wrk_shmem ! 244 INTEGER, DIMENSION(jpmppsum) :: & 245 nistab_shmem 246 247 !! integer sum reduction 248 INTEGER, DIMENSION(SHMEM_REDUCE_SYNC_SIZE) :: & 249 nil1sync_shmem, & ! 250 nil2sync_shmem ! 251 INTEGER, DIMENSION( MAX( SHMEM_REDUCE_MIN_WRKDATA_SIZE, jpmppsum/2+1 ) ) :: & 252 nil1wrk_shmem, & ! 253 nil2wrk_shmem ! 254 INTEGER, DIMENSION(jpmppsum) :: & 255 niltab_shmem 256 #endif 257 258 REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) :: & 259 t4ns, t4sn ! 3d message passing arrays north-south & south-north 260 REAL(wp), DIMENSION(jpj,jpreci,jpk,2,2) :: & 261 t4ew, t4we ! 3d message passing arrays east-west & west-east 262 REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) :: & 263 t4p1, t4p2 ! 3d message passing arrays north fold 264 REAL(wp), DIMENSION(jpi,jprecj,jpk,2) :: & 265 t3ns, t3sn ! 3d message passing arrays north-south & south-north 266 REAL(wp), DIMENSION(jpj,jpreci,jpk,2) :: & 267 t3ew, t3we ! 3d message passing arrays east-west & west-east 268 REAL(wp), DIMENSION(jpi,jprecj,jpk,2) :: & 269 t3p1, t3p2 ! 3d message passing arrays north fold 270 REAL(wp), DIMENSION(jpi,jprecj,2) :: & 271 t2ns, t2sn ! 2d message passing arrays north-south & south-north 272 REAL(wp), DIMENSION(jpj,jpreci,2) :: & 273 t2ew, t2we ! 2d message passing arrays east-west & west-east 274 REAL(wp), DIMENSION(jpi,jprecj,2) :: & 275 t2p1, t2p2 ! 2d message passing arrays north fold 276 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) :: & 277 tr2ns, tr2sn ! 2d message passing arrays north-south & south-north including extra outer halo 278 REAL(wp), DIMENSION(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) :: & 279 tr2ew, tr2we ! 2d message passing arrays east-west & west-east including extra outer halo 121 INTEGER, PUBLIC :: ngrp_ice !: group ID for the ice processors (for rheology) 122 INTEGER, PUBLIC :: ncomm_ice !: communicator made by the processors with sea-ice 123 INTEGER, PUBLIC :: ndim_rank_ice !: number of 'ice' processors 124 INTEGER, PUBLIC :: n_ice_root !: number (in the comm_ice) of proc 0 in the ice comm 125 INTEGER, DIMENSION(:), ALLOCATABLE :: nrank_ice ! 
dimension ndim_rank_ice 126 127 ! North fold condition in mpp_mpi with jpni > 1 128 INTEGER :: ngrp_world ! group ID for the world processors 129 INTEGER :: ngrp_north ! group ID for the northern processors (to be fold) 130 INTEGER :: ncomm_north ! communicator made by the processors belonging to ngrp_north 131 INTEGER :: ndim_rank_north ! number of 'sea' processor in the northern line (can be /= jpni !) 132 INTEGER :: njmppmax ! value of njmpp for the processors of the northern line 133 INTEGER :: north_root ! number (in the comm_opa) of proc 0 in the northern comm 134 INTEGER, DIMENSION(:), ALLOCATABLE :: nrank_north ! dimension ndim_rank_north 135 136 ! Type of send : standard, buffered, immediate 137 CHARACTER(len=1) :: c_mpi_send = 'S' ! type od mpi send/recieve (S=standard, B=bsend, I=isend) 138 LOGICAL :: l_isend = .FALSE. ! isend use indicator (T if c_mpi_send='I') 139 INTEGER :: nn_buffer = 0 ! size of the buffer in case of mpi_bsend 140 141 REAL(wp), ALLOCATABLE, DIMENSION(:) :: tampon ! buffer in case of bsend 142 143 ! message passing arrays 144 REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) :: t4ns, t4sn ! 2 x 3d for north-south & south-north 145 REAL(wp), DIMENSION(jpj,jpreci,jpk,2,2) :: t4ew, t4we ! 2 x 3d for east-west & west-east 146 REAL(wp), DIMENSION(jpi,jprecj,jpk,2,2) :: t4p1, t4p2 ! 2 x 3d for north fold 147 REAL(wp), DIMENSION(jpi,jprecj,jpk,2) :: t3ns, t3sn ! 3d for north-south & south-north 148 REAL(wp), DIMENSION(jpj,jpreci,jpk,2) :: t3ew, t3we ! 3d for east-west & west-east 149 REAL(wp), DIMENSION(jpi,jprecj,jpk,2) :: t3p1, t3p2 ! 3d for north fold 150 REAL(wp), DIMENSION(jpi,jprecj,2) :: t2ns, t2sn ! 2d for north-south & south-north 151 REAL(wp), DIMENSION(jpj,jpreci,2) :: t2ew, t2we ! 2d for east-west & west-east 152 REAL(wp), DIMENSION(jpi,jprecj,2) :: t2p1, t2p2 ! 2d for north fold 153 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) :: tr2ns, tr2sn ! 2d for north-south & south-north + extra outer halo 154 REAL(wp), DIMENSION(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) :: tr2ew, tr2we ! 2d for east-west & west-east + extra outer halo 280 155 !!---------------------------------------------------------------------- 281 !! OPA 9.0 , LOCEAN-IPSL (2005)156 !! NEMO/OPA 3.2 , LOCEAN-IPSL (2009) 282 157 !! $Id$ 283 !! This software is governed by the CeCILL licence see modipsl/doc/NEMO_CeCILL.txt284 !!--------------------------------------------------------------------- 158 !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt) 159 !!---------------------------------------------------------------------- 285 160 286 161 CONTAINS … … 293 168 !! 294 169 !!---------------------------------------------------------------------- 295 #if defined key_mpp_mpi296 !! * Local variables (MPI version)297 170 INTEGER :: mynode, ierr, code 298 171 LOGICAL :: mpi_was_called 299 INTEGER, OPTIONAL :: localComm172 INTEGER, OPTIONAL :: localComm 300 173 NAMELIST/nam_mpp/ c_mpi_send, nn_buffer 301 174 !!---------------------------------------------------------------------- 302 175 ! 303 176 WRITE(numout,*) 304 177 WRITE(numout,*) 'mynode : mpi initialisation' 305 178 WRITE(numout,*) '~~~~~~ ' 306 179 WRITE(numout,*) 307 308 ! Namelist namrun : parameters of the run 309 REWIND( numnam ) 180 ! 181 REWIND( numnam ) ! Namelist namrun : parameters of the run 310 182 READ ( numnam, nam_mpp ) 311 183 ! ! 
control print 312 184 WRITE(numout,*) ' Namelist nam_mpp' 313 185 WRITE(numout,*) ' mpi send type c_mpi_send = ', c_mpi_send 186 !!gm IF(lwp) WRITE(numout,*) ' Namelist nam_mpp' 187 !!gm IF(lwp) WRITE(numout,*) ' mpi send type c_mpi_send = ', c_mpi_send 188 314 189 315 190 #if defined key_agrif 316 191 IF( Agrif_Root() ) THEN 317 192 #endif 318 !!bug RB : should be clean to use Agrif in coupled mode193 !!bug RB : should be clean to use Agrif in coupled mode 319 194 #if ! defined key_agrif 320 195 CALL mpi_initialized ( mpi_was_called, code ) … … 389 264 mpi_comm_opa = mpi_comm_world 390 265 #endif 391 392 266 CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr ) 267 CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr ) 393 268 mynode = mpprank 394 #else 395 !! * Local variables (SHMEM version) 396 INTEGER :: mynode 397 INTEGER :: & 398 imypid, imyhost, ji, info, iparent_tid 399 !!---------------------------------------------------------------------- 400 401 IF( npvm_ipas /= nprocmax ) THEN 402 ! --- first passage in mynode 403 ! ------------- 404 ! enroll in pvm 405 ! ------------- 406 CALL pvmfmytid( npvm_mytid ) 407 IF( mynode_print /= 0 ) THEN 408 WRITE(numout,*) 'mynode, npvm_ipas =', npvm_ipas, ' nprocmax=', nprocmax 409 WRITE(numout,*) 'mynode, npvm_mytid=', npvm_mytid, ' after pvmfmytid' 410 ENDIF 411 412 ! --------------------------------------------------------------- 413 ! find out IF i am parent or child spawned processes have parents 414 ! --------------------------------------------------------------- 415 CALL mpparent( iparent_tid ) 416 IF( mynode_print /= 0 ) THEN 417 WRITE(numout,*) 'mynode, npvm_mytid=', npvm_mytid, & 418 & ' after mpparent, npvm_tids(0) = ', & 419 & npvm_tids(0), ' iparent_tid=', iparent_tid 420 ENDIF 421 IF( iparent_tid < 0 ) THEN 422 WRITE(numout,*) 'mynode, npvm_mytid=', npvm_mytid, & 423 & ' after mpparent, npvm_tids(0) = ', & 424 & npvm_tids(0), ' iparent_tid=', iparent_tid 425 npvm_tids(0) = npvm_mytid 426 npvm_me = 0 427 IF( jpnij > nprocmax ) THEN 428 WRITE(ctmp1,*) 'npvm_mytid=', npvm_mytid, ' too great' 429 CALL ctl_stop( ctmp1 ) 430 431 ELSE 432 npvm_nproc = jpnij 433 ENDIF 434 435 ! ------------------------- 436 ! start up copies of myself 437 ! ------------------------- 438 IF( npvm_nproc > 1 ) THEN 439 DO ji = 1, npvm_nproc-1 440 npvm_tids(ji) = nt3d_tids(ji) 441 END DO 442 info=npvm_nproc-1 443 444 IF( mynode_print /= 0 ) THEN 445 WRITE(numout,*) 'mynode, npvm_mytid=',npvm_mytid, & 446 & ' maitre=',executable,' info=', info & 447 & ,' npvm_nproc=',npvm_nproc 448 WRITE(numout,*) 'mynode, npvm_mytid=',npvm_mytid, & 449 & ' npvm_tids ',(npvm_tids(ji),ji=0,npvm_nproc-1) 450 ENDIF 451 452 ! --------------------------- 453 ! multicast tids array to children 454 ! --------------------------- 455 CALL pvmfinitsend( pvmdefault, info ) 456 CALL pvmfpack ( jpvmint, npvm_nproc, 1 , 1, info ) 457 CALL pvmfpack ( jpvmint, npvm_tids , npvm_nproc, 1, info ) 458 CALL pvmfmcast( npvm_nproc-1, npvm_tids(1), 10, info ) 459 ENDIF 460 ELSE 461 462 ! --------------------------------- 463 ! receive the tids array and set me 464 ! 
--------------------------------- 465 IF( mynode_print /= 0 ) WRITE(numout,*) 'mynode, npvm_mytid=',npvm_mytid, ' pvmfrecv' 466 CALL pvmfrecv( iparent_tid, 10, info ) 467 IF( mynode_print /= 0 ) WRITE(numout,*) 'mynode, npvm_mytid=',npvm_mytid, " fin pvmfrecv" 468 CALL pvmfunpack( jpvmint, npvm_nproc, 1 , 1, info ) 469 CALL pvmfunpack( jpvmint, npvm_tids , npvm_nproc, 1, info ) 470 IF( mynode_print /= 0 ) THEN 471 WRITE(numout,*) 'mynode, npvm_mytid=',npvm_mytid, & 472 & ' esclave=', executable,' info=', info,' npvm_nproc=',npvm_nproc 473 WRITE(numout,*) 'mynode, npvm_mytid=', npvm_mytid, & 474 & 'npvm_tids', ( npvm_tids(ji), ji = 0, npvm_nproc-1 ) 475 ENDIF 476 DO ji = 0, npvm_nproc-1 477 IF( npvm_mytid == npvm_tids(ji) ) npvm_me = ji 478 END DO 479 ENDIF 480 481 ! ------------------------------------------------------------ 482 ! all nproc tasks are equal now 483 ! and can address each other by tids(0) thru tids(nproc-1) 484 ! for each process me => process number [0-(nproc-1)] 485 ! ------------------------------------------------------------ 486 CALL pvmfjoingroup ( "bidon", info ) 487 CALL pvmfbarrier ( "bidon", npvm_nproc, info ) 488 DO ji = 0, npvm_nproc-1 489 IF( ji == npvm_me ) THEN 490 CALL pvmfjoingroup ( opaall, npvm_inum ) 491 IF( npvm_inum /= npvm_me ) WRITE(numout,*) 'mynode not arrived in the good order for opaall' 492 ENDIF 493 CALL pvmfbarrier( "bidon", npvm_nproc, info ) 494 END DO 495 CALL pvmfbarrier( opaall, npvm_nproc, info ) 496 497 ELSE 498 ! --- other passage in mynode 499 ENDIF 500 501 npvm_ipas = nprocmax 502 mynode = npvm_me 503 imypid = npvm_mytid 504 imyhost = npvm_tids(0) 505 IF( mynode_print /= 0 ) THEN 506 WRITE(numout,*)'mynode: npvm_mytid=', npvm_mytid, ' npvm_me=', npvm_me, & 507 & ' npvm_nproc=', npvm_nproc , ' npvm_ipas=', npvm_ipas 508 ENDIF 509 #endif 269 ! 510 270 END FUNCTION mynode 511 271 512 513 SUBROUTINE mpparent( kparent_tid )514 !!----------------------------------------------------------------------515 !! *** routine mpparent ***516 !!517 !! ** Purpose : use an pvmfparent routine for T3E (key_mpp_shmem)518 !! or only return -1 (key_mpp_mpi)519 !!----------------------------------------------------------------------520 !! * Arguments521 INTEGER, INTENT(inout) :: kparent_tid ! ???522 523 #if defined key_mpp_mpi524 ! MPI version : retour -1525 526 kparent_tid = -1527 528 #else529 !! * Local variables (SHMEN onto T3E version)530 INTEGER :: &531 it3d_my_pe, LEADZ, ji, info532 533 CALL pvmfmytid( nt3d_mytid )534 CALL pvmfgetpe( nt3d_mytid, it3d_my_pe )535 IF( mpparent_print /= 0 ) THEN536 WRITE(numout,*) 'mpparent: nt3d_mytid= ', nt3d_mytid ,' it3d_my_pe=',it3d_my_pe537 ENDIF538 IF( it3d_my_pe == 0 ) THEN539 !-----------------------------------------------------------------!540 ! process = 0 => receive other tids !541 !-----------------------------------------------------------------!542 kparent_tid = -1543 IF(mpparent_print /= 0 ) THEN544 WRITE(numout,*) 'mpparent, nt3d_mytid=',nt3d_mytid ,' kparent_tid=',kparent_tid545 ENDIF546 ! 
--- END receive dimension ---547 IF( jpnij > nprocmax ) THEN548 WRITE(ctmp1,*) 'mytid=',nt3d_mytid,' too great'549 CALL ctl_stop( ctmp1 )550 ELSE551 nt3d_nproc = jpnij552 ENDIF553 IF( mpparent_print /= 0 ) THEN554 WRITE(numout,*) 'mpparent, nt3d_mytid=', nt3d_mytid , ' nt3d_nproc=', nt3d_nproc555 ENDIF556 !-------- receive tids from others process --------557 DO ji = 1, nt3d_nproc-1558 CALL pvmfrecv( ji , 100, info )559 CALL pvmfunpack( jpvmint, nt3d_tids(ji), 1, 1, info )560 IF( mpparent_print /= 0 ) THEN561 WRITE(numout,*) 'mpparent, nt3d_mytid=', nt3d_mytid , ' receive=', nt3d_tids(ji), ' from = ', ji562 ENDIF563 END DO564 nt3d_tids(0) = nt3d_mytid565 IF( mpparent_print /= 0 ) THEN566 WRITE(numout,*) 'mpparent, nt3d_mytid=', nt3d_mytid , ' nt3d_tids(ji) =', (nt3d_tids(ji), &567 ji = 0, nt3d_nproc-1 )568 WRITE(numout,*) 'mpparent, nt3d_mytid=', nt3d_mytid , ' kparent_tid=', kparent_tid569 ENDIF570 571 ELSE572 !!----------------------------------------------------------------!573 ! process <> 0 => send other tids !574 !!----------------------------------------------------------------!575 kparent_tid = 0576 CALL pvmfinitsend( pvmdataraw, info )577 CALL pvmfpack( jpvmint, nt3d_mytid, 1, 1, info )578 CALL pvmfsend( kparent_tid, 100, info )579 ENDIF580 #endif581 582 END SUBROUTINE mpparent583 584 #if defined key_mpp_shmem585 586 SUBROUTINE mppshmem587 !!----------------------------------------------------------------------588 !! *** routine mppshmem ***589 !!590 !! ** Purpose : SHMEM ROUTINE591 !!592 !!----------------------------------------------------------------------593 nrs1sync_shmem = SHMEM_SYNC_VALUE594 nrs2sync_shmem = SHMEM_SYNC_VALUE595 nis1sync_shmem = SHMEM_SYNC_VALUE596 nis2sync_shmem = SHMEM_SYNC_VALUE597 nil1sync_shmem = SHMEM_SYNC_VALUE598 nil2sync_shmem = SHMEM_SYNC_VALUE599 ni11sync_shmem = SHMEM_SYNC_VALUE600 ni12sync_shmem = SHMEM_SYNC_VALUE601 ni21sync_shmem = SHMEM_SYNC_VALUE602 ni22sync_shmem = SHMEM_SYNC_VALUE603 CALL barrier()604 605 END SUBROUTINE mppshmem606 607 #endif608 272 609 273 SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval ) … … 628 292 !! 629 293 !!---------------------------------------------------------------------- 630 !! * Arguments 631 CHARACTER(len=1) , INTENT( in ) :: & 632 cd_type ! define the nature of ptab array grid-points 633 ! ! = T , U , V , F , W points 634 ! ! = S : T-point, north fold treatment ??? 635 ! ! = G : F-point, north fold treatment ??? 636 REAL(wp), INTENT( in ) :: & 637 psgn ! control of the sign change 638 ! ! = -1. , the sign is changed if north fold boundary 639 ! ! = 1. , the sign is kept if north fold boundary 640 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT( inout ) :: & 641 ptab ! 3D array on which the boundary condition is applied 642 CHARACTER(len=3), INTENT( in ), OPTIONAL :: & 643 cd_mpp ! fill the overlap area only 644 REAL(wp) , INTENT(in ), OPTIONAL :: pval ! background value (used at closed boundaries) 645 646 !! * Local variables 647 INTEGER :: ji, jj, jk, jl ! dummy loop indices 648 INTEGER :: imigr, iihom, ijhom, iloc, ijt, iju ! temporary integers 649 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 650 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 294 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied 295 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 296 ! ! = T , U , V , F , W points 297 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 298 ! ! = 1. 
, the sign is kept 299 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 300 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 301 !! 302 INTEGER :: ji, jj, jl ! dummy loop indices 303 INTEGER :: imigr, iihom, ijhom ! temporary integers 304 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 651 305 REAL(wp) :: zland 652 !!---------------------------------------------------------------------- 306 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 307 !!---------------------------------------------------------------------- 308 309 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 310 ELSE ; zland = 0.e0 ! zero by default 311 ENDIF 653 312 654 313 ! 1. standard boundary treatment 655 314 ! ------------------------------ 656 657 IF( PRESENT( pval ) ) THEN ! set land value (zero by default) 658 zland = pval 659 ELSE 660 zland = 0.e0 661 ENDIF 662 663 IF( PRESENT( cd_mpp ) ) THEN 664 DO jj = nlcj+1, jpj ! only fill extra allows last line 315 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with non zero values 316 ! 317 DO jj = nlcj+1, jpj ! added line(s) (inner only) 665 318 ptab(1:nlci, jj, :) = ptab(1:nlci, nlej, :) 666 319 END DO 667 DO ji = nlci+1, jpi ! only fill extra allows last column320 DO ji = nlci+1, jpi ! added column(s) (full) 668 321 ptab(ji , : , :) = ptab(nlei , : , :) 669 322 END DO 670 ELSE671 672 ! ! East-West boundaries673 ! ! ====================674 IF( nbondi == 2 .AND. & !Cyclic east-west675 &(nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN323 ! 324 ELSE ! standard close or cyclic treatment 325 ! 326 ! ! East-West boundaries 327 ! !* Cyclic east-west 328 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 676 329 ptab( 1 ,:,:) = ptab(jpim1,:,:) 677 330 ptab(jpi,:,:) = ptab( 2 ,:,:) 678 679 ELSE ! closed 680 SELECT CASE ( cd_type ) 681 CASE ( 'T', 'U', 'V', 'W' ) 682 ptab( 1 :jpreci,:,:) = zland 683 ptab(nlci-jpreci+1:jpi ,:,:) = zland 684 CASE ( 'F' ) 685 ptab(nlci-jpreci+1:jpi ,:,:) = zland 686 END SELECT 331 ELSE !* closed 332 IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point 333 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 687 334 ENDIF 688 689 ! ! North-South boundaries 690 ! ! ====================== 691 SELECT CASE ( cd_type ) 692 CASE ( 'T', 'U', 'V', 'W' ) 693 ptab(:, 1 :jprecj,:) = zland 694 ptab(:,nlcj-jprecj+1:jpj ,:) = zland 695 CASE ( 'F' ) 696 ptab(:,nlcj-jprecj+1:jpj ,:) = zland 697 END SELECT 698 335 ! ! North-South boundaries (always closed) 336 IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point 337 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 338 ! 699 339 ENDIF 340 !!gm question: il me semble que le cas cd_mpp est seulement pour remplir les halos ajouter 341 !!gm pour avoir le meme nb de pts sur chaque proc 342 !!gm ===>> le endif au dessus devrait etre tout en bas de la routine : pas de comm ! 343 !!gm i.e. reduction des comm a la lecture du forcage 344 !!gm en effet l'idee de Seb etait que les champs lus le sont partout (1:nlci,1:nlcj) 700 345 701 346 ! 2. East and west directions exchange 702 347 ! ------------------------------------ 703 704 ! 2.1 Read Dirichlet lateral conditions 705 706 SELECT CASE ( nbondi ) 707 CASE ( -1, 0, 1 ) ! all exept 2 348 ! we play with the neigbours AND the row number because of the periodicity 349 ! 350 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 351 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. 
close case) 708 352 iihom = nlci-nreci 709 353 DO jl = 1, jpreci … … 712 356 END DO 713 357 END SELECT 714 715 ! 2.2 Migrations 716 717 #if defined key_mpp_shmem 718 !! * SHMEM version 719 358 ! 359 ! ! Migrations 720 360 imigr = jpreci * jpj * jpk 721 722 SELECT CASE ( nbondi ) 723 CASE ( -1 ) 724 CALL shmem_put( t3we(1,1,1,2), t3we(1,1,1,1), imigr, noea ) 725 CASE ( 0 ) 726 CALL shmem_put( t3ew(1,1,1,2), t3ew(1,1,1,1), imigr, nowe ) 727 CALL shmem_put( t3we(1,1,1,2), t3we(1,1,1,1), imigr, noea ) 728 CASE ( 1 ) 729 CALL shmem_put( t3ew(1,1,1,2), t3ew(1,1,1,1), imigr, nowe ) 730 END SELECT 731 732 CALL barrier() 733 CALL shmem_udcflush() 734 735 #elif defined key_mpp_mpi 736 !! * Local variables (MPI version) 737 738 imigr = jpreci * jpj * jpk 739 361 ! 740 362 SELECT CASE ( nbondi ) 741 363 CASE ( -1 ) … … 755 377 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 756 378 END SELECT 757 #endif 758 759 ! 2.3 Write Dirichlet lateral conditions 760 379 ! 380 ! ! Write Dirichlet lateral conditions 761 381 iihom = nlci-jpreci 762 382 ! 763 383 SELECT CASE ( nbondi ) 764 384 CASE ( -1 ) … … 780 400 ! 3. North and south directions 781 401 ! ----------------------------- 782 783 ! 3.1 Read Dirichlet lateral conditions 784 785 IF( nbondj /= 2 ) THEN 402 ! always closed : we play only with the neigbours 403 ! 404 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 786 405 ijhom = nlcj-nrecj 787 406 DO jl = 1, jprecj … … 790 409 END DO 791 410 ENDIF 792 793 ! 3.2 Migrations 794 795 #if defined key_mpp_shmem 796 !! * SHMEM version 797 411 ! 412 ! ! Migrations 798 413 imigr = jprecj * jpi * jpk 799 800 SELECT CASE ( nbondj ) 801 CASE ( -1 ) 802 CALL shmem_put( t3sn(1,1,1,2), t3sn(1,1,1,1), imigr, nono ) 803 CASE ( 0 ) 804 CALL shmem_put( t3ns(1,1,1,2), t3ns(1,1,1,1), imigr, noso ) 805 CALL shmem_put( t3sn(1,1,1,2), t3sn(1,1,1,1), imigr, nono ) 806 CASE ( 1 ) 807 CALL shmem_put( t3ns(1,1,1,2), t3ns(1,1,1,1), imigr, noso ) 808 END SELECT 809 810 CALL barrier() 811 CALL shmem_udcflush() 812 813 #elif defined key_mpp_mpi 814 !! * Local variables (MPI version) 815 816 imigr=jprecj*jpi*jpk 817 414 ! 818 415 SELECT CASE ( nbondj ) 819 416 CASE ( -1 ) … … 833 430 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 834 431 END SELECT 835 836 #endif 837 838 ! 3.3 Write Dirichlet lateral conditions 839 432 ! 433 ! ! Write Dirichlet lateral conditions 840 434 ijhom = nlcj-jprecj 841 435 ! 842 436 SELECT CASE ( nbondj ) 843 437 CASE ( -1 ) … … 859 453 ! 4. north fold treatment 860 454 ! ----------------------- 861 862 IF (PRESENT(cd_mpp)) THEN 863 ! No north fold treatment (it is assumed to be already OK) 864 865 ELSE 866 867 ! 4.1 treatment without exchange (jpni odd) 868 ! T-point pivot 869 870 SELECT CASE ( jpni ) 871 872 CASE ( 1 ) ! only one proc along I, no mpp exchange 873 874 SELECT CASE ( npolj ) 875 876 CASE ( 3 , 4 ) ! 
T pivot 877 iloc = jpiglo - 2 * ( nimpp - 1 ) 878 879 SELECT CASE ( cd_type ) 880 881 CASE ( 'T' , 'S', 'W' ) 882 DO jk = 1, jpk 883 DO ji = 2, nlci 884 ijt=iloc-ji+2 885 ptab(ji,nlcj,jk) = psgn * ptab(ijt,nlcj-2,jk) 886 END DO 887 DO ji = nlci/2+1, nlci 888 ijt=iloc-ji+2 889 ptab(ji,nlcj-1,jk) = psgn * ptab(ijt,nlcj-1,jk) 890 END DO 891 END DO 892 893 CASE ( 'U' ) 894 DO jk = 1, jpk 895 DO ji = 1, nlci-1 896 iju=iloc-ji+1 897 ptab(ji,nlcj,jk) = psgn * ptab(iju,nlcj-2,jk) 898 END DO 899 DO ji = nlci/2, nlci-1 900 iju=iloc-ji+1 901 ptab(ji,nlcj-1,jk) = psgn * ptab(iju,nlcj-1,jk) 902 END DO 903 END DO 904 905 CASE ( 'V' ) 906 DO jk = 1, jpk 907 DO ji = 2, nlci 908 ijt=iloc-ji+2 909 ptab(ji,nlcj-1,jk) = psgn * ptab(ijt,nlcj-2,jk) 910 ptab(ji,nlcj ,jk) = psgn * ptab(ijt,nlcj-3,jk) 911 END DO 912 END DO 913 914 CASE ( 'F', 'G' ) 915 DO jk = 1, jpk 916 DO ji = 1, nlci-1 917 iju=iloc-ji+1 918 ptab(ji,nlcj-1,jk) = psgn * ptab(iju,nlcj-2,jk) 919 ptab(ji,nlcj ,jk) = psgn * ptab(iju,nlcj-3,jk) 920 END DO 921 END DO 922 923 END SELECT 924 925 CASE ( 5 , 6 ) ! F pivot 926 iloc=jpiglo-2*(nimpp-1) 927 928 SELECT CASE ( cd_type ) 929 930 CASE ( 'T' , 'S', 'W' ) 931 DO jk = 1, jpk 932 DO ji = 1, nlci 933 ijt=iloc-ji+1 934 ptab(ji,nlcj,jk) = psgn * ptab(ijt,nlcj-1,jk) 935 END DO 936 END DO 937 938 CASE ( 'U' ) 939 DO jk = 1, jpk 940 DO ji = 1, nlci-1 941 iju=iloc-ji 942 ptab(ji,nlcj,jk) = psgn * ptab(iju,nlcj-1,jk) 943 END DO 944 END DO 945 946 CASE ( 'V' ) 947 DO jk = 1, jpk 948 DO ji = 1, nlci 949 ijt=iloc-ji+1 950 ptab(ji,nlcj ,jk) = psgn * ptab(ijt,nlcj-2,jk) 951 END DO 952 DO ji = nlci/2+1, nlci 953 ijt=iloc-ji+1 954 ptab(ji,nlcj-1,jk) = psgn * ptab(ijt,nlcj-1,jk) 955 END DO 956 END DO 957 958 CASE ( 'F', 'G' ) 959 DO jk = 1, jpk 960 DO ji = 1, nlci-1 961 iju=iloc-ji 962 ptab(ji,nlcj,jk) = psgn * ptab(iju,nlcj-2,jk) 963 END DO 964 DO ji = nlci/2+1, nlci-1 965 iju=iloc-ji 966 ptab(ji,nlcj-1,jk) = psgn * ptab(iju,nlcj-1,jk) 967 END DO 968 END DO 969 END SELECT ! cd_type 970 971 END SELECT ! npolj 972 973 CASE DEFAULT ! more than 1 proc along I 974 IF ( npolj /= 0 ) CALL mpp_lbc_north (ptab, cd_type, psgn) ! only for northern procs. 975 976 END SELECT ! jpni 977 455 ! 456 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 457 ! 458 SELECT CASE ( jpni ) 459 CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp 460 CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. 461 END SELECT 462 ! 978 463 ENDIF 979 980 981 ! 5. East and west directions exchange 982 ! ------------------------------------ 983 984 SELECT CASE ( npolj ) 985 986 CASE ( 3, 4, 5, 6 ) 987 988 ! 5.1 Read Dirichlet lateral conditions 989 990 SELECT CASE ( nbondi ) 991 992 CASE ( -1, 0, 1 ) 993 iihom = nlci-nreci 994 DO jl = 1, jpreci 995 t3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 996 t3we(:,jl,:,1) = ptab(iihom +jl,:,:) 997 END DO 998 999 END SELECT 1000 1001 ! 5.2 Migrations 1002 1003 #if defined key_mpp_shmem 1004 !! SHMEM version 1005 1006 imigr = jpreci * jpj * jpk 1007 1008 SELECT CASE ( nbondi ) 1009 CASE ( -1 ) 1010 CALL shmem_put( t3we(1,1,1,2), t3we(1,1,1,1), imigr, noea ) 1011 CASE ( 0 ) 1012 CALL shmem_put( t3ew(1,1,1,2), t3ew(1,1,1,1), imigr, nowe ) 1013 CALL shmem_put( t3we(1,1,1,2), t3we(1,1,1,1), imigr, noea ) 1014 CASE ( 1 ) 1015 CALL shmem_put( t3ew(1,1,1,2), t3ew(1,1,1,1), imigr, nowe ) 1016 END SELECT 1017 1018 CALL barrier() 1019 CALL shmem_udcflush() 1020 1021 #elif defined key_mpp_mpi 1022 !! 
MPI version 1023 1024 imigr=jpreci*jpj*jpk 1025 1026 SELECT CASE ( nbondi ) 1027 CASE ( -1 ) 1028 CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req1 ) 1029 CALL mpprecv( 1, t3ew(1,1,1,2), imigr ) 1030 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1031 CASE ( 0 ) 1032 CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1033 CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req2 ) 1034 CALL mpprecv( 1, t3ew(1,1,1,2), imigr ) 1035 CALL mpprecv( 2, t3we(1,1,1,2), imigr ) 1036 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1037 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1038 CASE ( 1 ) 1039 CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 ) 1040 CALL mpprecv( 2, t3we(1,1,1,2), imigr ) 1041 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1042 END SELECT 1043 #endif 1044 1045 ! 5.3 Write Dirichlet lateral conditions 1046 1047 iihom = nlci-jpreci 1048 1049 SELECT CASE ( nbondi) 1050 CASE ( -1 ) 1051 DO jl = 1, jpreci 1052 ptab(iihom+jl,:,:) = t3ew(:,jl,:,2) 1053 END DO 1054 CASE ( 0 ) 1055 DO jl = 1, jpreci 1056 ptab(jl ,:,:) = t3we(:,jl,:,2) 1057 ptab(iihom+jl,:,:) = t3ew(:,jl,:,2) 1058 END DO 1059 CASE ( 1 ) 1060 DO jl = 1, jpreci 1061 ptab(jl ,:,:) = t3we(:,jl,:,2) 1062 END DO 1063 END SELECT 1064 1065 END SELECT ! npolj 1066 464 ! 1067 465 END SUBROUTINE mpp_lnk_3d 1068 466 … … 1087 485 !! 1088 486 !!---------------------------------------------------------------------- 1089 !! * Arguments 1090 CHARACTER(len=1) , INTENT( in ) :: & 1091 cd_type ! define the nature of pt2d array grid-points 1092 ! ! = T , U , V , F , W 1093 ! ! = S : T-point, north fold treatment 1094 ! ! = G : F-point, north fold treatment 1095 ! ! = I : sea-ice velocity at F-point with index shift 1096 REAL(wp), INTENT( in ) :: & 1097 psgn ! control of the sign change 1098 ! ! = -1. , the sign is changed if north fold boundary 1099 ! ! = 1. , the sign is kept if north fold boundary 1100 REAL(wp), DIMENSION(jpi,jpj), INTENT( inout ) :: & 1101 pt2d ! 2D array on which the boundary condition is applied 1102 CHARACTER(len=3), INTENT( in ), OPTIONAL :: & 1103 cd_mpp ! fill the overlap area only 1104 REAL(wp) , INTENT(in ), OPTIONAL :: pval ! background value (used at closed boundaries) 1105 1106 !! * Local variables 1107 INTEGER :: ji, jj, jl ! dummy loop indices 1108 INTEGER :: & 1109 imigr, iihom, ijhom, & ! temporary integers 1110 iloc, ijt, iju ! " " 1111 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1112 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 487 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the boundary condition is applied 488 CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points 489 ! ! = T , U , V , F , W and I points 490 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 491 ! ! = 1. , the sign is kept 492 CHARACTER(len=3), OPTIONAL , INTENT(in ) :: cd_mpp ! fill the overlap area only 493 REAL(wp) , OPTIONAL , INTENT(in ) :: pval ! background value (used at closed boundaries) 494 !! 495 INTEGER :: ji, jj, jl ! dummy loop indices 496 INTEGER :: imigr, iihom, ijhom ! temporary integers 497 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1113 498 REAL(wp) :: zland 1114 !!---------------------------------------------------------------------- 1115 1116 IF( PRESENT( pval ) ) THEN ! set land value (zero by default) 1117 zland = pval 1118 ELSE 1119 zland = 0.e0 499 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! 
for key_mpi_isend 500 !!---------------------------------------------------------------------- 501 502 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value 503 ELSE ; zland = 0.e0 ! zero by default 1120 504 ENDIF 1121 505 1122 506 ! 1. standard boundary treatment 1123 507 ! ------------------------------ 1124 IF (PRESENT(cd_mpp)) THEN 1125 DO jj = nlcj+1, jpj ! only fill extra allows last line 508 ! 509 IF( PRESENT( cd_mpp ) ) THEN ! only fill added line/raw with non zero values 510 ! 511 DO jj = nlcj+1, jpj ! last line (inner) 1126 512 pt2d(1:nlci, jj) = pt2d(1:nlci, nlej) 1127 513 END DO 1128 DO ji = nlci+1, jpi ! only fill extra allowslast column514 DO ji = nlci+1, jpi ! last column 1129 515 pt2d(ji , : ) = pt2d(nlei , : ) 1130 END DO 1131 ELSE1132 1133 ! ! East-West boundaries1134 ! ! ====================1135 IF( nbondi == 2 .AND. & ! Cyclic east-west516 END DO 517 ! 518 ELSE ! standard close or cyclic treatment 519 ! 520 ! ! East-West boundaries 521 IF( nbondi == 2 .AND. & ! Cyclic east-west 1136 522 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1137 pt2d( 1 ,:) = pt2d(jpim1,:) 1138 pt2d(jpi,:) = pt2d( 2 ,:) 1139 1140 ELSE ! ... closed 1141 SELECT CASE ( cd_type ) 1142 CASE ( 'T', 'U', 'V', 'W' , 'I' ) 1143 pt2d( 1 :jpreci,:) = zland 1144 pt2d(nlci-jpreci+1:jpi ,:) = zland 1145 CASE ( 'F' ) 1146 pt2d(nlci-jpreci+1:jpi ,:) = zland 1147 END SELECT 523 pt2d( 1 ,:) = pt2d(jpim1,:) ! west 524 pt2d(jpi,:) = pt2d( 2 ,:) ! east 525 ELSE ! closed 526 IF( .NOT. cd_type == 'F' ) pt2d( 1 :jpreci,:) = zland ! south except F-point 527 pt2d(nlci-jpreci+1:jpi ,:) = zland ! north 1148 528 ENDIF 1149 1150 ! ! North-South boundaries 1151 ! ! ====================== 1152 SELECT CASE ( cd_type ) 1153 CASE ( 'T', 'U', 'V', 'W' , 'I' ) 1154 pt2d(:, 1 :jprecj) = zland 1155 pt2d(:,nlcj-jprecj+1:jpj ) = zland 1156 CASE ( 'F' ) 1157 pt2d(:,nlcj-jprecj+1:jpj ) = zland 1158 END SELECT 1159 529 ! ! North-South boundaries (always closed) 530 IF( .NOT. cd_type == 'F' ) pt2d(:, 1 :jprecj) = zland !south except F-point 531 pt2d(:,nlcj-jprecj+1:jpj ) = zland ! north 532 ! 533 !!gm question: il me semble que le cas cd_mpp est seulement pour remplir les halos ajouter 534 !!gm pour avoir le meme nb de pts sur chaque proc 535 !!gm ===>> le endif au dessus devrait etre tout en bas de la routine : pas de comm ! 536 !!gm i.e. reduction des comm a la lecture du forcage 537 !!gm en effet l'idee de Seb etait que les champs lus le sont partout (1:nlci,1:nlcj) 1160 538 ENDIF 1161 539 1162 1163 ! 2. East and west directions 1164 ! --------------------------- 1165 1166 ! 2.1 Read Dirichlet lateral conditions 1167 1168 SELECT CASE ( nbondi ) 1169 CASE ( -1, 0, 1 ) ! all except 2 540 ! 2. East and west directions exchange 541 ! ------------------------------------ 542 ! we play with the neigbours AND the row number because of the periodicity 543 ! 544 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 545 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1170 546 iihom = nlci-nreci 1171 547 DO jl = 1, jpreci … … 1174 550 END DO 1175 551 END SELECT 1176 1177 ! 2.2 Migrations 1178 1179 #if defined key_mpp_shmem 1180 !! * SHMEM version 1181 552 ! 553 ! ! 
Migrations 1182 554 imigr = jpreci * jpj 1183 1184 SELECT CASE ( nbondi ) 1185 CASE ( -1 ) 1186 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr, noea ) 1187 CASE ( 0 ) 1188 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr, nowe ) 1189 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr, noea ) 1190 CASE ( 1 ) 1191 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr, nowe ) 1192 END SELECT 1193 1194 CALL barrier() 1195 CALL shmem_udcflush() 1196 1197 #elif defined key_mpp_mpi 1198 !! * MPI version 1199 1200 imigr = jpreci * jpj 1201 555 ! 1202 556 SELECT CASE ( nbondi ) 1203 557 CASE ( -1 ) … … 1217 571 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1218 572 END SELECT 1219 1220 #endif 1221 1222 ! 2.3 Write Dirichlet lateral conditions 1223 573 ! 574 ! ! Write Dirichlet lateral conditions 1224 575 iihom = nlci - jpreci 576 ! 1225 577 SELECT CASE ( nbondi ) 1226 578 CASE ( -1 ) … … 1242 594 ! 3. North and south directions 1243 595 ! ----------------------------- 1244 1245 ! 3.1 Read Dirichlet lateral conditions 1246 1247 IF( nbondj /= 2 ) THEN 596 ! always closed : we play only with the neigbours 597 ! 598 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1248 599 ijhom = nlcj-nrecj 1249 600 DO jl = 1, jprecj … … 1252 603 END DO 1253 604 ENDIF 1254 1255 ! 3.2 Migrations 1256 1257 #if defined key_mpp_shmem 1258 !! * SHMEM version 1259 605 ! 606 ! ! Migrations 1260 607 imigr = jprecj * jpi 1261 1262 SELECT CASE ( nbondj ) 1263 CASE ( -1 ) 1264 CALL shmem_put( t2sn(1,1,2), t2sn(1,1,1), imigr, nono ) 1265 CASE ( 0 ) 1266 CALL shmem_put( t2ns(1,1,2), t2ns(1,1,1), imigr, noso ) 1267 CALL shmem_put( t2sn(1,1,2), t2sn(1,1,1), imigr, nono ) 1268 CASE ( 1 ) 1269 CALL shmem_put( t2ns(1,1,2), t2ns(1,1,1), imigr, noso ) 1270 END SELECT 1271 CALL barrier() 1272 CALL shmem_udcflush() 1273 1274 #elif defined key_mpp_mpi 1275 !! * MPI version 1276 1277 imigr = jprecj * jpi 1278 608 ! 1279 609 SELECT CASE ( nbondj ) 1280 610 CASE ( -1 ) … … 1294 624 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1295 625 END SELECT 1296 1297 #endif 1298 1299 ! 3.3 Write Dirichlet lateral conditions 1300 626 ! 627 ! ! Write Dirichlet lateral conditions 1301 628 ijhom = nlcj - jprecj 1302 629 ! 1303 630 SELECT CASE ( nbondj ) 1304 631 CASE ( -1 ) … … 1315 642 pt2d(:,jl ) = t2sn(:,jl,2) 1316 643 END DO 1317 END SELECT 1318 644 END SELECT 645 1319 646 1320 647 ! 4. north fold treatment 1321 648 ! ----------------------- 1322 1323 IF (PRESENT(cd_mpp)) THEN 1324 ! No north fold treatment (it is assumed to be already OK) 1325 1326 ELSE 1327 1328 ! 4.1 treatment without exchange (jpni odd) 1329 1330 SELECT CASE ( jpni ) 1331 1332 CASE ( 1 ) ! only one proc along I, no mpp exchange 1333 1334 SELECT CASE ( npolj ) 1335 1336 CASE ( 3 , 4 ) ! 
T pivot 1337 iloc = jpiglo - 2 * ( nimpp - 1 ) 1338 1339 SELECT CASE ( cd_type ) 1340 1341 CASE ( 'T' , 'S', 'W' ) 1342 DO ji = 2, nlci 1343 ijt=iloc-ji+2 1344 pt2d(ji,nlcj) = psgn * pt2d(ijt,nlcj-2) 1345 END DO 1346 DO ji = nlci/2+1, nlci 1347 ijt=iloc-ji+2 1348 pt2d(ji,nlcj-1) = psgn * pt2d(ijt,nlcj-1) 1349 END DO 1350 1351 CASE ( 'U' ) 1352 DO ji = 1, nlci-1 1353 iju=iloc-ji+1 1354 pt2d(ji,nlcj) = psgn * pt2d(iju,nlcj-2) 1355 END DO 1356 DO ji = nlci/2, nlci-1 1357 iju=iloc-ji+1 1358 pt2d(ji,nlcj-1) = psgn * pt2d(iju,nlcj-1) 1359 END DO 1360 1361 CASE ( 'V' ) 1362 DO ji = 2, nlci 1363 ijt=iloc-ji+2 1364 pt2d(ji,nlcj-1) = psgn * pt2d(ijt,nlcj-2) 1365 pt2d(ji,nlcj ) = psgn * pt2d(ijt,nlcj-3) 1366 END DO 1367 1368 CASE ( 'F', 'G' ) 1369 DO ji = 1, nlci-1 1370 iju=iloc-ji+1 1371 pt2d(ji,nlcj-1) = psgn * pt2d(iju,nlcj-2) 1372 pt2d(ji,nlcj ) = psgn * pt2d(iju,nlcj-3) 1373 END DO 1374 1375 CASE ( 'I' ) ! ice U-V point 1376 pt2d(2,nlcj) = psgn * pt2d(3,nlcj-1) 1377 DO ji = 3, nlci 1378 iju = iloc - ji + 3 1379 pt2d(ji,nlcj) = psgn * pt2d(iju,nlcj-1) 1380 END DO 1381 1382 END SELECT 1383 1384 CASE ( 5 , 6 ) ! F pivot 1385 iloc=jpiglo-2*(nimpp-1) 1386 1387 SELECT CASE (cd_type ) 1388 1389 CASE ( 'T', 'S', 'W' ) 1390 DO ji = 1, nlci 1391 ijt=iloc-ji+1 1392 pt2d(ji,nlcj) = psgn * pt2d(ijt,nlcj-1) 1393 END DO 1394 1395 CASE ( 'U' ) 1396 DO ji = 1, nlci-1 1397 iju=iloc-ji 1398 pt2d(ji,nlcj) = psgn * pt2d(iju,nlcj-1) 1399 END DO 1400 1401 CASE ( 'V' ) 1402 DO ji = 1, nlci 1403 ijt=iloc-ji+1 1404 pt2d(ji,nlcj ) = psgn * pt2d(ijt,nlcj-2) 1405 END DO 1406 DO ji = nlci/2+1, nlci 1407 ijt=iloc-ji+1 1408 pt2d(ji,nlcj-1) = psgn * pt2d(ijt,nlcj-1) 1409 END DO 1410 1411 CASE ( 'F', 'G' ) 1412 DO ji = 1, nlci-1 1413 iju=iloc-ji 1414 pt2d(ji,nlcj) = psgn * pt2d(iju,nlcj-2) 1415 END DO 1416 DO ji = nlci/2+1, nlci-1 1417 iju=iloc-ji 1418 pt2d(ji,nlcj-1) = psgn * pt2d(iju,nlcj-1) 1419 END DO 1420 1421 CASE ( 'I' ) ! ice U-V point 1422 pt2d( 2 ,nlcj) = zland 1423 DO ji = 2 , nlci-1 1424 ijt = iloc - ji + 2 1425 pt2d(ji,nlcj)= 0.5 * ( pt2d(ji,nlcj-1) + psgn * pt2d(ijt,nlcj-1) ) 1426 END DO 1427 1428 END SELECT ! cd_type 1429 1430 END SELECT ! npolj 1431 1432 CASE DEFAULT ! more than 1 proc along I 1433 IF( npolj /= 0 ) CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! only for northern procs. 1434 1435 END SELECT ! jpni 1436 649 ! 650 IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN 651 ! 652 SELECT CASE ( jpni ) 653 CASE ( 1 ) ; CALL lbc_nfd ( pt2d, cd_type, psgn ) ! only 1 northern proc, no mpp 654 CASE DEFAULT ; CALL mpp_lbc_north( pt2d, cd_type, psgn ) ! for all northern procs. 655 END SELECT 656 ! 1437 657 ENDIF 1438 1439 ! 5. East and west directions 1440 ! --------------------------- 1441 1442 SELECT CASE ( npolj ) 1443 1444 CASE ( 3, 4, 5, 6 ) 1445 1446 ! 5.1 Read Dirichlet lateral conditions 1447 1448 SELECT CASE ( nbondi ) 1449 CASE ( -1, 0, 1 ) 1450 iihom = nlci-nreci 1451 DO jl = 1, jpreci 1452 DO jj = 1, jpj 1453 t2ew(jj,jl,1) = pt2d(jpreci+jl,jj) 1454 t2we(jj,jl,1) = pt2d(iihom +jl,jj) 1455 END DO 1456 END DO 1457 END SELECT 1458 1459 ! 5.2 Migrations 1460 1461 #if defined key_mpp_shmem 1462 !! 
* SHMEM version 1463 1464 imigr=jpreci*jpj 1465 1466 SELECT CASE ( nbondi ) 1467 CASE ( -1 ) 1468 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr, noea ) 1469 CASE ( 0 ) 1470 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr, nowe ) 1471 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr, noea ) 1472 CASE ( 1 ) 1473 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr, nowe ) 1474 END SELECT 1475 1476 CALL barrier() 1477 CALL shmem_udcflush() 1478 1479 #elif defined key_mpp_mpi 1480 !! * MPI version 1481 1482 imigr=jpreci*jpj 1483 1484 SELECT CASE ( nbondi ) 1485 CASE ( -1 ) 1486 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 ) 1487 CALL mpprecv( 1, t2ew(1,1,2), imigr ) 1488 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1489 CASE ( 0 ) 1490 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1491 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 ) 1492 CALL mpprecv( 1, t2ew(1,1,2), imigr ) 1493 CALL mpprecv( 2, t2we(1,1,2), imigr ) 1494 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1495 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 1496 CASE ( 1 ) 1497 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1498 CALL mpprecv( 2, t2we(1,1,2), imigr ) 1499 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1500 END SELECT 1501 #endif 1502 1503 ! 5.3 Write Dirichlet lateral conditions 1504 1505 iihom = nlci - jpreci 1506 1507 SELECT CASE ( nbondi ) 1508 CASE ( -1 ) 1509 DO jl = 1, jpreci 1510 pt2d(iihom+jl,:) = t2ew(:,jl,2) 1511 END DO 1512 CASE ( 0 ) 1513 DO jl = 1, jpreci 1514 pt2d(jl ,:) = t2we(:,jl,2) 1515 pt2d(iihom+jl,:) = t2ew(:,jl,2) 1516 END DO 1517 CASE ( 1 ) 1518 DO jl = 1, jpreci 1519 pt2d(jl,:) = t2we(:,jl,2) 1520 END DO 1521 END SELECT 1522 1523 END SELECT ! npolj 1524 658 ! 1525 659 END SUBROUTINE mpp_lnk_2d 1526 660 … … 1547 681 !! 1548 682 !!---------------------------------------------------------------------- 1549 !! * Arguments 1550 CHARACTER(len=1) , INTENT( in ) :: & 1551 cd_type1, cd_type2 ! define the nature of ptab array grid-points 1552 ! ! = T , U , V , F , W points 1553 ! ! = S : T-point, north fold treatment ??? 1554 ! ! = G : F-point, north fold treatment ??? 1555 REAL(wp), INTENT( in ) :: & 1556 psgn ! control of the sign change 1557 ! ! = -1. , the sign is changed if north fold boundary 1558 ! ! = 1. , the sign is kept if north fold boundary 1559 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT( inout ) :: & 1560 ptab1, ptab2 ! 3D array on which the boundary condition is applied 1561 1562 !! * Local variables 1563 INTEGER :: ji, jk, jl ! dummy loop indices 1564 INTEGER :: imigr, iihom, ijhom, iloc, ijt, iju ! temporary integers 1565 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1566 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 683 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which 684 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied 685 CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays 686 CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points 687 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary 688 !! ! = 1. , the sign is kept 689 INTEGER :: jl ! dummy loop indices 690 INTEGER :: imigr, iihom, ijhom ! temporary integers 691 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 692 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 1567 693 !!---------------------------------------------------------------------- 1568 694 1569 695 ! 1. 
standard boundary treatment 1570 696 ! ------------------------------ 1571 ! ! East-West boundaries 1572 ! ! ==================== 1573 IF( nbondi == 2 .AND. & ! Cyclic east-west 1574 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 697 ! ! East-West boundaries 698 ! !* Cyclic east-west 699 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 1575 700 ptab1( 1 ,:,:) = ptab1(jpim1,:,:) 1576 701 ptab1(jpi,:,:) = ptab1( 2 ,:,:) 1577 702 ptab2( 1 ,:,:) = ptab2(jpim1,:,:) 1578 703 ptab2(jpi,:,:) = ptab2( 2 ,:,:) 1579 1580 ELSE ! closed 1581 SELECT CASE ( cd_type1 ) 1582 CASE ( 'T', 'U', 'V', 'W' ) 1583 ptab1( 1 :jpreci,:,:) = 0.e0 1584 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 1585 CASE ( 'F' ) 1586 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 1587 END SELECT 1588 SELECT CASE ( cd_type2 ) 1589 CASE ( 'T', 'U', 'V', 'W' ) 1590 ptab2( 1 :jpreci,:,:) = 0.e0 1591 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1592 CASE ( 'F' ) 1593 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1594 END SELECT 704 ELSE !* closed 705 IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point 706 IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0 707 ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north 708 ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 1595 709 ENDIF 1596 710 1597 ! ! North-South boundaries 1598 ! ! ====================== 1599 SELECT CASE ( cd_type1 ) 1600 CASE ( 'T', 'U', 'V', 'W' ) 1601 ptab1(:, 1 :jprecj,:) = 0.e0 1602 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1603 CASE ( 'F' ) 1604 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1605 END SELECT 1606 1607 SELECT CASE ( cd_type2 ) 1608 CASE ( 'T', 'U', 'V', 'W' ) 1609 ptab2(:, 1 :jprecj,:) = 0.e0 1610 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1611 CASE ( 'F' ) 1612 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1613 END SELECT 711 712 ! ! North-South boundaries 713 IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point 714 IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0 715 ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north 716 ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 1614 717 1615 718 1616 719 ! 2. East and west directions exchange 1617 720 ! ------------------------------------ 1618 1619 ! 2.1 Read Dirichlet lateral conditions 1620 1621 SELECT CASE ( nbondi ) 1622 CASE ( -1, 0, 1 ) ! all exept 2 721 ! we play with the neigbours AND the row number because of the periodicity 722 ! 723 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 724 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 1623 725 iihom = nlci-nreci 1624 726 DO jl = 1, jpreci … … 1629 731 END DO 1630 732 END SELECT 1631 1632 ! 2.2 Migrations 1633 1634 #if defined key_mpp_shmem 1635 !! * SHMEM version 1636 733 ! 734 ! ! Migrations 1637 735 imigr = jpreci * jpj * jpk *2 1638 1639 SELECT CASE ( nbondi ) 1640 CASE ( -1 ) 1641 CALL shmem_put( t4we(1,1,1,1,2), t4we(1,1,1,1,1), imigr, noea ) 1642 CASE ( 0 ) 1643 CALL shmem_put( t4ew(1,1,1,1,2), t4ew(1,1,1,1,1), imigr, nowe ) 1644 CALL shmem_put( t4we(1,1,1,1,2), t4we(1,1,1,1,1), imigr, noea ) 1645 CASE ( 1 ) 1646 CALL shmem_put( t4ew(1,1,1,1,2), t4ew(1,1,1,1,1), imigr, nowe ) 1647 END SELECT 1648 1649 CALL barrier() 1650 CALL shmem_udcflush() 1651 1652 #elif defined key_mpp_mpi 1653 !! * Local variables (MPI version) 1654 1655 imigr = jpreci * jpj * jpk *2 1656 736 ! 1657 737 SELECT CASE ( nbondi ) 1658 738 CASE ( -1 ) … … 1672 752 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1673 753 END SELECT 1674 #endif 1675 1676 ! 2.3 Write Dirichlet lateral conditions 1677 1678 iihom = nlci-jpreci 1679 754 ! 755 ! ! 
Write Dirichlet lateral conditions 756 iihom = nlci - jpreci 757 ! 1680 758 SELECT CASE ( nbondi ) 1681 759 CASE ( -1 ) … … 1701 779 ! 3. North and south directions 1702 780 ! ----------------------------- 1703 1704 ! 3.1 Read Dirichlet lateral conditions 1705 1706 IF( nbondj /= 2 ) THEN 1707 ijhom = nlcj-nrecj 781 ! always closed : we play only with the neigbours 782 ! 783 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 784 ijhom = nlcj - nrecj 1708 785 DO jl = 1, jprecj 1709 786 t4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:) … … 1713 790 END DO 1714 791 ENDIF 1715 1716 ! 3.2 Migrations 1717 1718 #if defined key_mpp_shmem 1719 !! * SHMEM version 1720 792 ! 793 ! ! Migrations 1721 794 imigr = jprecj * jpi * jpk * 2 1722 1723 SELECT CASE ( nbondj ) 1724 CASE ( -1 ) 1725 CALL shmem_put( t4sn(1,1,1,1,2), t4sn(1,1,1,1,1), imigr, nono ) 1726 CASE ( 0 ) 1727 CALL shmem_put( t4ns(1,1,1,1,2), t4ns(1,1,1,1,1), imigr, noso ) 1728 CALL shmem_put( t4sn(1,1,1,1,2), t4sn(1,1,1,1,1), imigr, nono ) 1729 CASE ( 1 ) 1730 CALL shmem_put( t4ns(1,1,1,1,2), t4ns(1,1,1,1;,1), imigr, noso ) 1731 END SELECT 1732 1733 CALL barrier() 1734 CALL shmem_udcflush() 1735 1736 #elif defined key_mpp_mpi 1737 !! * Local variables (MPI version) 1738 1739 imigr=jprecj * jpi * jpk * 2 1740 795 ! 1741 796 SELECT CASE ( nbondj ) 1742 797 CASE ( -1 ) … … 1756 811 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 1757 812 END SELECT 1758 1759 #endif 1760 1761 ! 3.3 Write Dirichlet lateral conditions 1762 1763 ijhom = nlcj-jprecj 1764 813 ! 814 ! ! Write Dirichlet lateral conditions 815 ijhom = nlcj - jprecj 816 ! 1765 817 SELECT CASE ( nbondj ) 1766 818 CASE ( -1 ) … … 1786 838 ! 4. north fold treatment 1787 839 ! ----------------------- 1788 1789 ! 4.1 treatment without exchange (jpni odd) 1790 ! T-point pivot 1791 1792 SELECT CASE ( jpni ) 1793 1794 CASE ( 1 ) ! only one proc along I, no mpp exchange 1795 1796 SELECT CASE ( npolj ) 1797 1798 CASE ( 3 , 4 ) ! 
T pivot 1799 iloc = jpiglo - 2 * ( nimpp - 1 ) 1800 1801 SELECT CASE ( cd_type1 ) 1802 1803 CASE ( 'T' , 'S', 'W' ) 1804 DO jk = 1, jpk 1805 DO ji = 2, nlci 1806 ijt=iloc-ji+2 1807 ptab1(ji,nlcj,jk) = psgn * ptab1(ijt,nlcj-2,jk) 1808 END DO 1809 DO ji = nlci/2+1, nlci 1810 ijt=iloc-ji+2 1811 ptab1(ji,nlcj-1,jk) = psgn * ptab1(ijt,nlcj-1,jk) 1812 END DO 1813 END DO 1814 1815 CASE ( 'U' ) 1816 DO jk = 1, jpk 1817 DO ji = 1, nlci-1 1818 iju=iloc-ji+1 1819 ptab1(ji,nlcj,jk) = psgn * ptab1(iju,nlcj-2,jk) 1820 END DO 1821 DO ji = nlci/2, nlci-1 1822 iju=iloc-ji+1 1823 ptab1(ji,nlcj-1,jk) = psgn * ptab1(iju,nlcj-1,jk) 1824 END DO 1825 END DO 1826 1827 CASE ( 'V' ) 1828 DO jk = 1, jpk 1829 DO ji = 2, nlci 1830 ijt=iloc-ji+2 1831 ptab1(ji,nlcj-1,jk) = psgn * ptab1(ijt,nlcj-2,jk) 1832 ptab1(ji,nlcj ,jk) = psgn * ptab1(ijt,nlcj-3,jk) 1833 END DO 1834 END DO 1835 1836 CASE ( 'F', 'G' ) 1837 DO jk = 1, jpk 1838 DO ji = 1, nlci-1 1839 iju=iloc-ji+1 1840 ptab1(ji,nlcj-1,jk) = psgn * ptab1(iju,nlcj-2,jk) 1841 ptab1(ji,nlcj ,jk) = psgn * ptab1(iju,nlcj-3,jk) 1842 END DO 1843 END DO 1844 1845 END SELECT 1846 1847 SELECT CASE ( cd_type2 ) 1848 1849 CASE ( 'T' , 'S', 'W' ) 1850 DO jk = 1, jpk 1851 DO ji = 2, nlci 1852 ijt=iloc-ji+2 1853 ptab2(ji,nlcj,jk) = psgn * ptab2(ijt,nlcj-2,jk) 1854 END DO 1855 DO ji = nlci/2+1, nlci 1856 ijt=iloc-ji+2 1857 ptab2(ji,nlcj-1,jk) = psgn * ptab2(ijt,nlcj-1,jk) 1858 END DO 1859 END DO 1860 1861 CASE ( 'U' ) 1862 DO jk = 1, jpk 1863 DO ji = 1, nlci-1 1864 iju=iloc-ji+1 1865 ptab2(ji,nlcj,jk) = psgn * ptab2(iju,nlcj-2,jk) 1866 END DO 1867 DO ji = nlci/2, nlci-1 1868 iju=iloc-ji+1 1869 ptab2(ji,nlcj-1,jk) = psgn * ptab2(iju,nlcj-1,jk) 1870 END DO 1871 END DO 1872 1873 CASE ( 'V' ) 1874 DO jk = 1, jpk 1875 DO ji = 2, nlci 1876 ijt=iloc-ji+2 1877 ptab2(ji,nlcj-1,jk) = psgn * ptab2(ijt,nlcj-2,jk) 1878 ptab2(ji,nlcj ,jk) = psgn * ptab2(ijt,nlcj-3,jk) 1879 END DO 1880 END DO 1881 1882 CASE ( 'F', 'G' ) 1883 DO jk = 1, jpk 1884 DO ji = 1, nlci-1 1885 iju=iloc-ji+1 1886 ptab2(ji,nlcj-1,jk) = psgn * ptab2(iju,nlcj-2,jk) 1887 ptab2(ji,nlcj ,jk) = psgn * ptab2(iju,nlcj-3,jk) 1888 END DO 1889 END DO 1890 1891 END SELECT 1892 1893 CASE ( 5 , 6 ) ! F pivot 1894 iloc=jpiglo-2*(nimpp-1) 1895 1896 SELECT CASE ( cd_type1 ) 1897 1898 CASE ( 'T' , 'S', 'W' ) 1899 DO jk = 1, jpk 1900 DO ji = 1, nlci 1901 ijt=iloc-ji+1 1902 ptab1(ji,nlcj,jk) = psgn * ptab1(ijt,nlcj-1,jk) 1903 END DO 1904 END DO 1905 1906 CASE ( 'U' ) 1907 DO jk = 1, jpk 1908 DO ji = 1, nlci-1 1909 iju=iloc-ji 1910 ptab1(ji,nlcj,jk) = psgn * ptab1(iju,nlcj-1,jk) 1911 END DO 1912 END DO 1913 1914 CASE ( 'V' ) 1915 DO jk = 1, jpk 1916 DO ji = 1, nlci 1917 ijt=iloc-ji+1 1918 ptab1(ji,nlcj ,jk) = psgn * ptab1(ijt,nlcj-2,jk) 1919 END DO 1920 DO ji = nlci/2+1, nlci 1921 ijt=iloc-ji+1 1922 ptab1(ji,nlcj-1,jk) = psgn * ptab1(ijt,nlcj-1,jk) 1923 END DO 1924 END DO 1925 1926 CASE ( 'F', 'G' ) 1927 DO jk = 1, jpk 1928 DO ji = 1, nlci-1 1929 iju=iloc-ji 1930 ptab1(ji,nlcj,jk) = psgn * ptab1(iju,nlcj-2,jk) 1931 END DO 1932 DO ji = nlci/2+1, nlci-1 1933 iju=iloc-ji 1934 ptab1(ji,nlcj-1,jk) = psgn * ptab1(iju,nlcj-1,jk) 1935 END DO 1936 END DO 1937 END SELECT ! 
cd_type1 1938 1939 SELECT CASE ( cd_type2 ) 1940 1941 CASE ( 'T' , 'S', 'W' ) 1942 DO jk = 1, jpk 1943 DO ji = 1, nlci 1944 ijt=iloc-ji+1 1945 ptab2(ji,nlcj,jk) = psgn * ptab2(ijt,nlcj-1,jk) 1946 END DO 1947 END DO 1948 1949 CASE ( 'U' ) 1950 DO jk = 1, jpk 1951 DO ji = 1, nlci-1 1952 iju=iloc-ji 1953 ptab2(ji,nlcj,jk) = psgn * ptab2(iju,nlcj-1,jk) 1954 END DO 1955 END DO 1956 1957 CASE ( 'V' ) 1958 DO jk = 1, jpk 1959 DO ji = 1, nlci 1960 ijt=iloc-ji+1 1961 ptab2(ji,nlcj ,jk) = psgn * ptab2(ijt,nlcj-2,jk) 1962 END DO 1963 DO ji = nlci/2+1, nlci 1964 ijt=iloc-ji+1 1965 ptab2(ji,nlcj-1,jk) = psgn * ptab2(ijt,nlcj-1,jk) 1966 END DO 1967 END DO 1968 1969 CASE ( 'F', 'G' ) 1970 DO jk = 1, jpk 1971 DO ji = 1, nlci-1 1972 iju=iloc-ji 1973 ptab2(ji,nlcj,jk) = psgn * ptab2(iju,nlcj-2,jk) 1974 END DO 1975 DO ji = nlci/2+1, nlci-1 1976 iju=iloc-ji 1977 ptab2(ji,nlcj-1,jk) = psgn * ptab2(iju,nlcj-1,jk) 1978 END DO 1979 END DO 1980 1981 END SELECT ! cd_type2 1982 1983 END SELECT ! npolj 1984 1985 CASE DEFAULT ! more than 1 proc along I 1986 IF ( npolj /= 0 ) THEN 1987 CALL mpp_lbc_north (ptab1, cd_type1, psgn) ! only for northern procs. 1988 CALL mpp_lbc_north (ptab2, cd_type2, psgn) ! only for northern procs. 1989 ENDIF 1990 1991 END SELECT ! jpni 1992 1993 1994 ! 5. East and west directions exchange 1995 ! ------------------------------------ 1996 1997 SELECT CASE ( npolj ) 1998 1999 CASE ( 3, 4, 5, 6 ) 2000 2001 ! 5.1 Read Dirichlet lateral conditions 2002 2003 SELECT CASE ( nbondi ) 2004 2005 CASE ( -1, 0, 1 ) 2006 iihom = nlci-nreci 2007 DO jl = 1, jpreci 2008 t4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:) 2009 t4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:) 2010 t4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:) 2011 t4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:) 2012 END DO 2013 2014 END SELECT 2015 2016 ! 5.2 Migrations 2017 2018 #if defined key_mpp_shmem 2019 !! SHMEM version 2020 2021 imigr = jpreci * jpj * jpk * 2 2022 2023 SELECT CASE ( nbondi ) 2024 CASE ( -1 ) 2025 CALL shmem_put( t4we(1,1,1,1,2), t4we(1,1,1,1,1), imigr, noea ) 2026 CASE ( 0 ) 2027 CALL shmem_put( t4ew(1,1,1,1,2), t4ew(1,1,1,1,1), imigr, nowe ) 2028 CALL shmem_put( t4we(1,1,1,1,2), t4we(1,1,1,1,1), imigr, noea ) 2029 CASE ( 1 ) 2030 CALL shmem_put( t4ew(1,1,1,1,2), t4ew(1,1,1,1,1), imigr, nowe ) 2031 END SELECT 2032 2033 CALL barrier() 2034 CALL shmem_udcflush() 2035 2036 #elif defined key_mpp_mpi 2037 !! MPI version 2038 2039 imigr = jpreci * jpj * jpk * 2 2040 2041 SELECT CASE ( nbondi ) 2042 CASE ( -1 ) 2043 CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req1 ) 2044 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr ) 2045 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2046 CASE ( 0 ) 2047 CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 2048 CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req2 ) 2049 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr ) 2050 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr ) 2051 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2052 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 2053 CASE ( 1 ) 2054 CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 2055 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr ) 2056 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2057 END SELECT 2058 #endif 2059 2060 ! 
5.3 Write Dirichlet lateral conditions 2061 2062 iihom = nlci-jpreci 2063 2064 SELECT CASE ( nbondi) 2065 CASE ( -1 ) 2066 DO jl = 1, jpreci 2067 ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2) 2068 ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2) 2069 END DO 2070 CASE ( 0 ) 2071 DO jl = 1, jpreci 2072 ptab1(jl ,:,:) = t4we(:,jl,:,1,2) 2073 ptab1(iihom+jl,:,:) = t4ew(:,jl,:,1,2) 2074 ptab2(jl ,:,:) = t4we(:,jl,:,2,2) 2075 ptab2(iihom+jl,:,:) = t4ew(:,jl,:,2,2) 2076 END DO 2077 CASE ( 1 ) 2078 DO jl = 1, jpreci 2079 ptab1(jl ,:,:) = t4we(:,jl,:,1,2) 2080 ptab2(jl ,:,:) = t4we(:,jl,:,2,2) 2081 END DO 2082 END SELECT 2083 2084 END SELECT ! npolj 2085 840 IF( npolj /= 0 ) THEN 841 ! 842 SELECT CASE ( jpni ) 843 CASE ( 1 ) 844 CALL lbc_nfd ( ptab1, cd_type1, psgn ) ! only for northern procs. 845 CALL lbc_nfd ( ptab2, cd_type2, psgn ) 846 CASE DEFAULT 847 CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs. 848 CALL mpp_lbc_north (ptab2, cd_type2, psgn) 849 END SELECT 850 ! 851 ENDIF 852 ! 2086 853 END SUBROUTINE mpp_lnk_3d_gather 2087 854 … … 2106 873 !! noso : number for local neighboring processors 2107 874 !! nono : number for local neighboring processors 2108 !! 2109 !! History : 2110 !! 2111 !! 9.0 ! 05-09 (R. Benshila, G. Madec) original code 2112 !! 2113 !!---------------------------------------------------------------------- 2114 !! * Arguments 2115 CHARACTER(len=1) , INTENT( in ) :: & 2116 cd_type ! define the nature of pt2d array grid-points 2117 ! ! = T , U , V , F , W 2118 ! ! = S : T-point, north fold treatment 2119 ! ! = G : F-point, north fold treatment 2120 ! ! = I : sea-ice velocity at F-point with index shift 2121 REAL(wp), INTENT( in ) :: & 2122 psgn ! control of the sign change 2123 ! ! = -1. , the sign is changed if north fold boundary 2124 ! ! = 1. , the sign is kept if north fold boundary 2125 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT( inout ) :: & 2126 pt2d ! 2D array on which the boundary condition is applied 2127 2128 !! * Local variables 2129 INTEGER :: ji, jl ! dummy loop indices 2130 INTEGER :: & 2131 imigr, iihom, ijhom, & ! temporary integers 2132 iloc, ijt, iju ! " " 2133 INTEGER :: & 2134 ipreci, iprecj ! temporary integers 2135 INTEGER :: ml_req1, ml_req2, ml_err ! for isend 2136 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for isend 2137 !!--------------------------------------------------------------------- 2138 2139 ! take into account outer extra 2D overlap area 2140 ipreci = jpreci + jpr2di 875 !! 876 !!---------------------------------------------------------------------- 877 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo 878 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points 879 ! ! = T , U , V , F , W and I points 880 REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the 881 !! ! north boundary, = 1. otherwise 882 INTEGER :: jl ! dummy loop indices 883 INTEGER :: imigr, iihom, ijhom ! temporary integers 884 INTEGER :: ipreci, iprecj ! temporary integers 885 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 886 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend 887 !!---------------------------------------------------------------------- 888 889 ipreci = jpreci + jpr2di ! take into account outer extra 2D overlap area 2141 890 iprecj = jprecj + jpr2dj 2142 891 … … 2144 893 ! 1. standard boundary treatment 2145 894 ! ------------------------------ 2146 2147 ! ! East-West boundaries 2148 ! ! 
==================== 2149 IF( nbondi == 2 .AND. & ! Cyclic east-west 2150 & (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 2151 pt2d(1-jpr2di: 1 ,:) = pt2d(jpim1-jpr2di: jpim1 ,:) 2152 pt2d( jpi :jpi+jpr2di,:) = pt2d( 2 :2+jpr2di,:) 2153 2154 ELSE ! ... closed 2155 SELECT CASE ( cd_type ) 2156 CASE ( 'T', 'U', 'V', 'W' , 'I' ) 2157 pt2d( 1-jpr2di :jpreci ,:) = 0.e0 2158 pt2d(nlci-jpreci+1:jpi+jpr2di,:) = 0.e0 2159 CASE ( 'F' ) 2160 pt2d(nlci-jpreci+1:jpi+jpr2di,:) = 0.e0 2161 END SELECT 895 ! Order matters Here !!!! 896 ! 897 ! !* North-South boundaries (always colsed) 898 IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jpr2dj : jprecj ) = 0.e0 ! south except at F-point 899 pt2d(:,nlcj-jprecj+1:jpj+jpr2dj) = 0.e0 ! north 900 901 ! ! East-West boundaries 902 ! !* Cyclic east-west 903 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 904 pt2d(1-jpr2di: 1 ,:) = pt2d(jpim1-jpr2di: jpim1 ,:) ! east 905 pt2d( jpi :jpi+jpr2di,:) = pt2d( 2 :2+jpr2di,:) ! west 906 ! 907 ELSE !* closed 908 IF( .NOT. cd_type == 'F' ) pt2d( 1-jpr2di :jpreci ,:) = 0.e0 ! south except at F-point 909 pt2d(nlci-jpreci+1:jpi+jpr2di,:) = 0.e0 ! north 2162 910 ENDIF 2163 2164 ! ! North-South boundaries 2165 ! ! ======================2166 SELECT CASE ( cd_type )2167 CASE ( 'T', 'U', 'V', 'W' , 'I' )2168 pt2d(:, 1-jpr2dj : jprecj ) = 0.e02169 pt2d(:,nlcj-jprecj+1:jpj+jpr2dj) = 0.e02170 CASE ( 'F')2171 pt2d(:,nlcj-jprecj+1:jpj+jpr2dj) = 0.e02172 END SELECT2173 2174 2175 ! 2. East and west directions 2176 ! ---------------------------2177 2178 ! 2.1 Read Dirichlet lateral conditions2179 2180 SELECT CASE ( nbondi ) 2181 CASE ( -1, 0, 1 ) ! all except 2911 ! 912 913 ! north fold treatment 914 ! ----------------------- 915 IF( npolj /= 0 ) THEN 916 ! 917 SELECT CASE ( jpni ) 918 CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jpr2dj), cd_type, psgn, pr2dj=jpr2dj ) 919 CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn ) 920 END SELECT 921 ! 922 ENDIF 923 924 ! 2. East and west directions exchange 925 ! ------------------------------------ 926 ! we play with the neigbours AND the row number because of the periodicity 927 ! 928 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 929 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 2182 930 iihom = nlci-nreci-jpr2di 2183 931 DO jl = 1, ipreci … … 2186 934 END DO 2187 935 END SELECT 2188 2189 ! 2.2 Migrations 2190 2191 #if defined key_mpp_shmem 2192 !! * SHMEM version 2193 936 ! 937 ! ! Migrations 2194 938 imigr = ipreci * ( jpj + 2*jpr2dj) 2195 2196 SELECT CASE ( nbondi ) 2197 CASE ( -1 ) 2198 CALL shmem_put( tr2we(1-jpr2dj,1,2), tr2we(1,1,1), imigr, noea ) 2199 CASE ( 0 ) 2200 CALL shmem_put( tr2ew(1-jpr2dj,1,2), tr2ew(1,1,1), imigr, nowe ) 2201 CALL shmem_put( tr2we(1-jpr2dj,1,2), tr2we(1,1,1), imigr, noea ) 2202 CASE ( 1 ) 2203 CALL shmem_put( tr2ew(1-jpr2dj,1,2), tr2ew(1,1,1), imigr, nowe ) 2204 END SELECT 2205 2206 CALL barrier() 2207 CALL shmem_udcflush() 2208 2209 #elif defined key_mpp_mpi 2210 !! * MPI version 2211 2212 imigr = ipreci * ( jpj + 2*jpr2dj) 2213 939 ! 2214 940 SELECT CASE ( nbondi ) 2215 941 CASE ( -1 ) … … 2229 955 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2230 956 END SELECT 2231 2232 #endif 2233 2234 ! 2.3 Write Dirichlet lateral conditions 2235 957 ! 958 ! ! Write Dirichlet lateral conditions 2236 959 iihom = nlci - jpreci 2237 960 ! 2238 961 SELECT CASE ( nbondi ) 2239 962 CASE ( -1 ) … … 2255 978 ! 3. North and south directions 2256 979 ! ----------------------------- 2257 2258 ! 
3.1 Read Dirichlet lateral conditions 2259 2260 IF( nbondj /= 2 ) THEN 980 ! always closed : we play only with the neigbours 981 ! 982 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 2261 983 ijhom = nlcj-nrecj-jpr2dj 2262 984 DO jl = 1, iprecj … … 2265 987 END DO 2266 988 ENDIF 2267 2268 ! 3.2 Migrations 2269 2270 #if defined key_mpp_shmem 2271 !! * SHMEM version 2272 989 ! 990 ! ! Migrations 2273 991 imigr = iprecj * ( jpi + 2*jpr2di ) 2274 2275 SELECT CASE ( nbondj ) 2276 CASE ( -1 ) 2277 CALL shmem_put( tr2sn(1-jpr2di,1,2), tr2sn(1,1,1), imigr, nono ) 2278 CASE ( 0 ) 2279 CALL shmem_put( tr2ns(1-jpr2di,1,2), tr2ns(1,1,1), imigr, noso ) 2280 CALL shmem_put( tr2sn(1-jpr2di,1,2), tr2sn(1,1,1), imigr, nono ) 2281 CASE ( 1 ) 2282 CALL shmem_put( tr2ns(1-jpr2di,1,2), tr2ns(1,1,1), imigr, noso ) 2283 END SELECT 2284 CALL barrier() 2285 CALL shmem_udcflush() 2286 2287 #elif defined key_mpp_mpi 2288 !! * MPI version 2289 2290 imigr = iprecj * ( jpi + 2*jpr2di ) 2291 992 ! 2292 993 SELECT CASE ( nbondj ) 2293 994 CASE ( -1 ) … … 2307 1008 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2308 1009 END SELECT 2309 2310 #endif 2311 2312 ! 3.3 Write Dirichlet lateral conditions 2313 1010 ! 1011 ! ! Write Dirichlet lateral conditions 2314 1012 ijhom = nlcj - jprecj 2315 1013 ! 2316 1014 SELECT CASE ( nbondj ) 2317 1015 CASE ( -1 ) … … 2328 1026 pt2d(:,jl-jpr2dj) = tr2sn(:,jl,2) 2329 1027 END DO 2330 END SELECT 2331 2332 2333 ! 4. north fold treatment 2334 ! ----------------------- 2335 2336 ! 4.1 treatment without exchange (jpni odd) 2337 2338 SELECT CASE ( jpni ) 2339 2340 CASE ( 1 ) ! only one proc along I, no mpp exchange 2341 2342 SELECT CASE ( npolj ) 2343 2344 CASE ( 3 , 4 ) ! T pivot 2345 iloc = jpiglo - 2 * ( nimpp - 1 ) 2346 2347 SELECT CASE ( cd_type ) 2348 2349 CASE ( 'T', 'S', 'W' ) 2350 DO jl = 0, iprecj-1 2351 DO ji = 2-jpr2di, nlci+jpr2di 2352 ijt=iloc-ji+2 2353 pt2d(ji,nlcj+jl) = psgn * pt2d(ijt,nlcj-2-jl) 2354 END DO 2355 END DO 2356 DO ji = nlci/2+1, nlci+jpr2di 2357 ijt=iloc-ji+2 2358 pt2d(ji,nlcj-1) = psgn * pt2d(ijt,nlcj-1) 2359 END DO 2360 2361 CASE ( 'U' ) 2362 DO jl =0, iprecj-1 2363 DO ji = 1-jpr2di, nlci-1-jpr2di 2364 iju=iloc-ji+1 2365 pt2d(ji,nlcj+jl) = psgn * pt2d(iju,nlcj-2-jl) 2366 END DO 2367 END DO 2368 DO ji = nlci/2, nlci-1+jpr2di 2369 iju=iloc-ji+1 2370 pt2d(ji,nlcj-1) = psgn * pt2d(iju,nlcj-1) 2371 END DO 2372 2373 CASE ( 'V' ) 2374 DO jl = -1, iprecj-1 2375 DO ji = 2-jpr2di, nlci+jpr2di 2376 ijt=iloc-ji+2 2377 pt2d(ji,nlcj+jl) = psgn * pt2d(ijt,nlcj-3-jl) 2378 END DO 2379 END DO 2380 2381 CASE ( 'F', 'G' ) 2382 DO jl = -1, iprecj-1 2383 DO ji = 1-jpr2di, nlci-1+jpr2di 2384 iju=iloc-ji+1 2385 pt2d(ji,nlcj+jl) = psgn * pt2d(iju,nlcj-3-jl) 2386 END DO 2387 END DO 2388 2389 CASE ( 'I' ) ! ice U-V point 2390 DO jl = 0, iprecj-1 2391 pt2d(2,nlcj+jl) = psgn * pt2d(3,nlcj-1-jl) 2392 DO ji = 3, nlci+jpr2di 2393 iju = iloc - ji + 3 2394 pt2d(ji,nlcj+jl) = psgn * pt2d(iju,nlcj-1-jl) 2395 END DO 2396 END DO 2397 2398 END SELECT 2399 2400 CASE ( 5 , 6 ) ! 
F pivot 2401 iloc=jpiglo-2*(nimpp-1) 2402 2403 SELECT CASE (cd_type ) 2404 2405 CASE ( 'T', 'S', 'W' ) 2406 DO jl = 0, iprecj-1 2407 DO ji = 1-jpr2di, nlci+jpr2di 2408 ijt=iloc-ji+1 2409 pt2d(ji,nlcj+jl) = psgn * pt2d(ijt,nlcj-1-jl) 2410 END DO 2411 END DO 2412 2413 CASE ( 'U' ) 2414 DO jl = 0, iprecj-1 2415 DO ji = 1-jpr2di, nlci-1+jpr2di 2416 iju=iloc-ji 2417 pt2d(ji,nlcj+jl) = psgn * pt2d(iju,nlcj-1-jl) 2418 END DO 2419 END DO 2420 2421 CASE ( 'V' ) 2422 DO jl = 0, iprecj-1 2423 DO ji = 1-jpr2di, nlci+jpr2di 2424 ijt=iloc-ji+1 2425 pt2d(ji,nlcj+jl) = psgn * pt2d(ijt,nlcj-2-jl) 2426 END DO 2427 END DO 2428 DO ji = nlci/2+1, nlci+jpr2di 2429 ijt=iloc-ji+1 2430 pt2d(ji,nlcj-1) = psgn * pt2d(ijt,nlcj-1) 2431 END DO 2432 2433 CASE ( 'F', 'G' ) 2434 DO jl = 0, iprecj-1 2435 DO ji = 1-jpr2di, nlci-1+jpr2di 2436 iju=iloc-ji 2437 pt2d(ji,nlcj+jl) = psgn * pt2d(iju,nlcj-2-jl) 2438 END DO 2439 END DO 2440 DO ji = nlci/2+1, nlci-1+jpr2di 2441 iju=iloc-ji 2442 pt2d(ji,nlcj-1) = psgn * pt2d(iju,nlcj-1) 2443 END DO 2444 2445 CASE ( 'I' ) ! ice U-V point 2446 pt2d( 2 ,nlcj) = 0.e0 2447 DO jl = 0, iprecj-1 2448 DO ji = 2 , nlci-1+jpr2di 2449 ijt = iloc - ji + 2 2450 pt2d(ji,nlcj+jl)= 0.5 * ( pt2d(ji,nlcj-1-jl) + psgn * pt2d(ijt,nlcj-1-jl) ) 2451 END DO 2452 END DO 2453 2454 END SELECT ! cd_type 2455 2456 END SELECT ! npolj 2457 2458 CASE DEFAULT ! more than 1 proc along I 2459 IF( npolj /= 0 ) CALL mpp_lbc_north_e( pt2d, cd_type, psgn ) ! only for northern procs 2460 2461 END SELECT ! jpni 2462 2463 2464 ! 5. East and west directions 2465 ! --------------------------- 2466 2467 SELECT CASE ( npolj ) 2468 2469 CASE ( 3, 4, 5, 6 ) 2470 2471 ! 5.1 Read Dirichlet lateral conditions 2472 2473 SELECT CASE ( nbondi ) 2474 CASE ( -1, 0, 1 ) 2475 iihom = nlci-nreci-jpr2di 2476 DO jl = 1, ipreci 2477 tr2ew(:,jl,1) = pt2d(jpreci+jl,:) 2478 tr2we(:,jl,1) = pt2d(iihom +jl,:) 2479 END DO 2480 END SELECT 2481 2482 ! 5.2 Migrations 2483 2484 #if defined key_mpp_shmem 2485 !! * SHMEM version 2486 2487 imigr = ipreci * ( jpj + 2*jpr2dj ) 2488 2489 SELECT CASE ( nbondi ) 2490 CASE ( -1 ) 2491 CALL shmem_put( tr2we(1-jpr2dj,1,2), tr2we(1,1,1), imigr, noea ) 2492 CASE ( 0 ) 2493 CALL shmem_put( tr2ew(1-jpr2dj,1,2), tr2ew(1,1,1), imigr, nowe ) 2494 CALL shmem_put( tr2we(1-jpr2dj,1,2), tr2we(1,1,1), imigr, noea ) 2495 CASE ( 1 ) 2496 CALL shmem_put( tr2ew(1-jpr2dj,1,2), tr2ew(1,1,1), imigr, nowe ) 2497 END SELECT 2498 2499 CALL barrier() 2500 CALL shmem_udcflush() 2501 2502 #elif defined key_mpp_mpi 2503 !! * MPI version 2504 2505 imigr=ipreci* ( jpj + 2*jpr2dj ) 2506 2507 SELECT CASE ( nbondi ) 2508 CASE ( -1 ) 2509 CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req1 ) 2510 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr ) 2511 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2512 CASE ( 0 ) 2513 CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 ) 2514 CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req2 ) 2515 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr ) 2516 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr ) 2517 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2518 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 2519 CASE ( 1 ) 2520 CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 ) 2521 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr ) 2522 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2523 END SELECT 2524 #endif 2525 2526 ! 
5.3 Write Dirichlet lateral conditions 2527 2528 iihom = nlci - jpreci 2529 2530 SELECT CASE ( nbondi ) 2531 CASE ( -1 ) 2532 DO jl = 1, ipreci 2533 pt2d(iihom+jl,:) = tr2ew(:,jl,2) 2534 END DO 2535 CASE ( 0 ) 2536 DO jl = 1, ipreci 2537 pt2d(jl- jpr2di,:) = tr2we(:,jl,2) 2538 pt2d(iihom+jl,:) = tr2ew(:,jl,2) 2539 END DO 2540 CASE ( 1 ) 2541 DO jl = 1, ipreci 2542 pt2d(jl-jpr2di,:) = tr2we(:,jl,2) 2543 END DO 2544 END SELECT 2545 2546 END SELECT ! npolj 2547 1028 END SELECT 1029 2548 1030 END SUBROUTINE mpp_lnk_2d_e 2549 1031 2550 1032 2551 SUBROUTINE mpplnks( ptab ) 2552 !!---------------------------------------------------------------------- 2553 !! *** routine mpplnks *** 2554 !! 2555 !! ** Purpose : Message passing manadgement for add 2d array local boundary 2556 !! 2557 !! ** Method : Use mppsend and mpprecv function for passing mask between 2558 !! processors following neighboring subdomains. 2559 !! domain parameters 1033 SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req ) 1034 !!---------------------------------------------------------------------- 1035 !! *** routine mppsend *** 1036 !! 1037 !! ** Purpose : Send messag passing array 1038 !! 1039 !!---------------------------------------------------------------------- 1040 REAL(wp), INTENT(inout) :: pmess(*) ! array of real 1041 INTEGER , INTENT(in ) :: kbytes ! size of the array pmess 1042 INTEGER , INTENT(in ) :: kdest ! receive process number 1043 INTEGER , INTENT(in ) :: ktyp ! tag of the message 1044 INTEGER , INTENT(in ) :: md_req ! argument for isend 1045 !! 1046 INTEGER :: iflag 1047 !!---------------------------------------------------------------------- 1048 ! 1049 SELECT CASE ( c_mpi_send ) 1050 CASE ( 'S' ) ! Standard mpi send (blocking) 1051 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag ) 1052 CASE ( 'B' ) ! Buffer mpi send (blocking) 1053 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag ) 1054 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 1055 ! be carefull, one more argument here : the mpi request identifier.. 1056 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa, md_req, iflag ) 1057 END SELECT 1058 ! 1059 END SUBROUTINE mppsend 1060 1061 1062 SUBROUTINE mpprecv( ktyp, pmess, kbytes ) 1063 !!---------------------------------------------------------------------- 1064 !! *** routine mpprecv *** 1065 !! 1066 !! ** Purpose : Receive messag passing array 1067 !! 1068 !!---------------------------------------------------------------------- 1069 REAL(wp), INTENT(inout) :: pmess(*) ! array of real 1070 INTEGER , INTENT(in ) :: kbytes ! suze of the array pmess 1071 INTEGER , INTENT(in ) :: ktyp ! Tag of the recevied message 1072 !! 1073 INTEGER :: istatus(mpi_status_size) 1074 INTEGER :: iflag 1075 !!---------------------------------------------------------------------- 1076 ! 1077 CALL mpi_recv( pmess, kbytes, mpi_double_precision, mpi_any_source, ktyp, mpi_comm_opa, istatus, iflag ) 1078 ! 1079 END SUBROUTINE mpprecv 1080 1081 1082 SUBROUTINE mppgather( ptab, kp, pio ) 1083 !!---------------------------------------------------------------------- 1084 !! *** routine mppgather *** 1085 !! 1086 !! ** Purpose : Transfert between a local subdomain array and a work 1087 !! array which is distributed following the vertical level. 1088 !! 1089 !!---------------------------------------------------------------------- 1090 REAL(wp), DIMENSION(jpi,jpj), INTENT(in ) :: ptab ! 
subdomain input array 1091 INTEGER , INTENT(in ) :: kp ! record length 1092 REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT( out) :: pio ! subdomain input array 1093 !! 1094 INTEGER :: itaille, ierror ! temporary integer 1095 !!--------------------------------------------------------------------- 1096 ! 1097 itaille = jpi * jpj 1098 CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , & 1099 & mpi_double_precision, kp , mpi_comm_opa, ierror ) 1100 ! 1101 END SUBROUTINE mppgather 1102 1103 1104 SUBROUTINE mppscatter( pio, kp, ptab ) 1105 !!---------------------------------------------------------------------- 1106 !! *** routine mppscatter *** 1107 !! 1108 !! ** Purpose : Transfert between awork array which is distributed 1109 !! following the vertical level and the local subdomain array. 1110 !! 1111 !!---------------------------------------------------------------------- 1112 REAL(wp), DIMENSION(jpi,jpj,jpnij) :: pio ! output array 1113 INTEGER :: kp ! Tag (not used with MPI 1114 REAL(wp), DIMENSION(jpi,jpj) :: ptab ! subdomain array input 1115 !! 1116 INTEGER :: itaille, ierror ! temporary integer 1117 !!--------------------------------------------------------------------- 1118 ! 1119 itaille=jpi*jpj 1120 ! 1121 CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , & 1122 & mpi_double_precision, kp , mpi_comm_opa, ierror ) 1123 ! 1124 END SUBROUTINE mppscatter 1125 1126 1127 SUBROUTINE mppisl_a_int( ktab, kdim ) 1128 !!---------------------------------------------------------------------- 1129 !! *** routine mppisl_a_int *** 1130 !! 1131 !! ** Purpose : Massively parallel processors 1132 !! Find the non zero value 1133 !! 1134 !!---------------------------------------------------------------------- 1135 INTEGER, INTENT(in ) :: kdim ! ??? 1136 INTEGER, INTENT(inout), DIMENSION(kdim) :: ktab ! ??? 1137 !! 1138 LOGICAL :: lcommute 1139 INTEGER :: mpi_isl, ierror ! temporary integer 1140 INTEGER, DIMENSION(kdim) :: iwork 1141 !!---------------------------------------------------------------------- 1142 ! 1143 lcommute = .TRUE. 1144 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 1145 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_isl, mpi_comm_opa, ierror ) 1146 ktab(:) = iwork(:) 1147 ! 1148 END SUBROUTINE mppisl_a_int 1149 1150 1151 SUBROUTINE mppisl_int( ktab ) 1152 !!---------------------------------------------------------------------- 1153 !! *** routine mppisl_int *** 1154 !! 1155 !! ** Purpose : Massively parallel processors 1156 !! Find the non zero value 1157 !! 1158 !!---------------------------------------------------------------------- 1159 INTEGER , INTENT(inout) :: ktab ! 1160 !! 1161 LOGICAL :: lcommute 1162 INTEGER :: mpi_isl, ierror, iwork ! temporary integer 1163 !!---------------------------------------------------------------------- 1164 ! 1165 lcommute = .TRUE. 1166 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 1167 CALL mpi_allreduce(ktab, iwork, 1, mpi_integer, mpi_isl, mpi_comm_opa, ierror) 1168 ktab = iwork 1169 ! 1170 END SUBROUTINE mppisl_int 1171 1172 1173 SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) 1174 !!---------------------------------------------------------------------- 1175 !! *** routine mppmax_a_int *** 1176 !! 1177 !! ** Purpose : Find maximum value in an integer layout array 1178 !! 1179 !!---------------------------------------------------------------------- 1180 INTEGER , INTENT(in ) :: kdim ! size of array 1181 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! 
input array 1182 INTEGER , INTENT(in ), OPTIONAL :: kcom ! 1183 !! 1184 INTEGER :: ierror, localcomm ! temporary integer 1185 INTEGER, DIMENSION(kdim) :: iwork 1186 !!---------------------------------------------------------------------- 1187 ! 1188 localcomm = mpi_comm_opa 1189 IF( PRESENT(kcom) ) localcomm = kcom 1190 ! 1191 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) 1192 ! 1193 ktab(:) = iwork(:) 1194 ! 1195 END SUBROUTINE mppmax_a_int 1196 1197 1198 SUBROUTINE mppmax_int( ktab, kcom ) 1199 !!---------------------------------------------------------------------- 1200 !! *** routine mppmax_int *** 1201 !! 1202 !! ** Purpose : Find maximum value in an integer layout array 1203 !! 1204 !!---------------------------------------------------------------------- 1205 INTEGER, INTENT(inout) :: ktab ! ??? 1206 INTEGER, INTENT(in ), OPTIONAL :: kcom ! ??? 1207 !! 1208 INTEGER :: ierror, iwork, localcomm ! temporary integer 1209 !!---------------------------------------------------------------------- 1210 ! 1211 localcomm = mpi_comm_opa 1212 IF( PRESENT(kcom) ) localcomm = kcom 1213 ! 1214 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror) 1215 ! 1216 ktab = iwork 1217 ! 1218 END SUBROUTINE mppmax_int 1219 1220 1221 SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) 1222 !!---------------------------------------------------------------------- 1223 !! *** routine mppmin_a_int *** 1224 !! 1225 !! ** Purpose : Find minimum value in an integer layout array 1226 !! 1227 !!---------------------------------------------------------------------- 1228 INTEGER , INTENT( in ) :: kdim ! size of array 1229 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 1230 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 1231 !! 1232 INTEGER :: ierror, localcomm ! temporary integer 1233 INTEGER, DIMENSION(kdim) :: iwork 1234 !!---------------------------------------------------------------------- 1235 ! 1236 localcomm = mpi_comm_opa 1237 IF( PRESENT(kcom) ) localcomm = kcom 1238 ! 1239 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) 1240 ! 1241 ktab(:) = iwork(:) 1242 ! 1243 END SUBROUTINE mppmin_a_int 1244 1245 1246 SUBROUTINE mppmin_int( ktab ) 1247 !!---------------------------------------------------------------------- 1248 !! *** routine mppmin_int *** 1249 !! 1250 !! ** Purpose : Find minimum value in an integer layout array 1251 !! 1252 !!---------------------------------------------------------------------- 1253 INTEGER, INTENT(inout) :: ktab ! ??? 1254 !! 1255 INTEGER :: ierror, iwork 1256 !!---------------------------------------------------------------------- 1257 ! 1258 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, mpi_comm_opa, ierror ) 1259 ! 1260 ktab = iwork 1261 ! 1262 END SUBROUTINE mppmin_int 1263 1264 1265 SUBROUTINE mppsum_a_int( ktab, kdim ) 1266 !!---------------------------------------------------------------------- 1267 !! *** routine mppsum_a_int *** 1268 !! 1269 !! ** Purpose : Global integer sum, 1D array case 1270 !! 1271 !!---------------------------------------------------------------------- 1272 INTEGER, INTENT(in ) :: kdim ! ??? 1273 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 1274 !! 1275 INTEGER :: ierror 1276 INTEGER, DIMENSION (kdim) :: iwork 1277 !!---------------------------------------------------------------------- 1278 ! 1279 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1280 ! 1281 ktab(:) = iwork(:) 1282 ! 
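      !
      ! Typical use (illustrative sketch only, with a hypothetical 3-element
      ! counter icount filled on each process); after the call every process
      ! holds the global totals:
      !    INTEGER, DIMENSION(3) :: icount
      !    ! ... fill icount with local counts ...
      !    IF( lk_mpp )   CALL mpp_sum( icount, 3 )
      !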
1283 END SUBROUTINE mppsum_a_int 1284 1285 1286 SUBROUTINE mppsum_int( ktab ) 1287 !!---------------------------------------------------------------------- 1288 !! *** routine mppsum_int *** 1289 !! 1290 !! ** Purpose : Global integer sum 1291 !! 1292 !!---------------------------------------------------------------------- 1293 INTEGER, INTENT(inout) :: ktab 1294 !! 1295 INTEGER :: ierror, iwork 1296 !!---------------------------------------------------------------------- 1297 ! 1298 CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) 1299 ! 1300 ktab = iwork 1301 ! 1302 END SUBROUTINE mppsum_int 1303 1304 1305 SUBROUTINE mppisl_a_real( ptab, kdim ) 1306 !!---------------------------------------------------------------------- 1307 !! *** routine mppisl_a_real *** 1308 !! 1309 !! ** Purpose : Massively parallel processors 1310 !! Find the non zero island barotropic stream function value 1311 !! 1312 !! Modifications: 1313 !! ! 93-09 (M. Imbard) 1314 !! ! 96-05 (j. Escobar) 1315 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 1316 !!---------------------------------------------------------------------- 1317 INTEGER , INTENT( in ) :: kdim ! ??? 1318 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab ! ??? 1319 !! 1320 LOGICAL :: lcommute = .TRUE. 1321 INTEGER :: mpi_isl, ierror 1322 REAL(wp), DIMENSION(kdim) :: zwork 1323 !!---------------------------------------------------------------------- 1324 ! 1325 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 1326 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_isl, mpi_comm_opa, ierror ) 1327 ptab(:) = zwork(:) 1328 ! 1329 END SUBROUTINE mppisl_a_real 1330 1331 1332 SUBROUTINE mppisl_real( ptab ) 1333 !!---------------------------------------------------------------------- 1334 !! *** routine mppisl_real *** 1335 !! 1336 !! ** Purpose : Massively parallel processors 1337 !! Find the non zero island barotropic stream function value 1338 !! 1339 !! Modifications: 1340 !! ! 93-09 (M. Imbard) 1341 !! ! 96-05 (j. Escobar) 1342 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 1343 !!---------------------------------------------------------------------- 1344 REAL(wp), INTENT(inout) :: ptab 1345 1346 LOGICAL :: lcommute = .TRUE. 1347 INTEGER :: mpi_isl, ierror 1348 REAL(wp) :: zwork 1349 !!---------------------------------------------------------------------- 1350 ! 1351 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 1352 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_isl, mpi_comm_opa, ierror ) 1353 ptab = zwork 1354 ! 1355 END SUBROUTINE mppisl_real 1356 1357 1358 FUNCTION lc_isl( py, px, kdim ) 1359 !!---------------------------------------------------------------------- 1360 !!---------------------------------------------------------------------- 1361 INTEGER :: kdim 1362 REAL(wp), DIMENSION(kdim) :: px, py 1363 !! 1364 INTEGER :: ji 1365 INTEGER :: lc_isl 1366 !!---------------------------------------------------------------------- 1367 ! 1368 DO ji = 1, kdim 1369 IF( py(ji) /= 0. ) px(ji) = py(ji) 1370 END DO 1371 lc_isl=0 1372 ! 1373 END FUNCTION lc_isl 1374 1375 1376 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 1377 !!---------------------------------------------------------------------- 1378 !! *** routine mppmax_a_real *** 1379 !! 1380 !! ** Purpose : Maximum 1381 !! 
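      !!                (element-wise maximum of a real 1D array over all processes
      !!                 of the communicator: mpi_comm_opa by default, or the
      !!                 sub-communicator kcom when it is present)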
1382 !!---------------------------------------------------------------------- 1383 INTEGER , INTENT(in ) :: kdim 1384 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 1385 INTEGER , INTENT(in ), OPTIONAL :: kcom 1386 !! 1387 INTEGER :: ierror, localcomm 1388 REAL(wp), DIMENSION(kdim) :: zwork 1389 !!---------------------------------------------------------------------- 1390 ! 1391 localcomm = mpi_comm_opa 1392 IF( PRESENT(kcom) ) localcomm = kcom 1393 ! 1394 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror ) 1395 ptab(:) = zwork(:) 1396 ! 1397 END SUBROUTINE mppmax_a_real 1398 1399 1400 SUBROUTINE mppmax_real( ptab, kcom ) 1401 !!---------------------------------------------------------------------- 1402 !! *** routine mppmax_real *** 1403 !! 1404 !! ** Purpose : Maximum 1405 !! 1406 !!---------------------------------------------------------------------- 1407 REAL(wp), INTENT(inout) :: ptab ! ??? 1408 INTEGER , INTENT(in ), OPTIONAL :: kcom ! ??? 1409 !! 1410 INTEGER :: ierror, localcomm 1411 REAL(wp) :: zwork 1412 !!---------------------------------------------------------------------- 1413 ! 1414 localcomm = mpi_comm_opa 1415 IF( PRESENT(kcom) ) localcomm = kcom 1416 ! 1417 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror ) 1418 ptab = zwork 1419 ! 1420 END SUBROUTINE mppmax_real 1421 1422 1423 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 1424 !!---------------------------------------------------------------------- 1425 !! *** routine mppmin_a_real *** 1426 !! 1427 !! ** Purpose : Minimum of REAL, array case 1428 !! 1429 !!----------------------------------------------------------------------- 1430 INTEGER , INTENT(in ) :: kdim 1431 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 1432 INTEGER , INTENT(in ), OPTIONAL :: kcom 1433 !! 1434 INTEGER :: ierror, localcomm 1435 REAL(wp), DIMENSION(kdim) :: zwork 1436 !!----------------------------------------------------------------------- 1437 ! 1438 localcomm = mpi_comm_opa 1439 IF( PRESENT(kcom) ) localcomm = kcom 1440 ! 1441 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror ) 1442 ptab(:) = zwork(:) 1443 ! 1444 END SUBROUTINE mppmin_a_real 1445 1446 1447 SUBROUTINE mppmin_real( ptab, kcom ) 1448 !!---------------------------------------------------------------------- 1449 !! *** routine mppmin_real *** 1450 !! 1451 !! ** Purpose : minimum of REAL, scalar case 1452 !! 1453 !!----------------------------------------------------------------------- 1454 REAL(wp), INTENT(inout) :: ptab ! 1455 INTEGER , INTENT(in ), OPTIONAL :: kcom 1456 !! 1457 INTEGER :: ierror 1458 REAL(wp) :: zwork 1459 INTEGER :: localcomm 1460 !!----------------------------------------------------------------------- 1461 ! 1462 localcomm = mpi_comm_opa 1463 IF( PRESENT(kcom) ) localcomm = kcom 1464 ! 1465 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror ) 1466 ptab = zwork 1467 ! 1468 END SUBROUTINE mppmin_real 1469 1470 1471 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 1472 !!---------------------------------------------------------------------- 1473 !! *** routine mppsum_a_real *** 1474 !! 1475 !! ** Purpose : global sum, REAL ARRAY argument case 1476 !! 1477 !!----------------------------------------------------------------------- 1478 INTEGER , INTENT( in ) :: kdim ! size of ptab 1479 REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array 1480 INTEGER , INTENT( in ), OPTIONAL :: kcom 1481 !! 
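      ! kcom, when present, restricts the reduction to a sub-communicator (for
      ! instance the ice communicator created by mpp_ini_ice); otherwise the sum
      ! is taken over the whole mpi_comm_opa, as set through localcomm below.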
1482 INTEGER :: ierror ! temporary integer 1483 INTEGER :: localcomm 1484 REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace 1485 !!----------------------------------------------------------------------- 1486 ! 1487 localcomm = mpi_comm_opa 1488 IF( PRESENT(kcom) ) localcomm = kcom 1489 ! 1490 CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror ) 1491 ptab(:) = zwork(:) 1492 ! 1493 END SUBROUTINE mppsum_a_real 1494 1495 1496 SUBROUTINE mppsum_real( ptab, kcom ) 1497 !!---------------------------------------------------------------------- 1498 !! *** routine mppsum_real *** 1499 !! 1500 !! ** Purpose : global sum, SCALAR argument case 1501 !! 1502 !!----------------------------------------------------------------------- 1503 REAL(wp), INTENT(inout) :: ptab ! input scalar 1504 INTEGER , INTENT(in ), OPTIONAL :: kcom 1505 !! 1506 INTEGER :: ierror, localcomm 1507 REAL(wp) :: zwork 1508 !!----------------------------------------------------------------------- 1509 ! 1510 localcomm = mpi_comm_opa 1511 IF( PRESENT(kcom) ) localcomm = kcom 1512 ! 1513 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror ) 1514 ptab = zwork 1515 ! 1516 END SUBROUTINE mppsum_real 1517 1518 1519 SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj ) 1520 !!------------------------------------------------------------------------ 1521 !! *** routine mpp_minloc *** 1522 !! 1523 !! ** Purpose : Compute the global minimum of an array ptab 1524 !! and also give its global position 1525 !! 1526 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 1527 !! 1528 !!-------------------------------------------------------------------------- 1529 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 1530 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 1531 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 1532 INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame 1533 !! 1534 INTEGER , DIMENSION(2) :: ilocs 1535 INTEGER :: ierror 1536 REAL(wp) :: zmin ! local minimum 1537 REAL(wp), DIMENSION(2,1) :: zain, zaout 1538 !!----------------------------------------------------------------------- 1539 ! 1540 zmin = MINVAL( ptab(:,:) , mask= pmask == 1.e0 ) 1541 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 ) 1542 ! 1543 ki = ilocs(1) + nimpp - 1 1544 kj = ilocs(2) + njmpp - 1 1545 ! 1546 zain(1,:)=zmin 1547 zain(2,:)=ki+10000.*kj 1548 ! 1549 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 1550 ! 1551 pmin = zaout(1,1) 1552 kj = INT(zaout(2,1)/10000.) 1553 ki = INT(zaout(2,1) - 10000.*kj ) 1554 ! 1555 END SUBROUTINE mpp_minloc2d 1556 1557 1558 SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj ,kk) 1559 !!------------------------------------------------------------------------ 1560 !! *** routine mpp_minloc *** 1561 !! 1562 !! ** Purpose : Compute the global minimum of an array ptab 1563 !! and also give its global position 1564 !! 1565 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 1566 !! 1567 !!-------------------------------------------------------------------------- 1568 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 1569 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 1570 REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab 1571 INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame 1572 !! 1573 INTEGER :: ierror 1574 REAL(wp) :: zmin ! 
local minimum 1575 INTEGER , DIMENSION(3) :: ilocs 1576 REAL(wp), DIMENSION(2,1) :: zain, zaout 1577 !!----------------------------------------------------------------------- 1578 ! 1579 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 1580 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 1581 ! 1582 ki = ilocs(1) + nimpp - 1 1583 kj = ilocs(2) + njmpp - 1 1584 kk = ilocs(3) 1585 ! 1586 zain(1,:)=zmin 1587 zain(2,:)=ki+10000.*kj+100000000.*kk 1588 ! 1589 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 1590 ! 1591 pmin = zaout(1,1) 1592 kk = INT( zaout(2,1) / 100000000. ) 1593 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 1594 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 1595 ! 1596 END SUBROUTINE mpp_minloc3d 1597 1598 1599 SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) 1600 !!------------------------------------------------------------------------ 1601 !! *** routine mpp_maxloc *** 1602 !! 1603 !! ** Purpose : Compute the global maximum of an array ptab 1604 !! and also give its global position 1605 !! 1606 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 1607 !! 1608 !!-------------------------------------------------------------------------- 1609 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array 1610 REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask 1611 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 1612 INTEGER , INTENT( out) :: ki, kj ! index of maximum in global frame 1613 !! 1614 INTEGER :: ierror 1615 INTEGER, DIMENSION (2) :: ilocs 1616 REAL(wp) :: zmax ! local maximum 1617 REAL(wp), DIMENSION(2,1) :: zain, zaout 1618 !!----------------------------------------------------------------------- 1619 ! 1620 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 ) 1621 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 ) 1622 ! 1623 ki = ilocs(1) + nimpp - 1 1624 kj = ilocs(2) + njmpp - 1 1625 ! 1626 zain(1,:) = zmax 1627 zain(2,:) = ki + 10000. * kj 1628 ! 1629 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 1630 ! 1631 pmax = zaout(1,1) 1632 kj = INT( zaout(2,1) / 10000. ) 1633 ki = INT( zaout(2,1) - 10000.* kj ) 1634 ! 1635 END SUBROUTINE mpp_maxloc2d 1636 1637 1638 SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) 1639 !!------------------------------------------------------------------------ 1640 !! *** routine mpp_maxloc *** 1641 !! 1642 !! ** Purpose : Compute the global maximum of an array ptab 1643 !! and also give its global position 1644 !! 1645 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 1646 !! 1647 !!-------------------------------------------------------------------------- 1648 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 2D array 1649 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local mask 1650 REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab 1651 INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame 1652 !! 1653 REAL(wp) :: zmax ! local maximum 1654 REAL(wp), DIMENSION(2,1) :: zain, zaout 1655 INTEGER , DIMENSION(3) :: ilocs 1656 INTEGER :: ierror 1657 !!----------------------------------------------------------------------- 1658 ! 1659 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 1660 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 1661 ! 1662 ki = ilocs(1) + nimpp - 1 1663 kj = ilocs(2) + njmpp - 1 1664 kk = ilocs(3) 1665 ! 1666 zain(1,:)=zmax 1667 zain(2,:)=ki+10000.*kj+100000000.*kk 1668 ! 
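      ! The pair zain(1:2,1) carries (local maximum, encoded location) : the global
      ! indices are packed as ki + 10000*kj + 100000000*kk, which implicitly assumes
      ! that the global i- and j-dimensions remain below 10000. MPI_ALLREDUCE with
      ! the MPI_MAXLOC operator on MPI_2DOUBLE_PRECISION pairs returns, on every
      ! process, the pair with the largest first element together with its second
      ! element, so the location of the global maximum is decoded below by integer
      ! division.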
1669 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 1670 ! 1671 pmax = zaout(1,1) 1672 kk = INT( zaout(2,1) / 100000000. ) 1673 kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 1674 ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) 1675 ! 1676 END SUBROUTINE mpp_maxloc3d 1677 1678 1679 SUBROUTINE mppsync() 1680 !!---------------------------------------------------------------------- 1681 !! *** routine mppsync *** 1682 !! 1683 !! ** Purpose : Massively parallel processors, synchroneous 1684 !! 1685 !!----------------------------------------------------------------------- 1686 INTEGER :: ierror 1687 !!----------------------------------------------------------------------- 1688 ! 1689 CALL mpi_barrier( mpi_comm_opa, ierror ) 1690 ! 1691 END SUBROUTINE mppsync 1692 1693 1694 SUBROUTINE mppstop 1695 !!---------------------------------------------------------------------- 1696 !! *** routine mppstop *** 1697 !! 1698 !! ** purpose : Stop massilively parallel processors method 1699 !! 1700 !!---------------------------------------------------------------------- 1701 INTEGER :: info 1702 !!---------------------------------------------------------------------- 1703 ! 1704 CALL mppsync 1705 CALL mpi_finalize( info ) 1706 ! 1707 END SUBROUTINE mppstop 1708 1709 1710 SUBROUTINE mppobc( ptab, kd1, kd2, kl, kk, ktype, kij ) 1711 !!---------------------------------------------------------------------- 1712 !! *** routine mppobc *** 1713 !! 1714 !! ** Purpose : Message passing manadgement for open boundary 1715 !! conditions array 1716 !! 1717 !! ** Method : Use mppsend and mpprecv function for passing mask 1718 !! between processors following neighboring subdomains. 1719 !! domain parameters 2560 1720 !! nlci : first dimension of the local subdomain 2561 1721 !! nlcj : second dimension of the local subdomain … … 2568 1728 !! 2569 1729 !!---------------------------------------------------------------------- 2570 !! * Arguments 2571 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: & 2572 ptab ! 2D array 2573 2574 !! * Local variables 2575 INTEGER :: ji, jl ! dummy loop indices 2576 INTEGER :: & 2577 imigr, iihom, ijhom ! temporary integers 2578 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 2579 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 2580 !!---------------------------------------------------------------------- 2581 2582 2583 ! 1. north fold treatment 2584 ! ----------------------- 2585 2586 ! 1.1 treatment without exchange (jpni odd) 2587 2588 SELECT CASE ( npolj ) 2589 CASE ( 4 ) 2590 DO ji = 1, nlci 2591 ptab(ji,nlcj-2) = ptab(ji,nlcj-2) + t2p1(ji,1,1) 2592 END DO 2593 CASE ( 6 ) 2594 DO ji = 1, nlci 2595 ptab(ji,nlcj-1) = ptab(ji,nlcj-1) + t2p1(ji,1,1) 2596 END DO 2597 2598 ! 1.2 treatment with exchange (jpni greater than 1) 2599 ! 2600 CASE ( 3 ) 2601 #if defined key_mpp_shmem 2602 2603 !! * SHMEN version 2604 2605 imigr=jprecj*jpi 2606 2607 CALL shmem_put(t2p1(1,1,2),t2p1(1,1,1),imigr,nono) 2608 CALL barrier() 2609 CALL shmem_udcflush() 2610 2611 # elif defined key_mpp_mpi 2612 !! * MPI version 2613 2614 imigr=jprecj*jpi 2615 2616 CALL mppsend(3,t2p1(1,1,1),imigr,nono, ml_req1) 2617 CALL mpprecv(3,t2p1(1,1,2),imigr) 2618 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2619 2620 #endif 2621 2622 ! Write north fold conditions 2623 2624 DO ji = 1, nlci 2625 ptab(ji,nlcj-2) = ptab(ji,nlcj-2)+t2p1(ji,1,2) 2626 END DO 2627 2628 CASE ( 5 ) 2629 2630 #if defined key_mpp_shmem 2631 2632 !! 
* SHMEN version 2633 2634 imigr=jprecj*jpi 2635 2636 CALL shmem_put(t2p1(1,1,2),t2p1(1,1,1),imigr,nono) 2637 CALL barrier() 2638 CALL shmem_udcflush() 2639 2640 # elif defined key_mpp_mpi 2641 !! * Local variables (MPI version) 2642 2643 imigr=jprecj*jpi 2644 2645 CALL mppsend(3,t2p1(1,1,1),imigr,nono, ml_req1) 2646 CALL mpprecv(3,t2p1(1,1,2),imigr) 2647 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2648 2649 #endif 2650 2651 ! Write north fold conditions 2652 2653 DO ji = 1, nlci 2654 ptab(ji,nlcj-1) = ptab(ji,nlcj-1)+t2p1(ji,1,2) 2655 END DO 2656 2657 END SELECT 2658 2659 2660 ! 2. East and west directions 2661 ! --------------------------- 2662 2663 ! 2.1 Read Dirichlet lateral conditions 2664 2665 iihom = nlci-jpreci 2666 2667 SELECT CASE ( nbondi ) 2668 2669 CASE ( -1, 0, 1 ) ! all except 2 2670 DO jl = 1, jpreci 2671 t2ew(:,jl,1) = ptab( jl ,:) 2672 t2we(:,jl,1) = ptab(iihom+jl,:) 2673 END DO 2674 END SELECT 2675 2676 ! 2.2 Migrations 2677 2678 #if defined key_mpp_shmem 2679 2680 !! * SHMEN version 2681 2682 imigr=jpreci*jpj 2683 2684 SELECT CASE ( nbondi ) 2685 2686 CASE ( -1 ) 2687 CALL shmem_put(t2we(1,1,2),t2we(1,1,1),imigr,noea) 2688 2689 CASE ( 0 ) 2690 CALL shmem_put(t2ew(1,1,2),t2ew(1,1,1),imigr,nowe) 2691 CALL shmem_put(t2we(1,1,2),t2we(1,1,1),imigr,noea) 2692 2693 CASE ( 1 ) 2694 CALL shmem_put(t2ew(1,1,2),t2ew(1,1,1),imigr,nowe) 2695 2696 END SELECT 2697 CALL barrier() 2698 CALL shmem_udcflush() 2699 2700 # elif defined key_mpp_mpi 2701 !! * Local variables (MPI version) 2702 2703 imigr=jpreci*jpj 2704 2705 SELECT CASE ( nbondi ) 2706 2707 CASE ( -1 ) 2708 CALL mppsend(2,t2we(1,1,1),imigr,noea, ml_req1) 2709 CALL mpprecv(1,t2ew(1,1,2),imigr) 2710 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2711 CASE ( 0 ) 2712 CALL mppsend(1,t2ew(1,1,1),imigr,nowe, ml_req1) 2713 CALL mppsend(2,t2we(1,1,1),imigr,noea, ml_req2) 2714 CALL mpprecv(1,t2ew(1,1,2),imigr) 2715 CALL mpprecv(2,t2we(1,1,2),imigr) 2716 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2717 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 2718 2719 CASE ( 1 ) 2720 CALL mppsend(1,t2ew(1,1,1),imigr,nowe, ml_req1) 2721 CALL mpprecv(2,t2we(1,1,2),imigr) 2722 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2723 2724 END SELECT 2725 1730 INTEGER , INTENT(in ) :: kd1, kd2 ! starting and ending indices 1731 INTEGER , INTENT(in ) :: kl ! index of open boundary 1732 INTEGER , INTENT(in ) :: kk ! vertical dimension 1733 INTEGER , INTENT(in ) :: ktype ! define north/south or east/west cdt 1734 ! ! = 1 north/south ; = 2 east/west 1735 INTEGER , INTENT(in ) :: kij ! horizontal dimension 1736 REAL(wp), INTENT(inout), DIMENSION(kij,kk) :: ptab ! variable array 1737 !! 1738 INTEGER :: ji, jj, jk, jl ! dummy loop indices 1739 INTEGER :: iipt0, iipt1, ilpt1 ! temporary integers 1740 INTEGER :: ijpt0, ijpt1 ! - - 1741 INTEGER :: imigr, iihom, ijhom ! - - 1742 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 1743 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 1744 REAL(wp), DIMENSION(jpi,jpj) :: ztab ! temporary workspace 1745 !!---------------------------------------------------------------------- 1746 1747 ! boundary condition initialization 1748 ! --------------------------------- 1749 ztab(:,:) = 0.e0 1750 ! 1751 IF( ktype==1 ) THEN ! 
north/south boundaries 1752 iipt0 = MAX( 1, MIN(kd1 - nimpp+1, nlci ) ) 1753 iipt1 = MAX( 0, MIN(kd2 - nimpp+1, nlci - 1 ) ) 1754 ilpt1 = MAX( 1, MIN(kd2 - nimpp+1, nlci ) ) 1755 ijpt0 = MAX( 1, MIN(kl - njmpp+1, nlcj ) ) 1756 ijpt1 = MAX( 0, MIN(kl - njmpp+1, nlcj - 1 ) ) 1757 ELSEIF( ktype==2 ) THEN ! east/west boundaries 1758 iipt0 = MAX( 1, MIN(kl - nimpp+1, nlci ) ) 1759 iipt1 = MAX( 0, MIN(kl - nimpp+1, nlci - 1 ) ) 1760 ijpt0 = MAX( 1, MIN(kd1 - njmpp+1, nlcj ) ) 1761 ijpt1 = MAX( 0, MIN(kd2 - njmpp+1, nlcj - 1 ) ) 1762 ilpt1 = MAX( 1, MIN(kd2 - njmpp+1, nlcj ) ) 1763 ELSE 1764 CALL ctl_stop( 'mppobc: bad ktype' ) 1765 ENDIF 1766 1767 ! Communication level by level 1768 ! ---------------------------- 1769 !!gm Remark : this is very time consumming!!! 1770 ! ! ------------------------ ! 1771 DO jk = 1, kk ! Loop over the levels ! 1772 ! ! ------------------------ ! 1773 ! 1774 IF( ktype == 1 ) THEN ! north/south boundaries 1775 DO jj = ijpt0, ijpt1 1776 DO ji = iipt0, iipt1 1777 ztab(ji,jj) = ptab(ji,jk) 1778 END DO 1779 END DO 1780 ELSEIF( ktype == 2 ) THEN ! east/west boundaries 1781 DO jj = ijpt0, ijpt1 1782 DO ji = iipt0, iipt1 1783 ztab(ji,jj) = ptab(jj,jk) 1784 END DO 1785 END DO 1786 ENDIF 1787 1788 1789 ! 1. East and west directions 1790 ! --------------------------- 1791 ! 1792 IF( nbondi /= 2 ) THEN ! Read Dirichlet lateral conditions 1793 iihom = nlci-nreci 1794 DO jl = 1, jpreci 1795 t2ew(:,jl,1) = ztab(jpreci+jl,:) 1796 t2we(:,jl,1) = ztab(iihom +jl,:) 1797 END DO 1798 ENDIF 1799 ! 1800 ! ! Migrations 1801 imigr=jpreci*jpj 1802 ! 1803 IF( nbondi == -1 ) THEN 1804 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 ) 1805 CALL mpprecv( 1, t2ew(1,1,2), imigr ) 1806 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1807 ELSEIF( nbondi == 0 ) THEN 1808 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1809 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 ) 1810 CALL mpprecv( 1, t2ew(1,1,2), imigr ) 1811 CALL mpprecv( 2, t2we(1,1,2), imigr ) 1812 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1813 IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err ) 1814 ELSEIF( nbondi == 1 ) THEN 1815 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1816 CALL mpprecv( 2, t2we(1,1,2), imigr ) 1817 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1818 ENDIF 1819 ! 1820 ! ! Write Dirichlet lateral conditions 1821 iihom = nlci-jpreci 1822 ! 1823 IF( nbondi == 0 .OR. nbondi == 1 ) THEN 1824 DO jl = 1, jpreci 1825 ztab(jl,:) = t2we(:,jl,2) 1826 END DO 1827 ENDIF 1828 IF( nbondi == -1 .OR. nbondi == 0 ) THEN 1829 DO jl = 1, jpreci 1830 ztab(iihom+jl,:) = t2ew(:,jl,2) 1831 END DO 1832 ENDIF 1833 1834 1835 ! 2. North and south directions 1836 ! ----------------------------- 1837 ! 1838 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 1839 ijhom = nlcj-nrecj 1840 DO jl = 1, jprecj 1841 t2sn(:,jl,1) = ztab(:,ijhom +jl) 1842 t2ns(:,jl,1) = ztab(:,jprecj+jl) 1843 END DO 1844 ENDIF 1845 ! 1846 ! ! Migrations 1847 imigr = jprecj * jpi 1848 ! 
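      ! Same hand-shaking as for the east-west exchange above, with tag 4 for data
      ! sent to the northern neighbour (nono) and tag 3 for data sent to the southern
      ! neighbour (noso) : nbondj = -1 (southern edge, northern neighbour only),
      ! nbondj = 0 (interior, both neighbours), nbondj = 1 (northern edge, southern
      ! neighbour only). With the immediate send option (c_mpi_send = 'I', l_isend
      ! true) each mppsend returns a request identifier and mpi_wait is called to
      ! ensure the non-blocking send has completed before the buffers are reused.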
1849 IF( nbondj == -1 ) THEN 1850 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 ) 1851 CALL mpprecv( 3, t2ns(1,1,2), imigr ) 1852 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1853 ELSEIF( nbondj == 0 ) THEN 1854 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 1855 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 ) 1856 CALL mpprecv( 3, t2ns(1,1,2), imigr ) 1857 CALL mpprecv( 4, t2sn(1,1,2), imigr ) 1858 IF( l_isend ) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1859 IF( l_isend ) CALL mpi_wait( ml_req2, ml_stat, ml_err ) 1860 ELSEIF( nbondj == 1 ) THEN 1861 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 1862 CALL mpprecv( 4, t2sn(1,1,2), imigr) 1863 IF( l_isend ) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1864 ENDIF 1865 ! 1866 ! ! Write Dirichlet lateral conditions 1867 ijhom = nlcj - jprecj 1868 IF( nbondj == 0 .OR. nbondj == 1 ) THEN 1869 DO jl = 1, jprecj 1870 ztab(:,jl) = t2sn(:,jl,2) 1871 END DO 1872 ENDIF 1873 IF( nbondj == 0 .OR. nbondj == -1 ) THEN 1874 DO jl = 1, jprecj 1875 ztab(:,ijhom+jl) = t2ns(:,jl,2) 1876 END DO 1877 ENDIF 1878 IF( ktype==1 .AND. kd1 <= jpi+nimpp-1 .AND. nimpp <= kd2 ) THEN 1879 DO jj = ijpt0, ijpt1 ! north/south boundaries 1880 DO ji = iipt0,ilpt1 1881 ptab(ji,jk) = ztab(ji,jj) 1882 END DO 1883 END DO 1884 ELSEIF( ktype==2 .AND. kd1 <= jpj+njmpp-1 .AND. njmpp <= kd2 ) THEN 1885 DO jj = ijpt0, ilpt1 ! east/west boundaries 1886 DO ji = iipt0,iipt1 1887 ptab(jj,jk) = ztab(ji,jj) 1888 END DO 1889 END DO 1890 ENDIF 1891 ! 1892 END DO 1893 ! 1894 END SUBROUTINE mppobc 1895 1896 1897 SUBROUTINE mpp_comm_free( kcom ) 1898 !!---------------------------------------------------------------------- 1899 !!---------------------------------------------------------------------- 1900 INTEGER, INTENT(in) :: kcom 1901 !! 1902 INTEGER :: ierr 1903 !!---------------------------------------------------------------------- 1904 ! 1905 CALL MPI_COMM_FREE(kcom, ierr) 1906 ! 1907 END SUBROUTINE mpp_comm_free 1908 1909 1910 SUBROUTINE mpp_ini_ice( pindic ) 1911 !!---------------------------------------------------------------------- 1912 !! *** routine mpp_ini_ice *** 1913 !! 1914 !! ** Purpose : Initialize special communicator for ice areas 1915 !! condition together with global variables needed in the ddmpp folding 1916 !! 1917 !! ** Method : - Look for ice processors in ice routines 1918 !! - Put their number in nrank_ice 1919 !! - Create groups for the world processors and the ice processors 1920 !! - Create a communicator for ice processors 1921 !! 1922 !! ** output 1923 !! njmppmax = njmpp for northern procs 1924 !! ndim_rank_ice = number of processors with ice 1925 !! nrank_ice (ndim_rank_ice) = ice processors 1926 !! ngrp_world = group ID for the world processors 1927 !! ngrp_ice = group ID for the ice processors 1928 !! ncomm_ice = communicator for the ice procs. 1929 !! n_ice_root = number (in the world) of proc 0 in the ice comm. 1930 !! 1931 !!---------------------------------------------------------------------- 1932 INTEGER, INTENT(in) :: pindic 1933 !! 1934 INTEGER :: ierr 1935 INTEGER :: jjproc 1936 INTEGER :: ii 1937 INTEGER, DIMENSION(jpnij) :: kice 1938 INTEGER, DIMENSION(jpnij) :: zwork 1939 !!---------------------------------------------------------------------- 1940 ! 1941 ! Look for how many procs with sea-ice 1942 ! 1943 kice = 0 1944 DO jjproc = 1, jpnij 1945 IF( jjproc == narea .AND. pindic .GT. 0 ) kice(jjproc) = 1 1946 END DO 1947 ! 
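      ! Each process has set only its own entry of kice (1 if it holds sea-ice,
      ! 0 otherwise) ; the mpi_sum reduction below therefore gives every process
      ! the complete 0/1 map, from which the number (ndim_rank_ice) and the ranks
      ! (nrank_ice) of the ice processors are extracted.
      ! Minimal sketch of the same pattern with a hypothetical logical flag ll_flag:
      !    INTEGER, DIMENSION(jpnij) :: iflag, iwork
      !    iflag(:) = 0   ;   IF( ll_flag )   iflag(narea) = 1
      !    CALL mpi_allreduce( iflag, iwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr )
      !    ! iwork now holds the 0/1 flag of every process ; SUM(iwork) counts them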
1948 zwork = 0 1949 CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr ) 1950 ndim_rank_ice = SUM( zwork ) 1951 1952 ! Allocate the right size to nrank_north 1953 #if ! defined key_agrif 1954 IF( ALLOCATED( nrank_ice ) ) DEALLOCATE( nrank_ice ) 1955 #else 1956 DEALLOCATE( nrank_ice ) 2726 1957 #endif 2727 2728 ! 2.3 Write Dirichlet lateral conditions 2729 2730 iihom = nlci-nreci 2731 2732 SELECT CASE ( nbondi ) 2733 2734 CASE ( -1 ) 2735 DO jl = 1, jpreci 2736 ptab(iihom +jl,:) = ptab(iihom +jl,:)+t2ew(:,jl,2) 2737 END DO 2738 2739 CASE ( 0 ) 2740 DO jl = 1, jpreci 2741 ptab(jpreci+jl,:) = ptab(jpreci+jl,:)+t2we(:,jl,2) 2742 ptab(iihom +jl,:) = ptab(iihom +jl,:)+t2ew(:,jl,2) 2743 END DO 2744 2745 CASE ( 1 ) 2746 DO jl = 1, jpreci 2747 ptab(jpreci+jl,:) = ptab(jpreci+jl,:)+t2we(:,jl,2) 2748 END DO 2749 END SELECT 2750 2751 2752 ! 3. North and south directions 2753 ! ----------------------------- 2754 2755 ! 3.1 Read Dirichlet lateral conditions 2756 2757 ijhom = nlcj-jprecj 2758 2759 SELECT CASE ( nbondj ) 2760 2761 CASE ( -1, 0, 1 ) 2762 DO jl = 1, jprecj 2763 t2sn(:,jl,1) = ptab(:,ijhom+jl) 2764 t2ns(:,jl,1) = ptab(:, jl ) 2765 END DO 2766 2767 END SELECT 2768 2769 ! 3.2 Migrations 2770 2771 #if defined key_mpp_shmem 2772 2773 !! * SHMEN version 2774 2775 imigr=jprecj*jpi 2776 2777 SELECT CASE ( nbondj ) 2778 2779 CASE ( -1 ) 2780 CALL shmem_put(t2sn(1,1,2),t2sn(1,1,1),imigr,nono) 2781 2782 CASE ( 0 ) 2783 CALL shmem_put(t2ns(1,1,2),t2ns(1,1,1),imigr,noso) 2784 CALL shmem_put(t2sn(1,1,2),t2sn(1,1,1),imigr,nono) 2785 2786 CASE ( 1 ) 2787 CALL shmem_put(t2ns(1,1,2),t2ns(1,1,1),imigr,noso) 2788 2789 END SELECT 2790 CALL barrier() 2791 CALL shmem_udcflush() 2792 2793 # elif defined key_mpp_mpi 2794 !! * Local variables (MPI version) 2795 2796 imigr=jprecj*jpi 2797 2798 SELECT CASE ( nbondj ) 2799 2800 CASE ( -1 ) 2801 CALL mppsend(4,t2sn(1,1,1),imigr,nono, ml_req1) 2802 CALL mpprecv(3,t2ns(1,1,2),imigr) 2803 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2804 2805 CASE ( 0 ) 2806 CALL mppsend(3,t2ns(1,1,1),imigr,noso, ml_req1) 2807 CALL mppsend(4,t2sn(1,1,1),imigr,nono, ml_req2) 2808 CALL mpprecv(3,t2ns(1,1,2),imigr) 2809 CALL mpprecv(4,t2sn(1,1,2),imigr) 2810 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2811 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 2812 2813 CASE ( 1 ) 2814 CALL mppsend(3,t2ns(1,1,1),imigr,noso, ml_req1) 2815 CALL mpprecv(4,t2sn(1,1,2),imigr) 2816 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 2817 END SELECT 2818 2819 #endif 2820 2821 ! 3.3 Write Dirichlet lateral conditions 2822 2823 ijhom = nlcj-nrecj 2824 2825 SELECT CASE ( nbondj ) 2826 2827 CASE ( -1 ) 2828 DO jl = 1, jprecj 2829 ptab(:,ijhom +jl) = ptab(:,ijhom +jl)+t2ns(:,jl,2) 2830 END DO 2831 2832 CASE ( 0 ) 2833 DO jl = 1, jprecj 2834 ptab(:,jprecj+jl) = ptab(:,jprecj+jl)+t2sn(:,jl,2) 2835 ptab(:,ijhom +jl) = ptab(:,ijhom +jl)+t2ns(:,jl,2) 2836 END DO 2837 2838 CASE ( 1 ) 2839 DO jl = 1, jprecj 2840 ptab(:,jprecj+jl) = ptab(:,jprecj+jl)+t2sn(:,jl,2) 2841 END DO 2842 2843 END SELECT 2844 2845 END SUBROUTINE mpplnks 2846 2847 2848 SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req) 2849 !!---------------------------------------------------------------------- 2850 !! *** routine mppsend *** 2851 !! 2852 !! ** Purpose : Send messag passing array 2853 !! 2854 !!---------------------------------------------------------------------- 2855 !! * Arguments 2856 REAL(wp), INTENT(inout) :: pmess(*) ! array of real 2857 INTEGER , INTENT( in ) :: kbytes, & ! 
size of the array pmess 2858 & kdest , & ! receive process number 2859 & ktyp, & ! Tag of the message 2860 & md_req ! Argument for isend 2861 !!---------------------------------------------------------------------- 2862 #if defined key_mpp_shmem 2863 !! * SHMEM version : routine not used 2864 2865 #elif defined key_mpp_mpi 2866 !! * MPI version 2867 INTEGER :: iflag 2868 2869 SELECT CASE ( c_mpi_send ) 2870 CASE ( 'S' ) ! Standard mpi send (blocking) 2871 CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest, ktyp, & 2872 & mpi_comm_opa, iflag ) 2873 CASE ( 'B' ) ! Buffer mpi send (blocking) 2874 CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest, ktyp, & 2875 & mpi_comm_opa, iflag ) 2876 CASE ( 'I' ) ! Immediate mpi send (non-blocking send) 2877 ! Be carefull, one more argument here : the mpi request identifier.. 2878 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest, ktyp, & 2879 & mpi_comm_opa, md_req, iflag ) 2880 END SELECT 2881 #endif 2882 2883 END SUBROUTINE mppsend 2884 2885 2886 SUBROUTINE mpprecv( ktyp, pmess, kbytes ) 2887 !!---------------------------------------------------------------------- 2888 !! *** routine mpprecv *** 2889 !! 2890 !! ** Purpose : Receive messag passing array 2891 !! 2892 !!---------------------------------------------------------------------- 2893 !! * Arguments 2894 REAL(wp), INTENT(inout) :: pmess(*) ! array of real 2895 INTEGER , INTENT( in ) :: kbytes, & ! suze of the array pmess 2896 & ktyp ! Tag of the recevied message 2897 !!---------------------------------------------------------------------- 2898 #if defined key_mpp_shmem 2899 !! * SHMEM version : routine not used 2900 2901 # elif defined key_mpp_mpi 2902 !! * MPI version 2903 INTEGER :: istatus(mpi_status_size) 2904 INTEGER :: iflag 2905 2906 CALL mpi_recv( pmess, kbytes, mpi_double_precision, mpi_any_source, ktyp, & 2907 & mpi_comm_opa, istatus, iflag ) 2908 #endif 2909 2910 END SUBROUTINE mpprecv 2911 2912 2913 SUBROUTINE mppgather( ptab, kp, pio ) 2914 !!---------------------------------------------------------------------- 2915 !! *** routine mppgather *** 2916 !! 2917 !! ** Purpose : Transfert between a local subdomain array and a work 2918 !! array which is distributed following the vertical level. 2919 !! 2920 !! ** Method : 2921 !! 2922 !!---------------------------------------------------------------------- 2923 !! * Arguments 2924 REAL(wp), DIMENSION(jpi,jpj), INTENT( in ) :: ptab ! subdomain input array 2925 INTEGER , INTENT( in ) :: kp ! record length 2926 REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT( out ) :: pio ! subdomain input array 2927 !!--------------------------------------------------------------------- 2928 #if defined key_mpp_shmem 2929 !! * SHMEM version 2930 2931 CALL barrier() 2932 CALL shmem_put( pio(1,1,npvm_me+1), ptab, jpi*jpj, kp ) 2933 CALL barrier() 2934 2935 #elif defined key_mpp_mpi 2936 !! * Local variables (MPI version) 2937 INTEGER :: itaille,ierror 2938 2939 itaille=jpi*jpj 2940 CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille, & 2941 & mpi_double_precision, kp , mpi_comm_opa, ierror ) 2942 #endif 2943 2944 END SUBROUTINE mppgather 2945 2946 2947 SUBROUTINE mppscatter( pio, kp, ptab ) 2948 !!---------------------------------------------------------------------- 2949 !! *** routine mppscatter *** 2950 !! 2951 !! ** Purpose : Transfert between awork array which is distributed 2952 !! following the vertical level and the local subdomain array. 2953 !! 2954 !! ** Method : 2955 !! 
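!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : mppsend above switches between
!! standard, buffered and immediate MPI sends according to the string
!! c_mpi_send; only the immediate variant returns a request that must be
!! completed later with mpi_wait.  The standalone sketch below shows the
!! three calls side by side; the hard-coded mode character and the names
!! cd_mode / zmsg are illustrative assumptions (run on 2 ranks).
!!----------------------------------------------------------------------
   PROGRAM send_mode_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      CHARACTER(len=1), PARAMETER :: cd_mode = 'I'      ! 'S', 'B' or 'I' (assumed)
      REAL(KIND=8) :: zmsg(8)
      REAL(KIND=8), ALLOCATABLE :: zbuf(:)              ! attached buffer for mpi_bsend
      INTEGER :: irank, ierr, ireq
      INTEGER :: istat(MPI_STATUS_SIZE)
      !
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      zmsg(:) = 1._8
      !
      IF( irank == 0 ) THEN                             ! sender
         SELECT CASE ( cd_mode )
         CASE ( 'S' )                                   ! standard blocking send
            CALL mpi_send ( zmsg, 8, mpi_double_precision, 1, 5, MPI_COMM_WORLD, ierr )
         CASE ( 'B' )                                   ! buffered blocking send : needs an attached buffer
            ALLOCATE( zbuf( 8 + MPI_BSEND_OVERHEAD ) )
            CALL mpi_buffer_attach( zbuf, 8*8 + MPI_BSEND_OVERHEAD, ierr )
            CALL mpi_bsend( zmsg, 8, mpi_double_precision, 1, 5, MPI_COMM_WORLD, ierr )
         CASE ( 'I' )                                   ! immediate send : one extra argument, the request
            CALL mpi_isend( zmsg, 8, mpi_double_precision, 1, 5, MPI_COMM_WORLD, ireq, ierr )
            CALL mpi_wait ( ireq, istat, ierr )         ! completion must be waited for
         END SELECT
      ELSEIF( irank == 1 ) THEN                         ! the receiver is identical in all three cases
         CALL mpi_recv( zmsg, 8, mpi_double_precision, 0, 5, MPI_COMM_WORLD, istat, ierr )
      ENDIF
      CALL mpi_finalize( ierr )
   END PROGRAM send_mode_sketch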
2956 !!---------------------------------------------------------------------- 2957 REAL(wp), DIMENSION(jpi,jpj,jpnij) :: pio ! output array 2958 INTEGER :: kp ! Tag (not used with MPI 2959 REAL(wp), DIMENSION(jpi,jpj) :: ptab ! subdomain array input 2960 !!--------------------------------------------------------------------- 2961 #if defined key_mpp_shmem 2962 !! * SHMEM version 2963 2964 CALL barrier() 2965 CALL shmem_get( ptab, pio(1,1,npvm_me+1), jpi*jpj, kp ) 2966 CALL barrier() 2967 2968 # elif defined key_mpp_mpi 2969 !! * Local variables (MPI version) 2970 INTEGER :: itaille, ierror 2971 2972 itaille=jpi*jpj 2973 2974 CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille, & 2975 & mpi_double_precision, kp, mpi_comm_opa, ierror ) 2976 #endif 2977 2978 END SUBROUTINE mppscatter 2979 2980 2981 SUBROUTINE mppisl_a_int( ktab, kdim ) 2982 !!---------------------------------------------------------------------- 2983 !! *** routine mppisl_a_int *** 2984 !! 2985 !! ** Purpose : Massively parallel processors 2986 !! Find the non zero value 2987 !! 2988 !!---------------------------------------------------------------------- 2989 !! * Arguments 2990 INTEGER, INTENT( in ) :: kdim ! ??? 2991 INTEGER, INTENT(inout), DIMENSION(kdim) :: ktab ! ??? 2992 2993 #if defined key_mpp_shmem 2994 !! * Local variables (SHMEM version) 2995 INTEGER :: ji 2996 INTEGER, SAVE :: ibool=0 2997 2998 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppisl_a_int routine : kdim is too big', & 2999 & 'change jpmppsum dimension in mpp.h' ) 3000 3001 DO ji = 1, kdim 3002 niitab_shmem(ji) = ktab(ji) 3003 END DO 3004 CALL barrier() 3005 IF(ibool == 0 ) THEN 3006 CALL shmem_int8_min_to_all (ni11tab_shmem,niitab_shmem,kdim,0 & 3007 ,0,N$PES,ni11wrk_shmem,ni11sync_shmem) 3008 CALL shmem_int8_max_to_all (ni12tab_shmem,niitab_shmem,kdim,0 & 3009 ,0,N$PES,ni12wrk_shmem,ni12sync_shmem) 3010 ELSE 3011 CALL shmem_int8_min_to_all (ni11tab_shmem,niitab_shmem,kdim,0 & 3012 ,0,N$PES,ni21wrk_shmem,ni21sync_shmem) 3013 CALL shmem_int8_max_to_all (ni12tab_shmem,niitab_shmem,kdim,0 & 3014 ,0,N$PES,ni22wrk_shmem,ni22sync_shmem) 3015 ENDIF 3016 CALL barrier() 3017 ibool=ibool+1 3018 ibool=MOD( ibool,2) 3019 DO ji = 1, kdim 3020 IF( ni11tab_shmem(ji) /= 0. ) THEN 3021 ktab(ji) = ni11tab_shmem(ji) 3022 ELSE 3023 ktab(ji) = ni12tab_shmem(ji) 1958 ALLOCATE( nrank_ice(ndim_rank_ice) ) 1959 ! 1960 ii = 0 1961 nrank_ice = 0 1962 DO jjproc = 1, jpnij 1963 IF( zwork(jjproc) == 1) THEN 1964 ii = ii + 1 1965 nrank_ice(ii) = jjproc -1 3024 1966 ENDIF 3025 1967 END DO 3026 3027 # elif defined key_mpp_mpi 3028 !! * Local variables (MPI version) 3029 LOGICAL :: lcommute 3030 INTEGER, DIMENSION(kdim) :: iwork 3031 INTEGER :: mpi_isl,ierror 3032 3033 lcommute = .TRUE. 3034 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 3035 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer & 3036 , mpi_isl, mpi_comm_opa, ierror ) 3037 ktab(:) = iwork(:) 3038 #endif 3039 3040 END SUBROUTINE mppisl_a_int 3041 3042 3043 SUBROUTINE mppisl_int( ktab ) 3044 !!---------------------------------------------------------------------- 3045 !! *** routine mppisl_int *** 3046 !! 3047 !! ** Purpose : Massively parallel processors 3048 !! Find the non zero value 3049 !! 3050 !!---------------------------------------------------------------------- 3051 !! * Arguments 3052 INTEGER , INTENT( inout ) :: ktab ! 3053 3054 #if defined key_mpp_shmem 3055 !! 
* Local variables (SHMEM version) 3056 INTEGER, SAVE :: ibool=0 3057 3058 niitab_shmem(1) = ktab 3059 CALL barrier() 3060 IF(ibool == 0 ) THEN 3061 CALL shmem_int8_min_to_all (ni11tab_shmem,niitab_shmem,1,0 & 3062 ,0,N$PES,ni11wrk_shmem,ni11sync_shmem) 3063 CALL shmem_int8_max_to_all (ni12tab_shmem,niitab_shmem,1,0 & 3064 ,0,N$PES,ni12wrk_shmem,ni12sync_shmem) 3065 ELSE 3066 CALL shmem_int8_min_to_all (ni11tab_shmem,niitab_shmem,1,0 & 3067 ,0,N$PES,ni21wrk_shmem,ni21sync_shmem) 3068 CALL shmem_int8_max_to_all (ni12tab_shmem,niitab_shmem,1,0 & 3069 ,0,N$PES,ni22wrk_shmem,ni22sync_shmem) 3070 ENDIF 3071 CALL barrier() 3072 ibool=ibool+1 3073 ibool=MOD( ibool,2) 3074 IF( ni11tab_shmem(1) /= 0. ) THEN 3075 ktab = ni11tab_shmem(1) 3076 ELSE 3077 ktab = ni12tab_shmem(1) 3078 ENDIF 3079 3080 # elif defined key_mpp_mpi 3081 3082 !! * Local variables (MPI version) 3083 LOGICAL :: lcommute 3084 INTEGER :: mpi_isl,ierror 3085 INTEGER :: iwork 3086 3087 lcommute = .TRUE. 3088 CALL mpi_op_create(lc_isl,lcommute,mpi_isl,ierror) 3089 CALL mpi_allreduce(ktab, iwork, 1,mpi_integer & 3090 ,mpi_isl,mpi_comm_opa,ierror) 3091 ktab = iwork 3092 #endif 3093 3094 END SUBROUTINE mppisl_int 3095 3096 3097 SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) 3098 !!---------------------------------------------------------------------- 3099 !! *** routine mppmax_a_int *** 3100 !! 3101 !! ** Purpose : Find maximum value in an integer layout array 3102 !! 3103 !!---------------------------------------------------------------------- 3104 !! * Arguments 3105 INTEGER , INTENT( in ) :: kdim ! size of array 3106 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 3107 INTEGER , INTENT(in) , OPTIONAL :: kcom 3108 3109 #if defined key_mpp_shmem 3110 !! * Local declarations (SHMEM version) 3111 INTEGER :: ji 3112 INTEGER, SAVE :: ibool=0 3113 3114 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppmax_a_int routine : kdim is too big', & 3115 & 'change jpmppsum dimension in mpp.h' ) 3116 3117 DO ji = 1, kdim 3118 niltab_shmem(ji) = ktab(ji) 1968 1969 ! Create the world group 1970 CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr ) 1971 1972 ! Create the ice group from the world group 1973 CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_ice, nrank_ice, ngrp_ice, ierr ) 1974 1975 ! Create the ice communicator , ie the pool of procs with sea-ice 1976 CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr ) 1977 1978 ! Find proc number in the world of proc 0 in the north 1979 ! The following line seems to be useless, we just comment & keep it as reminder 1980 ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_world,n_ice_root,ierr) 1981 ! 1982 END SUBROUTINE mpp_ini_ice 1983 1984 1985 SUBROUTINE mpp_ini_north 1986 !!---------------------------------------------------------------------- 1987 !! *** routine mpp_ini_north *** 1988 !! 1989 !! ** Purpose : Initialize special communicator for north folding 1990 !! condition together with global variables needed in the mpp folding 1991 !! 1992 !! ** Method : - Look for northern processors 1993 !! - Put their number in nrank_north 1994 !! - Create groups for the world processors and the north processors 1995 !! - Create a communicator for northern processors 1996 !! 1997 !! ** output 1998 !! njmppmax = njmpp for northern procs 1999 !! ndim_rank_north = number of processors in the northern line 2000 !! nrank_north (ndim_rank_north) = number of the northern procs. 2001 !! ngrp_world = group ID for the world processors 2002 !! ngrp_north = group ID for the northern processors 2003 !! 
ncomm_north = communicator for the northern procs. 2004 !! north_root = number (in the world) of proc 0 in the northern comm. 2005 !! 2006 !!---------------------------------------------------------------------- 2007 INTEGER :: ierr 2008 INTEGER :: jjproc 2009 INTEGER :: ii, ji 2010 !!---------------------------------------------------------------------- 2011 ! 2012 njmppmax = MAXVAL( njmppt ) 2013 ! 2014 ! Look for how many procs on the northern boundary 2015 ndim_rank_north = 0 2016 DO jjproc = 1, jpnij 2017 IF( njmppt(jjproc) == njmppmax ) ndim_rank_north = ndim_rank_north + 1 3119 2018 END DO 3120 CALL barrier() 3121 IF(ibool == 0 ) THEN 3122 CALL shmem_int8_max_to_all (niltab_shmem,niltab_shmem,kdim,0,0 & 3123 ,N$PES,nil1wrk_shmem,nil1sync_shmem ) 3124 ELSE 3125 CALL shmem_int8_max_to_all (niltab_shmem,niltab_shmem,kdim,0,0 & 3126 ,N$PES,nil2wrk_shmem,nil2sync_shmem ) 3127 ENDIF 3128 CALL barrier() 3129 ibool=ibool+1 3130 ibool=MOD( ibool,2) 3131 DO ji = 1, kdim 3132 ktab(ji) = niltab_shmem(ji) 2019 ! 2020 ! Allocate the right size to nrank_north 2021 ALLOCATE( nrank_north(ndim_rank_north) ) 2022 2023 ! Fill the nrank_north array with proc. number of northern procs. 2024 ! Note : the rank start at 0 in MPI 2025 ii = 0 2026 DO ji = 1, jpnij 2027 IF ( njmppt(ji) == njmppmax ) THEN 2028 ii=ii+1 2029 nrank_north(ii)=ji-1 2030 END IF 3133 2031 END DO 3134 3135 # elif defined key_mpp_mpi 3136 3137 !! * Local variables (MPI version) 3138 INTEGER :: ierror 3139 INTEGER :: localcomm 3140 INTEGER, DIMENSION(kdim) :: iwork 3141 3142 localcomm = mpi_comm_opa 3143 IF( PRESENT(kcom) ) localcomm = kcom 3144 3145 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, & 3146 & mpi_max, localcomm, ierror ) 3147 3148 ktab(:) = iwork(:) 3149 #endif 3150 3151 END SUBROUTINE mppmax_a_int 3152 3153 3154 SUBROUTINE mppmax_int( ktab, kcom ) 3155 !!---------------------------------------------------------------------- 3156 !! *** routine mppmax_int *** 3157 !! 3158 !! ** Purpose : 3159 !! Massively parallel processors 3160 !! Find maximum value in an integer layout array 3161 !! 3162 !!---------------------------------------------------------------------- 3163 !! * Arguments 3164 INTEGER, INTENT(inout) :: ktab ! ??? 3165 INTEGER, INTENT(in), OPTIONAL :: kcom ! ??? 3166 3167 !! * Local declarations 3168 3169 #if defined key_mpp_shmem 3170 3171 !! * Local variables (SHMEM version) 3172 INTEGER :: ji 3173 INTEGER, SAVE :: ibool=0 3174 3175 niltab_shmem(1) = ktab 3176 CALL barrier() 3177 IF(ibool == 0 ) THEN 3178 CALL shmem_int8_max_to_all (niltab_shmem,niltab_shmem, 1,0,0 & 3179 ,N$PES,nil1wrk_shmem,nil1sync_shmem ) 3180 ELSE 3181 CALL shmem_int8_max_to_all (niltab_shmem,niltab_shmem, 1,0,0 & 3182 ,N$PES,nil2wrk_shmem,nil2sync_shmem ) 3183 ENDIF 3184 CALL barrier() 3185 ibool=ibool+1 3186 ibool=MOD( ibool,2) 3187 ktab = niltab_shmem(1) 3188 3189 # elif defined key_mpp_mpi 3190 3191 !! * Local variables (MPI version) 3192 INTEGER :: ierror, iwork 3193 INTEGER :: localcomm 3194 3195 localcomm = mpi_comm_opa 3196 IF( PRESENT(kcom) ) localcomm = kcom 3197 3198 CALL mpi_allreduce(ktab,iwork, 1,mpi_integer & 3199 & ,mpi_max,localcomm,ierror) 3200 3201 ktab = iwork 3202 #endif 3203 3204 END SUBROUTINE mppmax_int 3205 3206 3207 SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) 3208 !!---------------------------------------------------------------------- 3209 !! *** routine mppmin_a_int *** 3210 !! 3211 !! ** Purpose : Find minimum value in an integer layout array 3212 !! 
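!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : mpp_ini_ice and mpp_ini_north (new
!! code on the surrounding lines) derive a restricted communicator in
!! three steps : take the group of the model communicator, include only
!! the selected ranks, and create a new communicator from that group.
!! Below is a minimal standalone sketch over MPI_COMM_WORLD; selecting
!! the even ranks is an illustrative assumption.
!!----------------------------------------------------------------------
   PROGRAM subcomm_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER :: irank, isize, indim, ierr, ji, ii
      INTEGER :: igrp_world, igrp_sub, icomm_sub
      INTEGER, ALLOCATABLE, DIMENSION(:) :: iranks
      !
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      CALL mpi_comm_size( MPI_COMM_WORLD, isize, ierr )
      !
      indim = ( isize + 1 ) / 2                         ! number of even ranks
      ALLOCATE( iranks(indim) )
      ii = 0
      DO ji = 0, isize-1, 2                             ! list of selected ranks (0-based, as in MPI)
         ii = ii + 1
         iranks(ii) = ji
      END DO
      !
      CALL mpi_comm_group ( MPI_COMM_WORLD, igrp_world, ierr )              ! world group
      CALL mpi_group_incl ( igrp_world, indim, iranks, igrp_sub, ierr )     ! restricted group
      CALL mpi_comm_create( MPI_COMM_WORLD, igrp_sub , icomm_sub, ierr )    ! restricted communicator
      !
      ! ranks outside the group receive MPI_COMM_NULL and must not use icomm_sub
      IF( icomm_sub /= MPI_COMM_NULL )   CALL mpi_barrier( icomm_sub, ierr )
      CALL mpi_finalize( ierr )
   END PROGRAM subcomm_sketch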
3213 !!---------------------------------------------------------------------- 3214 !! * Arguments 3215 INTEGER , INTENT( in ) :: kdim ! size of array 3216 INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array 3217 INTEGER , INTENT( in ), OPTIONAL :: kcom ! input array 3218 3219 #if defined key_mpp_shmem 3220 !! * Local declarations (SHMEM version) 3221 INTEGER :: ji 3222 INTEGER, SAVE :: ibool=0 3223 3224 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppmin_a_int routine : kdim is too big', & 3225 & 'change jpmppsum dimension in mpp.h' ) 3226 3227 DO ji = 1, kdim 3228 niltab_shmem(ji) = ktab(ji) 3229 END DO 3230 CALL barrier() 3231 IF(ibool == 0 ) THEN 3232 CALL shmem_int8_min_to_all (niltab_shmem,niltab_shmem,kdim,0,0 & 3233 ,N$PES,nil1wrk_shmem,nil1sync_shmem ) 3234 ELSE 3235 CALL shmem_int8_min_to_all (niltab_shmem,niltab_shmem,kdim,0,0 & 3236 ,N$PES,nil2wrk_shmem,nil2sync_shmem ) 3237 ENDIF 3238 CALL barrier() 3239 ibool=ibool+1 3240 ibool=MOD( ibool,2) 3241 DO ji = 1, kdim 3242 ktab(ji) = niltab_shmem(ji) 3243 END DO 3244 3245 # elif defined key_mpp_mpi 3246 3247 !! * Local variables (MPI version) 3248 INTEGER :: ierror 3249 INTEGER :: localcomm 3250 INTEGER, DIMENSION(kdim) :: iwork 3251 3252 localcomm = mpi_comm_opa 3253 IF( PRESENT(kcom) ) localcomm = kcom 3254 3255 CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, & 3256 & mpi_min, localcomm, ierror ) 3257 3258 ktab(:) = iwork(:) 3259 #endif 3260 3261 END SUBROUTINE mppmin_a_int 3262 3263 3264 SUBROUTINE mppmin_int( ktab ) 3265 !!---------------------------------------------------------------------- 3266 !! *** routine mppmin_int *** 3267 !! 3268 !! ** Purpose : 3269 !! Massively parallel processors 3270 !! Find minimum value in an integer layout array 3271 !! 3272 !!---------------------------------------------------------------------- 3273 !! * Arguments 3274 INTEGER, INTENT(inout) :: ktab ! ??? 3275 3276 !! * Local declarations 3277 3278 #if defined key_mpp_shmem 3279 3280 !! * Local variables (SHMEM version) 3281 INTEGER :: ji 3282 INTEGER, SAVE :: ibool=0 3283 3284 niltab_shmem(1) = ktab 3285 CALL barrier() 3286 IF(ibool == 0 ) THEN 3287 CALL shmem_int8_min_to_all (niltab_shmem,niltab_shmem, 1,0,0 & 3288 ,N$PES,nil1wrk_shmem,nil1sync_shmem ) 3289 ELSE 3290 CALL shmem_int8_min_to_all (niltab_shmem,niltab_shmem, 1,0,0 & 3291 ,N$PES,nil2wrk_shmem,nil2sync_shmem ) 3292 ENDIF 3293 CALL barrier() 3294 ibool=ibool+1 3295 ibool=MOD( ibool,2) 3296 ktab = niltab_shmem(1) 3297 3298 # elif defined key_mpp_mpi 3299 3300 !! * Local variables (MPI version) 3301 INTEGER :: ierror, iwork 3302 3303 CALL mpi_allreduce(ktab,iwork, 1,mpi_integer & 3304 & ,mpi_min,mpi_comm_opa,ierror) 3305 3306 ktab = iwork 3307 #endif 3308 3309 END SUBROUTINE mppmin_int 3310 3311 3312 SUBROUTINE mppsum_a_int( ktab, kdim ) 3313 !!---------------------------------------------------------------------- 3314 !! *** routine mppsum_a_int *** 3315 !! 3316 !! ** Purpose : Massively parallel processors 3317 !! Global integer sum 3318 !! 3319 !!---------------------------------------------------------------------- 3320 !! * Arguments 3321 INTEGER, INTENT( in ) :: kdim ! ??? 3322 INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! ??? 3323 3324 #if defined key_mpp_shmem 3325 3326 !! 
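!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : several reductions above accept an
!! optional communicator kcom so that the same routine can work either on
!! the model communicator or on a restricted pool (ice or north fold).
!! The standalone sketch below shows the PRESENT(kcom) pattern; the
!! wrapper name sketch_min is an illustrative assumption and
!! MPI_COMM_WORLD stands in for mpi_comm_opa.
!!----------------------------------------------------------------------
   PROGRAM optional_comm_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER :: irank, ivalue, ierr
      !
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      ivalue = irank + 1
      CALL sketch_min( ivalue )                         ! no kcom given : reduce over the default communicator
      IF( irank == 0 )   WRITE(*,*) 'global minimum :', ivalue
      CALL mpi_finalize( ierr )
      !
   CONTAINS
      !
      SUBROUTINE sketch_min( ktab, kcom )
         !! minimum of an integer scalar over an optional communicator
         INTEGER, INTENT(inout)           :: ktab      ! value reduced in place
         INTEGER, INTENT(in   ), OPTIONAL :: kcom      ! restricted communicator, if any
         INTEGER :: ilocalcomm, iwork, ierror
         ilocalcomm = MPI_COMM_WORLD                   ! default communicator
         IF( PRESENT(kcom) )   ilocalcomm = kcom       ! pool supplied by the caller
         CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, ilocalcomm, ierror )
         ktab = iwork
      END SUBROUTINE sketch_min
      !
   END PROGRAM optional_comm_sketch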
* Local variables (SHMEM version) 3327 INTEGER :: ji 3328 INTEGER, SAVE :: ibool=0 3329 3330 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppsum_a_int routine : kdim is too big', & 3331 & 'change jpmppsum dimension in mpp.h' ) 3332 3333 DO ji = 1, kdim 3334 nistab_shmem(ji) = ktab(ji) 3335 END DO 3336 CALL barrier() 3337 IF(ibool == 0 ) THEN 3338 CALL shmem_int8_sum_to_all(nistab_shmem,nistab_shmem,kdim,0,0, & 3339 N$PES,nis1wrk_shmem,nis1sync_shmem) 3340 ELSE 3341 CALL shmem_int8_sum_to_all(nistab_shmem,nistab_shmem,kdim,0,0, & 3342 N$PES,nis2wrk_shmem,nis2sync_shmem) 3343 ENDIF 3344 CALL barrier() 3345 ibool = ibool + 1 3346 ibool = MOD( ibool, 2 ) 3347 DO ji = 1, kdim 3348 ktab(ji) = nistab_shmem(ji) 3349 END DO 3350 3351 # elif defined key_mpp_mpi 3352 3353 !! * Local variables (MPI version) 3354 INTEGER :: ierror 3355 INTEGER, DIMENSION (kdim) :: iwork 3356 3357 CALL mpi_allreduce(ktab, iwork,kdim,mpi_integer & 3358 ,mpi_sum,mpi_comm_opa,ierror) 3359 3360 ktab(:) = iwork(:) 3361 #endif 3362 3363 END SUBROUTINE mppsum_a_int 3364 3365 3366 SUBROUTINE mppsum_int( ktab ) 3367 !!---------------------------------------------------------------------- 3368 !! *** routine mppsum_int *** 3369 !! 3370 !! ** Purpose : Global integer sum 3371 !! 3372 !!---------------------------------------------------------------------- 3373 !! * Arguments 3374 INTEGER, INTENT(inout) :: ktab 3375 3376 #if defined key_mpp_shmem 3377 3378 !! * Local variables (SHMEM version) 3379 INTEGER, SAVE :: ibool=0 3380 3381 nistab_shmem(1) = ktab 3382 CALL barrier() 3383 IF(ibool == 0 ) THEN 3384 CALL shmem_int8_sum_to_all(nistab_shmem,nistab_shmem, 1,0,0, & 3385 N$PES,nis1wrk_shmem,nis1sync_shmem) 3386 ELSE 3387 CALL shmem_int8_sum_to_all(nistab_shmem,nistab_shmem, 1,0,0, & 3388 N$PES,nis2wrk_shmem,nis2sync_shmem) 3389 ENDIF 3390 CALL barrier() 3391 ibool=ibool+1 3392 ibool=MOD( ibool,2) 3393 ktab = nistab_shmem(1) 3394 3395 # elif defined key_mpp_mpi 3396 3397 !! * Local variables (MPI version) 3398 INTEGER :: ierror, iwork 3399 3400 CALL mpi_allreduce(ktab,iwork, 1,mpi_integer & 3401 ,mpi_sum,mpi_comm_opa,ierror) 3402 3403 ktab = iwork 3404 3405 #endif 3406 3407 END SUBROUTINE mppsum_int 3408 3409 3410 SUBROUTINE mppisl_a_real( ptab, kdim ) 3411 !!---------------------------------------------------------------------- 3412 !! *** routine mppisl_a_real *** 3413 !! 3414 !! ** Purpose : Massively parallel processors 3415 !! Find the non zero island barotropic stream function value 3416 !! 3417 !! Modifications: 3418 !! ! 93-09 (M. Imbard) 3419 !! ! 96-05 (j. Escobar) 3420 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 3421 !!---------------------------------------------------------------------- 3422 INTEGER , INTENT( in ) :: kdim ! ??? 3423 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab ! ??? 3424 3425 #if defined key_mpp_shmem 3426 3427 !! 
* Local variables (SHMEM version) 3428 INTEGER :: ji 3429 INTEGER, SAVE :: ibool=0 3430 3431 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppisl_a_real routine : kdim is too big', & 3432 & 'change jpmppsum dimension in mpp.h' ) 3433 3434 DO ji = 1, kdim 3435 wiltab_shmem(ji) = ptab(ji) 3436 END DO 3437 CALL barrier() 3438 IF(ibool == 0 ) THEN 3439 CALL shmem_real8_min_to_all (wi1tab_shmem,wiltab_shmem,kdim,0 & 3440 ,0,N$PES,wi11wrk_shmem,ni11sync_shmem) 3441 CALL shmem_real8_max_to_all (wi2tab_shmem,wiltab_shmem,kdim,0 & 3442 ,0,N$PES,wi12wrk_shmem,ni12sync_shmem) 3443 ELSE 3444 CALL shmem_real8_min_to_all (wi1tab_shmem,wiltab_shmem,kdim,0 & 3445 ,0,N$PES,wi21wrk_shmem,ni21sync_shmem) 3446 CALL shmem_real8_max_to_all (wi2tab_shmem,wiltab_shmem,kdim,0 & 3447 ,0,N$PES,wi22wrk_shmem,ni22sync_shmem) 3448 ENDIF 3449 CALL barrier() 3450 ibool=ibool+1 3451 ibool=MOD( ibool,2) 3452 DO ji = 1, kdim 3453 IF(wi1tab_shmem(ji) /= 0. ) THEN 3454 ptab(ji) = wi1tab_shmem(ji) 3455 ELSE 3456 ptab(ji) = wi2tab_shmem(ji) 3457 ENDIF 3458 END DO 3459 3460 # elif defined key_mpp_mpi 3461 3462 !! * Local variables (MPI version) 3463 LOGICAL :: lcommute = .TRUE. 3464 INTEGER :: mpi_isl, ierror 3465 REAL(wp), DIMENSION(kdim) :: zwork 3466 3467 CALL mpi_op_create(lc_isl,lcommute,mpi_isl,ierror) 3468 CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision & 3469 ,mpi_isl,mpi_comm_opa,ierror) 3470 ptab(:) = zwork(:) 3471 3472 #endif 3473 3474 END SUBROUTINE mppisl_a_real 3475 3476 3477 SUBROUTINE mppisl_real( ptab ) 3478 !!---------------------------------------------------------------------- 3479 !! *** routine mppisl_real *** 3480 !! 3481 !! ** Purpose : Massively parallel processors 3482 !! Find the non zero island barotropic stream function value 3483 !! 3484 !! Modifications: 3485 !! ! 93-09 (M. Imbard) 3486 !! ! 96-05 (j. Escobar) 3487 !! ! 98-05 (M. Imbard, J. Escobar, L. Colombet ) SHMEM and MPI 3488 !!---------------------------------------------------------------------- 3489 REAL(wp), INTENT(inout) :: ptab 3490 3491 #if defined key_mpp_shmem 3492 3493 !! * Local variables (SHMEM version) 3494 INTEGER, SAVE :: ibool=0 3495 3496 wiltab_shmem(1) = ptab 3497 CALL barrier() 3498 IF(ibool == 0 ) THEN 3499 CALL shmem_real8_min_to_all (wi1tab_shmem,wiltab_shmem, 1,0 & 3500 ,0,N$PES,wi11wrk_shmem,ni11sync_shmem) 3501 CALL shmem_real8_max_to_all (wi2tab_shmem,wiltab_shmem, 1,0 & 3502 ,0,N$PES,wi12wrk_shmem,ni12sync_shmem) 3503 ELSE 3504 CALL shmem_real8_min_to_all (wi1tab_shmem,wiltab_shmem, 1,0 & 3505 ,0,N$PES,wi21wrk_shmem,ni21sync_shmem) 3506 CALL shmem_real8_max_to_all (wi2tab_shmem,wiltab_shmem, 1,0 & 3507 ,0,N$PES,wi22wrk_shmem,ni22sync_shmem) 3508 ENDIF 3509 CALL barrier() 3510 ibool = ibool + 1 3511 ibool = MOD( ibool, 2 ) 3512 IF( wi1tab_shmem(1) /= 0. ) THEN 3513 ptab = wi1tab_shmem(1) 3514 ELSE 3515 ptab = wi2tab_shmem(1) 3516 ENDIF 3517 3518 # elif defined key_mpp_mpi 3519 3520 !! * Local variables (MPI version) 3521 LOGICAL :: lcommute = .TRUE. 3522 INTEGER :: mpi_isl, ierror 3523 REAL(wp) :: zwork 3524 3525 CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror ) 3526 CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, & 3527 & mpi_isl , mpi_comm_opa, ierror ) 3528 ptab = zwork 3529 3530 #endif 3531 3532 END SUBROUTINE mppisl_real 3533 3534 3535 FUNCTION lc_isl( py, px, kdim ) 3536 INTEGER :: kdim 3537 REAL(wp), DIMENSION(kdim) :: px, py 3538 INTEGER :: kdtatyp, ji 3539 INTEGER :: lc_isl 3540 DO ji = 1, kdim 3541 IF( py(ji) /= 0. 
) px(ji) = py(ji) 3542 END DO 3543 lc_isl=0 3544 3545 END FUNCTION lc_isl 3546 3547 3548 SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) 3549 !!---------------------------------------------------------------------- 3550 !! *** routine mppmax_a_real *** 3551 !! 3552 !! ** Purpose : Maximum 3553 !! 3554 !!---------------------------------------------------------------------- 3555 !! * Arguments 3556 INTEGER , INTENT( in ) :: kdim 3557 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 3558 INTEGER , INTENT( in ), OPTIONAL :: kcom 3559 3560 #if defined key_mpp_shmem 3561 3562 !! * Local variables (SHMEM version) 3563 INTEGER :: ji 3564 INTEGER, SAVE :: ibool=0 3565 3566 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppmax_a_real routine : kdim is too big', & 3567 & 'change jpmppsum dimension in mpp.h' ) 3568 3569 DO ji = 1, kdim 3570 wintab_shmem(ji) = ptab(ji) 3571 END DO 3572 CALL barrier() 3573 IF(ibool == 0 ) THEN 3574 CALL shmem_real8_max_to_all (wintab_shmem,wintab_shmem,kdim,0 & 3575 ,0,N$PES,wi1wrk_shmem,ni1sync_shmem) 3576 ELSE 3577 CALL shmem_real8_max_to_all (wintab_shmem,wintab_shmem,kdim,0 & 3578 ,0,N$PES,wi2wrk_shmem,ni2sync_shmem) 3579 ENDIF 3580 CALL barrier() 3581 ibool=ibool+1 3582 ibool=MOD( ibool,2) 3583 DO ji = 1, kdim 3584 ptab(ji) = wintab_shmem(ji) 3585 END DO 3586 3587 # elif defined key_mpp_mpi 3588 3589 !! * Local variables (MPI version) 3590 INTEGER :: ierror 3591 INTEGER :: localcomm 3592 REAL(wp), DIMENSION(kdim) :: zwork 3593 3594 localcomm = mpi_comm_opa 3595 IF( PRESENT(kcom) ) localcomm = kcom 3596 3597 CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision & 3598 ,mpi_max,localcomm,ierror) 3599 ptab(:) = zwork(:) 3600 3601 #endif 3602 3603 END SUBROUTINE mppmax_a_real 3604 3605 3606 SUBROUTINE mppmax_real( ptab, kcom ) 3607 !!---------------------------------------------------------------------- 3608 !! *** routine mppmax_real *** 3609 !! 3610 !! ** Purpose : Maximum 3611 !! 3612 !!---------------------------------------------------------------------- 3613 !! * Arguments 3614 REAL(wp), INTENT(inout) :: ptab ! ??? 3615 INTEGER , INTENT( in ), OPTIONAL :: kcom ! ??? 3616 3617 #if defined key_mpp_shmem 3618 3619 !! * Local variables (SHMEM version) 3620 INTEGER, SAVE :: ibool=0 3621 3622 wintab_shmem(1) = ptab 3623 CALL barrier() 3624 IF(ibool == 0 ) THEN 3625 CALL shmem_real8_max_to_all (wintab_shmem,wintab_shmem, 1,0 & 3626 ,0,N$PES,wi1wrk_shmem,ni1sync_shmem) 3627 ELSE 3628 CALL shmem_real8_max_to_all (wintab_shmem,wintab_shmem, 1,0 & 3629 ,0,N$PES,wi2wrk_shmem,ni2sync_shmem) 3630 ENDIF 3631 CALL barrier() 3632 ibool=ibool+1 3633 ibool=MOD( ibool,2) 3634 ptab = wintab_shmem(1) 3635 3636 # elif defined key_mpp_mpi 3637 3638 !! * Local variables (MPI version) 3639 INTEGER :: ierror 3640 INTEGER :: localcomm 3641 REAL(wp) :: zwork 3642 3643 localcomm = mpi_comm_opa 3644 IF( PRESENT(kcom) ) localcomm = kcom 3645 3646 CALL mpi_allreduce( ptab, zwork , 1 , mpi_double_precision, & 3647 & mpi_max, localcomm, ierror ) 3648 ptab = zwork 3649 3650 #endif 3651 3652 END SUBROUTINE mppmax_real 3653 3654 3655 SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) 3656 !!---------------------------------------------------------------------- 3657 !! *** routine mppmin_a_real *** 3658 !! 3659 !! ** Purpose : Minimum 3660 !! 3661 !!----------------------------------------------------------------------- 3662 !! 
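!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : the mppisl_* routines register the
!! user function lc_isl (shown above) with mpi_op_create to build a
!! "keep the non-zero value" reduction.  The standalone sketch below
!! rebuilds the same idea with the standard four-argument form of the
!! user routine; the names my_isl, zval and zres are illustrative
!! assumptions.
!!----------------------------------------------------------------------
   PROGRAM custom_op_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      EXTERNAL :: my_isl                                ! user-defined combine routine
      LOGICAL  :: lcommute
      INTEGER  :: irank, iop, ierr
      REAL(KIND=8) :: zval, zres
      !
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      !
      zval = 0._8                                       ! only rank 1 holds a non-zero value
      IF( irank == 1 )   zval = 42._8
      !
      lcommute = .TRUE.
      CALL mpi_op_create( my_isl, lcommute, iop, ierr ) ! register the combine rule
      CALL mpi_allreduce( zval, zres, 1, mpi_double_precision, iop,   &
         &                MPI_COMM_WORLD, ierr )
      CALL mpi_op_free( iop, ierr )
      !
      IF( irank == 0 )   WRITE(*,*) 'non-zero value found :', zres
      CALL mpi_finalize( ierr )
   END PROGRAM custom_op_sketch

   SUBROUTINE my_isl( pin, pinout, klen, ktype )
      !! combine rule : overwrite pinout with pin wherever pin is non-zero
      IMPLICIT NONE
      INTEGER :: klen, ktype, ji
      REAL(KIND=8), DIMENSION(klen) :: pin, pinout
      DO ji = 1, klen
         IF( pin(ji) /= 0._8 )   pinout(ji) = pin(ji)
      END DO
   END SUBROUTINE my_isl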
* Arguments 3663 INTEGER , INTENT( in ) :: kdim 3664 REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab 3665 INTEGER , INTENT( in ), OPTIONAL :: kcom 3666 3667 #if defined key_mpp_shmem 3668 3669 !! * Local variables (SHMEM version) 3670 INTEGER :: ji 3671 INTEGER, SAVE :: ibool=0 3672 3673 IF( kdim > jpmppsum ) CALL ctl_stop( 'mpprmin routine : kdim is too big', & 3674 & 'change jpmppsum dimension in mpp.h' ) 3675 3676 DO ji = 1, kdim 3677 wintab_shmem(ji) = ptab(ji) 3678 END DO 3679 CALL barrier() 3680 IF(ibool == 0 ) THEN 3681 CALL shmem_real8_min_to_all (wintab_shmem,wintab_shmem,kdim,0 & 3682 ,0,N$PES,wi1wrk_shmem,ni1sync_shmem) 3683 ELSE 3684 CALL shmem_real8_min_to_all (wintab_shmem,wintab_shmem,kdim,0 & 3685 ,0,N$PES,wi2wrk_shmem,ni2sync_shmem) 3686 ENDIF 3687 CALL barrier() 3688 ibool=ibool+1 3689 ibool=MOD( ibool,2) 3690 DO ji = 1, kdim 3691 ptab(ji) = wintab_shmem(ji) 3692 END DO 3693 3694 # elif defined key_mpp_mpi 3695 3696 !! * Local variables (MPI version) 3697 INTEGER :: ierror 3698 INTEGER :: localcomm 3699 REAL(wp), DIMENSION(kdim) :: zwork 3700 3701 localcomm = mpi_comm_opa 3702 IF( PRESENT(kcom) ) localcomm = kcom 3703 3704 CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision & 3705 ,mpi_min,localcomm,ierror) 3706 ptab(:) = zwork(:) 3707 3708 #endif 3709 3710 END SUBROUTINE mppmin_a_real 3711 3712 3713 SUBROUTINE mppmin_real( ptab, kcom ) 3714 !!---------------------------------------------------------------------- 3715 !! *** routine mppmin_real *** 3716 !! 3717 !! ** Purpose : minimum in Massively Parallel Processing 3718 !! REAL scalar case 3719 !! 3720 !!----------------------------------------------------------------------- 3721 !! * Arguments 3722 REAL(wp), INTENT( inout ) :: ptab ! 3723 INTEGER , INTENT( in ), OPTIONAL :: kcom 3724 3725 #if defined key_mpp_shmem 3726 3727 !! * Local variables (SHMEM version) 3728 INTEGER, SAVE :: ibool=0 3729 3730 wintab_shmem(1) = ptab 3731 CALL barrier() 3732 IF(ibool == 0 ) THEN 3733 CALL shmem_real8_min_to_all (wintab_shmem,wintab_shmem, 1,0 & 3734 ,0,N$PES,wi1wrk_shmem,ni1sync_shmem) 3735 ELSE 3736 CALL shmem_real8_min_to_all (wintab_shmem,wintab_shmem, 1,0 & 3737 ,0,N$PES,wi2wrk_shmem,ni2sync_shmem) 3738 ENDIF 3739 CALL barrier() 3740 ibool=ibool+1 3741 ibool=MOD( ibool,2) 3742 ptab = wintab_shmem(1) 3743 3744 # elif defined key_mpp_mpi 3745 3746 !! * Local variables (MPI version) 3747 INTEGER :: ierror 3748 REAL(wp) :: zwork 3749 INTEGER :: localcomm 3750 3751 localcomm = mpi_comm_opa 3752 IF( PRESENT(kcom) ) localcomm = kcom 3753 3754 CALL mpi_allreduce( ptab, zwork, 1,mpi_double_precision & 3755 & ,mpi_min,localcomm,ierror) 3756 ptab = zwork 3757 3758 #endif 3759 3760 END SUBROUTINE mppmin_real 3761 3762 3763 SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) 3764 !!---------------------------------------------------------------------- 3765 !! *** routine mppsum_a_real *** 3766 !! 3767 !! ** Purpose : global sum in Massively Parallel Processing 3768 !! REAL ARRAY argument case 3769 !! 3770 !!----------------------------------------------------------------------- 3771 INTEGER , INTENT( in ) :: kdim ! size of ptab 3772 REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array 3773 INTEGER , INTENT( in ), OPTIONAL :: kcom 3774 3775 #if defined key_mpp_shmem 3776 3777 !! 
* Local variables (SHMEM version) 3778 INTEGER :: ji 3779 INTEGER, SAVE :: ibool=0 3780 3781 IF( kdim > jpmppsum ) CALL ctl_stop( 'mppsum_a_real routine : kdim is too big', & 3782 & 'change jpmppsum dimension in mpp.h' ) 3783 3784 DO ji = 1, kdim 3785 wrstab_shmem(ji) = ptab(ji) 3786 END DO 3787 CALL barrier() 3788 IF(ibool == 0 ) THEN 3789 CALL shmem_real8_sum_to_all (wrstab_shmem,wrstab_shmem,kdim,0 & 3790 ,0,N$PES,wrs1wrk_shmem,nrs1sync_shmem ) 3791 ELSE 3792 CALL shmem_real8_sum_to_all (wrstab_shmem,wrstab_shmem,kdim,0 & 3793 ,0,N$PES,wrs2wrk_shmem,nrs2sync_shmem ) 3794 ENDIF 3795 CALL barrier() 3796 ibool=ibool+1 3797 ibool=MOD( ibool,2) 3798 DO ji = 1, kdim 3799 ptab(ji) = wrstab_shmem(ji) 3800 END DO 3801 3802 # elif defined key_mpp_mpi 3803 3804 !! * Local variables (MPI version) 3805 INTEGER :: ierror ! temporary integer 3806 INTEGER :: localcomm 3807 REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace 3808 3809 3810 localcomm = mpi_comm_opa 3811 IF( PRESENT(kcom) ) localcomm = kcom 3812 3813 CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision & 3814 & ,mpi_sum,localcomm,ierror) 3815 ptab(:) = zwork(:) 3816 3817 #endif 3818 3819 END SUBROUTINE mppsum_a_real 3820 3821 3822 SUBROUTINE mppsum_real( ptab, kcom ) 3823 !!---------------------------------------------------------------------- 3824 !! *** routine mppsum_real *** 3825 !! 3826 !! ** Purpose : global sum in Massively Parallel Processing 3827 !! SCALAR argument case 3828 !! 3829 !!----------------------------------------------------------------------- 3830 REAL(wp), INTENT(inout) :: ptab ! input scalar 3831 INTEGER , INTENT( in ), OPTIONAL :: kcom 3832 3833 #if defined key_mpp_shmem 3834 3835 !! * Local variables (SHMEM version) 3836 INTEGER, SAVE :: ibool=0 3837 3838 wrstab_shmem(1) = ptab 3839 CALL barrier() 3840 IF(ibool == 0 ) THEN 3841 CALL shmem_real8_sum_to_all (wrstab_shmem,wrstab_shmem, 1,0 & 3842 ,0,N$PES,wrs1wrk_shmem,nrs1sync_shmem ) 3843 ELSE 3844 CALL shmem_real8_sum_to_all (wrstab_shmem,wrstab_shmem, 1,0 & 3845 ,0,N$PES,wrs2wrk_shmem,nrs2sync_shmem ) 3846 ENDIF 3847 CALL barrier() 3848 ibool = ibool + 1 3849 ibool = MOD( ibool, 2 ) 3850 ptab = wrstab_shmem(1) 3851 3852 # elif defined key_mpp_mpi 3853 3854 !! * Local variables (MPI version) 3855 INTEGER :: ierror 3856 INTEGER :: localcomm 3857 REAL(wp) :: zwork 3858 3859 localcomm = mpi_comm_opa 3860 IF( PRESENT(kcom) ) localcomm = kcom 3861 3862 CALL mpi_allreduce(ptab, zwork, 1,mpi_double_precision & 3863 & ,mpi_sum,localcomm,ierror) 3864 ptab = zwork 3865 3866 #endif 3867 3868 END SUBROUTINE mppsum_real 3869 3870 SUBROUTINE mpp_minloc2d(ptab, pmask, pmin, ki,kj ) 3871 !!------------------------------------------------------------------------ 3872 !! *** routine mpp_minloc *** 3873 !! 3874 !! ** Purpose : Compute the global minimum of an array ptab 3875 !! and also give its global position 3876 !! 3877 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 3878 !! 3879 !! ** Arguments : I : ptab =local 2D array 3880 !! O : pmin = global minimum 3881 !! O : ki,kj = global position of minimum 3882 !! 3883 !! ** Author : J.M. Molines 10/10/2004 3884 !!-------------------------------------------------------------------------- 3885 #ifdef key_mpp_shmem 3886 CALL ctl_stop( ' mpp_minloc not yet available in SHMEM' ) 3887 # elif key_mpp_mpi 3888 !! * Arguments 3889 REAL(wp), DIMENSION (jpi,jpj), INTENT (in) :: ptab ,& ! Local 2D array 3890 & pmask ! Local mask 3891 REAL(wp) , INTENT (out) :: pmin ! Global minimum of ptab 3892 INTEGER , INTENT (out) :: ki,kj ! 
index of minimum in global frame 3893 3894 !! * Local variables 3895 REAL(wp) :: zmin ! local minimum 3896 REAL(wp) ,DIMENSION(2,1) :: zain, zaout 3897 INTEGER, DIMENSION (2) :: ilocs 3898 INTEGER :: ierror 3899 3900 3901 zmin = MINVAL( ptab(:,:) , mask= pmask == 1.e0 ) 3902 ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 ) 3903 3904 ki = ilocs(1) + nimpp - 1 3905 kj = ilocs(2) + njmpp - 1 3906 3907 zain(1,:)=zmin 3908 zain(2,:)=ki+10000.*kj 3909 3910 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 3911 3912 pmin=zaout(1,1) 3913 kj= INT(zaout(2,1)/10000.) 3914 ki= INT(zaout(2,1) - 10000.*kj ) 3915 #endif 3916 3917 END SUBROUTINE mpp_minloc2d 3918 3919 3920 SUBROUTINE mpp_minloc3d(ptab, pmask, pmin, ki,kj ,kk) 3921 !!------------------------------------------------------------------------ 3922 !! *** routine mpp_minloc *** 3923 !! 3924 !! ** Purpose : Compute the global minimum of an array ptab 3925 !! and also give its global position 3926 !! 3927 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 3928 !! 3929 !! ** Arguments : I : ptab =local 2D array 3930 !! O : pmin = global minimum 3931 !! O : ki,kj = global position of minimum 3932 !! 3933 !! ** Author : J.M. Molines 10/10/2004 3934 !!-------------------------------------------------------------------------- 3935 #ifdef key_mpp_shmem 3936 CALL ctl_stop( ' mpp_minloc not yet available in SHMEM' ) 3937 # elif key_mpp_mpi 3938 !! * Arguments 3939 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT (in) :: ptab ,& ! Local 2D array 3940 & pmask ! Local mask 3941 REAL(wp) , INTENT (out) :: pmin ! Global minimum of ptab 3942 INTEGER , INTENT (out) :: ki,kj,kk ! index of minimum in global frame 3943 3944 !! * Local variables 3945 REAL(wp) :: zmin ! local minimum 3946 REAL(wp) ,DIMENSION(2,1) :: zain, zaout 3947 INTEGER, DIMENSION (3) :: ilocs 3948 INTEGER :: ierror 3949 3950 3951 zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 3952 ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 3953 3954 ki = ilocs(1) + nimpp - 1 3955 kj = ilocs(2) + njmpp - 1 3956 kk = ilocs(3) 3957 3958 zain(1,:)=zmin 3959 zain(2,:)=ki+10000.*kj+100000000.*kk 3960 3961 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) 3962 3963 pmin=zaout(1,1) 3964 kk= INT(zaout(2,1)/100000000.) 3965 kj= INT(zaout(2,1) - kk * 100000000. )/10000 3966 ki= INT(zaout(2,1) - kk * 100000000. -kj * 10000. ) 3967 #endif 3968 3969 END SUBROUTINE mpp_minloc3d 3970 3971 3972 SUBROUTINE mpp_maxloc2d(ptab, pmask, pmax, ki,kj ) 3973 !!------------------------------------------------------------------------ 3974 !! *** routine mpp_maxloc *** 3975 !! 3976 !! ** Purpose : Compute the global maximum of an array ptab 3977 !! and also give its global position 3978 !! 3979 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 3980 !! 3981 !! ** Arguments : I : ptab =local 2D array 3982 !! O : pmax = global maximum 3983 !! O : ki,kj = global position of maximum 3984 !! 3985 !! ** Author : J.M. Molines 10/10/2004 3986 !!-------------------------------------------------------------------------- 3987 #ifdef key_mpp_shmem 3988 CALL ctl_stop( ' mpp_maxloc not yet available in SHMEM' ) 3989 # elif key_mpp_mpi 3990 !! * Arguments 3991 REAL(wp), DIMENSION (jpi,jpj), INTENT (in) :: ptab ,& ! Local 2D array 3992 & pmask ! Local mask 3993 REAL(wp) , INTENT (out) :: pmax ! Global maximum of ptab 3994 INTEGER , INTENT (out) :: ki,kj ! index of maximum in global frame 3995 3996 !! * Local variables 3997 REAL(wp) :: zmax ! 
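!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : mpp_minloc2d above packs the local
!! minimum and its global (i,j) position into a value / position pair,
!! the position being encoded as ki + 10000*kj, and lets MPI_MINLOC keep
!! the pair with the smallest value.  The standalone sketch below
!! reproduces that encode / reduce / decode sequence; the per-rank value
!! and indices are illustrative assumptions.
!!----------------------------------------------------------------------
   PROGRAM minloc_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      REAL(KIND=8), DIMENSION(2) :: zain, zaout         ! (value, encoded position) pair
      INTEGER :: irank, ki, kj, ierr
      !
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      !
      ki = irank + 3                                    ! assumed global i-index of the local minimum
      kj = irank + 7                                    ! assumed global j-index of the local minimum
      zain(1) = REAL( 100 - irank, 8 )                  ! assumed local minimum (smallest on the last rank)
      zain(2) = REAL( ki, 8 ) + 10000._8 * REAL( kj, 8 )
      !
      ! keep the pair whose first element is smallest, carrying the encoded position along
      CALL mpi_allreduce( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MINLOC,   &
         &                MPI_COMM_WORLD, ierr )
      !
      kj = INT( zaout(2) / 10000. )                     ! decode the position
      ki = INT( zaout(2) - 10000. * kj )
      IF( irank == 0 )   WRITE(*,*) 'global minimum', zaout(1), 'at (i,j) =', ki, kj
      CALL mpi_finalize( ierr )
   END PROGRAM minloc_sketch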
local maximum 3998 REAL(wp) ,DIMENSION(2,1) :: zain, zaout 3999 INTEGER, DIMENSION (2) :: ilocs 4000 INTEGER :: ierror 4001 4002 4003 zmax = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 ) 4004 ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 ) 4005 4006 ki = ilocs(1) + nimpp - 1 4007 kj = ilocs(2) + njmpp - 1 4008 4009 zain(1,:)=zmax 4010 zain(2,:)=ki+10000.*kj 4011 4012 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 4013 4014 pmax=zaout(1,1) 4015 kj= INT(zaout(2,1)/10000.) 4016 ki= INT(zaout(2,1) - 10000.*kj ) 4017 #endif 4018 4019 END SUBROUTINE mpp_maxloc2d 4020 4021 SUBROUTINE mpp_maxloc3d(ptab, pmask, pmax, ki,kj,kk ) 4022 !!------------------------------------------------------------------------ 4023 !! *** routine mpp_maxloc *** 4024 !! 4025 !! ** Purpose : Compute the global maximum of an array ptab 4026 !! and also give its global position 4027 !! 4028 !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC 4029 !! 4030 !! ** Arguments : I : ptab =local 2D array 4031 !! O : pmax = global maximum 4032 !! O : ki,kj = global position of maximum 4033 !! 4034 !! ** Author : J.M. Molines 10/10/2004 4035 !!-------------------------------------------------------------------------- 4036 #ifdef key_mpp_shmem 4037 CALL ctl_stop( ' mpp_maxloc not yet available in SHMEM' ) 4038 # elif key_mpp_mpi 4039 !! * Arguments 4040 REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT (in) :: ptab ,& ! Local 2D array 4041 & pmask ! Local mask 4042 REAL(wp) , INTENT (out) :: pmax ! Global maximum of ptab 4043 INTEGER , INTENT (out) :: ki,kj,kk ! index of maximum in global frame 4044 4045 !! * Local variables 4046 REAL(wp) :: zmax ! local maximum 4047 REAL(wp) ,DIMENSION(2,1) :: zain, zaout 4048 INTEGER, DIMENSION (3) :: ilocs 4049 INTEGER :: ierror 4050 4051 4052 zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) 4053 ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) 4054 4055 ki = ilocs(1) + nimpp - 1 4056 kj = ilocs(2) + njmpp - 1 4057 kk = ilocs(3) 4058 4059 zain(1,:)=zmax 4060 zain(2,:)=ki+10000.*kj+100000000.*kk 4061 4062 CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) 4063 4064 pmax=zaout(1,1) 4065 kk= INT(zaout(2,1)/100000000.) 4066 kj= INT(zaout(2,1) - kk * 100000000. )/10000 4067 ki= INT(zaout(2,1) - kk * 100000000. -kj * 10000. ) 4068 #endif 4069 4070 END SUBROUTINE mpp_maxloc3d 4071 4072 SUBROUTINE mppsync() 4073 !!---------------------------------------------------------------------- 4074 !! *** routine mppsync *** 4075 !! 4076 !! ** Purpose : Massively parallel processors, synchroneous 4077 !! 4078 !!----------------------------------------------------------------------- 4079 4080 #if defined key_mpp_shmem 4081 4082 !! * Local variables (SHMEM version) 4083 CALL barrier() 4084 4085 # elif defined key_mpp_mpi 4086 4087 !! * Local variables (MPI version) 4088 INTEGER :: ierror 4089 4090 CALL mpi_barrier(mpi_comm_opa,ierror) 4091 4092 #endif 4093 4094 END SUBROUTINE mppsync 4095 4096 4097 SUBROUTINE mppstop 4098 !!---------------------------------------------------------------------- 4099 !! *** routine mppstop *** 4100 !! 4101 !! ** purpose : Stop massilively parallel processors method 4102 !! 4103 !!---------------------------------------------------------------------- 4104 !! * Local declarations 4105 INTEGER :: info 4106 !!---------------------------------------------------------------------- 4107 4108 ! 1. Mpp synchroneus 4109 ! 
------------------ 4110 4111 CALL mppsync 4112 #if defined key_mpp_mpi 4113 CALL mpi_finalize( info ) 4114 #endif 4115 4116 END SUBROUTINE mppstop 4117 4118 4119 SUBROUTINE mppobc( ptab, kd1, kd2, kl, kk, ktype, kij ) 4120 !!---------------------------------------------------------------------- 4121 !! *** routine mppobc *** 4122 !! 4123 !! ** Purpose : Message passing manadgement for open boundary 4124 !! conditions array 4125 !! 4126 !! ** Method : Use mppsend and mpprecv function for passing mask 4127 !! between processors following neighboring subdomains. 4128 !! domain parameters 4129 !! nlci : first dimension of the local subdomain 4130 !! nlcj : second dimension of the local subdomain 4131 !! nbondi : mark for "east-west local boundary" 4132 !! nbondj : mark for "north-south local boundary" 4133 !! noea : number for local neighboring processors 4134 !! nowe : number for local neighboring processors 4135 !! noso : number for local neighboring processors 4136 !! nono : number for local neighboring processors 4137 !! 4138 !! History : 4139 !! ! 98-07 (J.M. Molines) Open boundary conditions 4140 !!---------------------------------------------------------------------- 4141 !! * Arguments 4142 INTEGER , INTENT( in ) :: & 4143 kd1, kd2, & ! starting and ending indices 4144 kl , & ! index of open boundary 4145 kk, & ! vertical dimension 4146 ktype, & ! define north/south or east/west cdt 4147 ! ! = 1 north/south ; = 2 east/west 4148 kij ! horizontal dimension 4149 REAL(wp), DIMENSION(kij,kk), INTENT( inout ) :: & 4150 ptab ! variable array 4151 4152 !! * Local variables 4153 INTEGER :: ji, jj, jk, jl ! dummy loop indices 4154 INTEGER :: & 4155 iipt0, iipt1, ilpt1, & ! temporary integers 4156 ijpt0, ijpt1, & ! " " 4157 imigr, iihom, ijhom ! " " 4158 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 4159 INTEGER :: ml_stat(MPI_STATUS_SIZE) ! for key_mpi_isend 4160 REAL(wp), DIMENSION(jpi,jpj) :: & 4161 ztab ! temporary workspace 4162 !!---------------------------------------------------------------------- 4163 4164 4165 ! boundary condition initialization 4166 ! --------------------------------- 4167 4168 ztab(:,:) = 0.e0 4169 4170 IF( ktype==1 ) THEN ! north/south boundaries 4171 iipt0 = MAX( 1, MIN(kd1 - nimpp+1, nlci ) ) 4172 iipt1 = MAX( 0, MIN(kd2 - nimpp+1, nlci - 1 ) ) 4173 ilpt1 = MAX( 1, MIN(kd2 - nimpp+1, nlci ) ) 4174 ijpt0 = MAX( 1, MIN(kl - njmpp+1, nlcj ) ) 4175 ijpt1 = MAX( 0, MIN(kl - njmpp+1, nlcj - 1 ) ) 4176 ELSEIF( ktype==2 ) THEN ! east/west boundaries 4177 iipt0 = MAX( 1, MIN(kl - nimpp+1, nlci ) ) 4178 iipt1 = MAX( 0, MIN(kl - nimpp+1, nlci - 1 ) ) 4179 ijpt0 = MAX( 1, MIN(kd1 - njmpp+1, nlcj ) ) 4180 ijpt1 = MAX( 0, MIN(kd2 - njmpp+1, nlcj - 1 ) ) 4181 ilpt1 = MAX( 1, MIN(kd2 - njmpp+1, nlcj ) ) 4182 ELSE 4183 CALL ctl_stop( 'mppobc: bad ktype' ) 4184 ENDIF 4185 4186 DO jk = 1, kk 4187 IF( ktype==1 ) THEN ! north/south boundaries 4188 DO jj = ijpt0, ijpt1 4189 DO ji = iipt0, iipt1 4190 ztab(ji,jj) = ptab(ji,jk) 4191 END DO 4192 END DO 4193 ELSEIF( ktype==2 ) THEN ! east/west boundaries 4194 DO jj = ijpt0, ijpt1 4195 DO ji = iipt0, iipt1 4196 ztab(ji,jj) = ptab(jj,jk) 4197 END DO 4198 END DO 4199 ENDIF 4200 4201 4202 ! 1. East and west directions 4203 ! --------------------------- 4204 4205 ! 1.1 Read Dirichlet lateral conditions 4206 4207 IF( nbondi /= 2 ) THEN 4208 iihom = nlci-nreci 4209 4210 DO jl = 1, jpreci 4211 t2ew(:,jl,1) = ztab(jpreci+jl,:) 4212 t2we(:,jl,1) = ztab(iihom +jl,:) 4213 END DO 4214 ENDIF 4215 4216 ! 
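!!----------------------------------------------------------------------
!! Illustration (not lib_mpp code) : mppsync above is a plain mpi_barrier
!! on the model communicator, and mppstop synchronises before calling
!! mpi_finalize.  A minimal standalone sketch of that shutdown sequence
!! on MPI_COMM_WORLD is given below.
!!----------------------------------------------------------------------
   PROGRAM stop_sketch
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER :: irank, ierr
      CALL mpi_init( ierr )
      CALL mpi_comm_rank( MPI_COMM_WORLD, irank, ierr )
      WRITE(*,*) 'rank', irank, 'reached the end of the run'
      CALL mpi_barrier ( MPI_COMM_WORLD, ierr )         ! every rank gets here first (mppsync)
      CALL mpi_finalize( ierr )                         ! then MPI is shut down (mppstop)
   END PROGRAM stop_sketch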
1.2 Migrations 4217 4218 #if defined key_mpp_shmem 4219 !! * (SHMEM version) 4220 imigr=jpreci*jpj*jpbyt 4221 4222 IF( nbondi == -1 ) THEN 4223 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr/jpbyt, noea ) 4224 ELSEIF( nbondi == 0 ) THEN 4225 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr/jpbyt, nowe ) 4226 CALL shmem_put( t2we(1,1,2), t2we(1,1,1), imigr/jpbyt, noea ) 4227 ELSEIF( nbondi == 1 ) THEN 4228 CALL shmem_put( t2ew(1,1,2), t2ew(1,1,1), imigr/jpbyt, nowe ) 4229 ENDIF 4230 CALL barrier() 4231 CALL shmem_udcflush() 4232 4233 # elif key_mpp_mpi 4234 !! * (MPI version) 4235 4236 imigr=jpreci*jpj 4237 4238 IF( nbondi == -1 ) THEN 4239 CALL mppsend(2,t2we(1,1,1),imigr,noea, ml_req1) 4240 CALL mpprecv(1,t2ew(1,1,2),imigr) 4241 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4242 ELSEIF( nbondi == 0 ) THEN 4243 CALL mppsend(1,t2ew(1,1,1),imigr,nowe, ml_req1) 4244 CALL mppsend(2,t2we(1,1,1),imigr,noea, ml_req2) 4245 CALL mpprecv(1,t2ew(1,1,2),imigr) 4246 CALL mpprecv(2,t2we(1,1,2),imigr) 4247 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4248 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 4249 ELSEIF( nbondi == 1 ) THEN 4250 CALL mppsend(1,t2ew(1,1,1),imigr,nowe, ml_req1) 4251 CALL mpprecv(2,t2we(1,1,2),imigr) 4252 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4253 ENDIF 4254 #endif 4255 4256 4257 ! 1.3 Write Dirichlet lateral conditions 4258 4259 iihom = nlci-jpreci 4260 IF( nbondi == 0 .OR. nbondi == 1 ) THEN 4261 DO jl = 1, jpreci 4262 ztab(jl,:) = t2we(:,jl,2) 4263 END DO 4264 ENDIF 4265 4266 IF( nbondi == -1 .OR. nbondi == 0 ) THEN 4267 DO jl = 1, jpreci 4268 ztab(iihom+jl,:) = t2ew(:,jl,2) 4269 END DO 4270 ENDIF 4271 4272 4273 ! 2. North and south directions 4274 ! ----------------------------- 4275 4276 ! 2.1 Read Dirichlet lateral conditions 4277 4278 IF( nbondj /= 2 ) THEN 4279 ijhom = nlcj-nrecj 4280 DO jl = 1, jprecj 4281 t2sn(:,jl,1) = ztab(:,ijhom +jl) 4282 t2ns(:,jl,1) = ztab(:,jprecj+jl) 4283 END DO 4284 ENDIF 4285 4286 ! 2.2 Migrations 4287 4288 #if defined key_mpp_shmem 4289 !! * SHMEM version 4290 4291 imigr=jprecj*jpi*jpbyt 4292 4293 IF( nbondj == -1 ) THEN 4294 CALL shmem_put( t2sn(1,1,2), t2sn(1,1,1), imigr/jpbyt, nono ) 4295 ELSEIF( nbondj == 0 ) THEN 4296 CALL shmem_put( t2ns(1,1,2), t2ns(1,1,1), imigr/jpbyt, noso ) 4297 CALL shmem_put( t2sn(1,1,2), t2sn(1,1,1), imigr/jpbyt, nono ) 4298 ELSEIF( nbondj == 1 ) THEN 4299 CALL shmem_put( t2ns(1,1,2), t2ns(1,1,1), imigr/jpbyt, noso ) 4300 ENDIF 4301 CALL barrier() 4302 CALL shmem_udcflush() 4303 4304 # elif key_mpp_mpi 4305 !! * Local variables (MPI version) 4306 4307 imigr=jprecj*jpi 4308 4309 IF( nbondj == -1 ) THEN 4310 CALL mppsend(4,t2sn(1,1,1),imigr,nono, ml_req1) 4311 CALL mpprecv(3,t2ns(1,1,2),imigr) 4312 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4313 ELSEIF( nbondj == 0 ) THEN 4314 CALL mppsend(3,t2ns(1,1,1),imigr,noso, ml_req1) 4315 CALL mppsend(4,t2sn(1,1,1),imigr,nono, ml_req2) 4316 CALL mpprecv(3,t2ns(1,1,2),imigr) 4317 CALL mpprecv(4,t2sn(1,1,2),imigr) 4318 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4319 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 4320 ELSEIF( nbondj == 1 ) THEN 4321 CALL mppsend(3,t2ns(1,1,1),imigr,noso, ml_req1) 4322 CALL mpprecv(4,t2sn(1,1,2),imigr) 4323 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 4324 ENDIF 4325 4326 #endif 4327 4328 ! 2.3 Write Dirichlet lateral conditions 4329 4330 ijhom = nlcj - jprecj 4331 IF( nbondj == 0 .OR. 
nbondj == 1 ) THEN 4332 DO jl = 1, jprecj 4333 ztab(:,jl) = t2sn(:,jl,2) 4334 END DO 4335 ENDIF 4336 4337 IF( nbondj == 0 .OR. nbondj == -1 ) THEN 4338 DO jl = 1, jprecj 4339 ztab(:,ijhom+jl) = t2ns(:,jl,2) 4340 END DO 4341 ENDIF 4342 4343 IF( ktype==1 .AND. kd1 <= jpi+nimpp-1 .AND. nimpp <= kd2 ) THEN 4344 ! north/south boundaries 4345 DO jj = ijpt0,ijpt1 4346 DO ji = iipt0,ilpt1 4347 ptab(ji,jk) = ztab(ji,jj) 4348 END DO 4349 END DO 4350 ELSEIF( ktype==2 .AND. kd1 <= jpj+njmpp-1 .AND. njmpp <= kd2 ) THEN 4351 ! east/west boundaries 4352 DO jj = ijpt0,ilpt1 4353 DO ji = iipt0,iipt1 4354 ptab(jj,jk) = ztab(ji,jj) 4355 END DO 4356 END DO 4357 ENDIF 4358 4359 END DO 4360 4361 END SUBROUTINE mppobc 4362 4363 SUBROUTINE mpp_comm_free( kcom) 4364 4365 INTEGER, INTENT(in) :: kcom 4366 INTEGER :: ierr 4367 4368 CALL MPI_COMM_FREE(kcom, ierr) 4369 4370 END SUBROUTINE mpp_comm_free 4371 4372 4373 SUBROUTINE mpp_ini_ice(pindic) 4374 !!---------------------------------------------------------------------- 4375 !! *** routine mpp_ini_ice *** 4376 !! 4377 !! ** Purpose : Initialize special communicator for ice areas 4378 !! condition together with global variables needed in the ddmpp folding 4379 !! 4380 !! ** Method : - Look for ice processors in ice routines 4381 !! - Put their number in nrank_ice 4382 !! - Create groups for the world processors and the ice processors 4383 !! - Create a communicator for ice processors 4384 !! 4385 !! ** output 4386 !! njmppmax = njmpp for northern procs 4387 !! ndim_rank_ice = number of processors in the northern line 4388 !! nrank_north (ndim_rank_north) = number of the northern procs. 4389 !! ngrp_world = group ID for the world processors 4390 !! ngrp_ice = group ID for the ice processors 4391 !! ncomm_ice = communicator for the ice procs. 4392 !! n_ice_root = number (in the world) of proc 0 in the ice comm. 4393 !! 4394 !! History : 4395 !! ! 03-09 (J.M. Molines, MPI only ) 4396 !!---------------------------------------------------------------------- 4397 #ifdef key_mpp_shmem 4398 CALL ctl_stop( ' mpp_ini_ice not available in SHMEM' ) 4399 # elif key_mpp_mpi 4400 INTEGER, INTENT(in) :: pindic 4401 INTEGER :: ierr 4402 INTEGER :: jproc 4403 INTEGER :: ii 4404 INTEGER, DIMENSION(jpnij) :: kice 4405 INTEGER, DIMENSION(jpnij) :: zwork 4406 !!---------------------------------------------------------------------- 4407 4408 ! Look for how many procs with sea-ice 4409 ! 4410 kice = 0 4411 DO jproc=1,jpnij 4412 IF(jproc == narea .AND. pindic .GT. 0) THEN 4413 kice(jproc) = 1 4414 ENDIF 4415 END DO 4416 4417 zwork = 0 4418 CALL MPI_ALLREDUCE( kice, zwork,jpnij, mpi_integer, & 4419 mpi_sum, mpi_comm_opa, ierr ) 4420 ndim_rank_ice = sum(zwork) 4421 4422 ! Allocate the right size to nrank_north 4423 #if ! defined key_agrif 4424 IF(ALLOCATED(nrank_ice)) DEALLOCATE(nrank_ice) 4425 #else 4426 DEALLOCATE(nrank_ice) 4427 #endif 4428 4429 ALLOCATE(nrank_ice(ndim_rank_ice)) 4430 4431 ii = 0 4432 nrank_ice = 0 4433 DO jproc=1,jpnij 4434 IF(zwork(jproc) == 1) THEN 4435 ii = ii + 1 4436 nrank_ice(ii) = jproc -1 4437 ENDIF 4438 END DO 4439 4440 ! Create the world group 4441 CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_world,ierr) 4442 4443 ! Create the ice group from the world group 4444 CALL MPI_GROUP_INCL(ngrp_world,ndim_rank_ice,nrank_ice,ngrp_ice,ierr) 4445 4446 ! Create the ice communicator , ie the pool of procs with sea-ice 4447 CALL MPI_COMM_CREATE(mpi_comm_opa,ngrp_ice,ncomm_ice,ierr) 4448 4449 ! Find proc number in the world of proc 0 in the north 4450 ! 
The following line seems to be useless, we just comment & keep it as reminder 4451 ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_world,n_ice_root,ierr) 4452 #endif 4453 4454 END SUBROUTINE mpp_ini_ice 4455 4456 4457 SUBROUTINE mpp_ini_north 4458 !!---------------------------------------------------------------------- 4459 !! *** routine mpp_ini_north *** 4460 !! 4461 !! ** Purpose : Initialize special communicator for north folding 4462 !! condition together with global variables needed in the mpp folding 4463 !! 4464 !! ** Method : - Look for northern processors 4465 !! - Put their number in nrank_north 4466 !! - Create groups for the world processors and the north processors 4467 !! - Create a communicator for northern processors 4468 !! 4469 !! ** output 4470 !! njmppmax = njmpp for northern procs 4471 !! ndim_rank_north = number of processors in the northern line 4472 !! nrank_north (ndim_rank_north) = number of the northern procs. 4473 !! ngrp_world = group ID for the world processors 4474 !! ngrp_north = group ID for the northern processors 4475 !! ncomm_north = communicator for the northern procs. 4476 !! north_root = number (in the world) of proc 0 in the northern comm. 4477 !! 4478 !! History : 4479 !! ! 03-09 (J.M. Molines, MPI only ) 4480 !!---------------------------------------------------------------------- 4481 #ifdef key_mpp_shmem 4482 CALL ctl_stop( ' mpp_ini_north not available in SHMEM' ) 4483 # elif key_mpp_mpi 4484 INTEGER :: ierr 4485 INTEGER :: jproc 4486 INTEGER :: ii,ji 4487 !!---------------------------------------------------------------------- 4488 4489 njmppmax=MAXVAL(njmppt) 4490 4491 ! Look for how many procs on the northern boundary 4492 ! 4493 ndim_rank_north=0 4494 DO jproc=1,jpnij 4495 IF ( njmppt(jproc) == njmppmax ) THEN 4496 ndim_rank_north = ndim_rank_north + 1 4497 END IF 4498 END DO 4499 4500 4501 ! Allocate the right size to nrank_north 4502 ! 4503 ALLOCATE(nrank_north(ndim_rank_north)) 4504 4505 ! Fill the nrank_north array with proc. number of northern procs. 4506 ! Note : the rank start at 0 in MPI 4507 ! 4508 ii=0 4509 DO ji = 1, jpnij 4510 IF ( njmppt(ji) == njmppmax ) THEN 4511 ii=ii+1 4512 nrank_north(ii)=ji-1 4513 END IF 4514 END DO 4515 ! create the world group 4516 ! 4517 CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_world,ierr) 4518 ! 4519 ! Create the North group from the world group 4520 CALL MPI_GROUP_INCL(ngrp_world,ndim_rank_north,nrank_north,ngrp_north,ierr) 4521 4522 ! Create the North communicator , ie the pool of procs in the north group 4523 ! 4524 CALL MPI_COMM_CREATE(mpi_comm_opa,ngrp_north,ncomm_north,ierr) 4525 4526 4527 ! find proc number in the world of proc 0 in the north 4528 CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_north,1,0,ngrp_world,north_root,ierr) 4529 #endif 4530 4531 END SUBROUTINE mpp_ini_north 4532 4533 4534 SUBROUTINE mpp_lbc_north_3d ( pt3d, cd_type, psgn ) 2032 ! 2033 ! create the world group 2034 CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr ) 2035 ! 2036 ! Create the North group from the world group 2037 CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_north, nrank_north, ngrp_north, ierr ) 2038 ! 2039 ! Create the North communicator , ie the pool of procs in the north group 2040 CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_north, ncomm_north, ierr ) 2041 ! 2042 END SUBROUTINE mpp_ini_north 2043 2044 2045 SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn ) 4535 2046 !!--------------------------------------------------------------------- 4536 2047 !! *** routine mpp_lbc_north_3d *** 4537 2048 !! 4538 !! 
** Purpose : 4539 !! Ensure proper north fold horizontal bondary condition in mpp configuration 4540 !! in case of jpn1 > 1 4541 !! 4542 !! ** Method : 4543 !! Gather the 4 northern lines of the global domain on 1 processor and 4544 !! apply lbc north-fold on this sub array. Then scatter the fold array 4545 !! back to the processors. 4546 !! 4547 !! History : 4548 !! 8.5 ! 03-09 (J.M. Molines ) For mpp folding condition at north 4549 !! from lbc routine 4550 !! 9.0 ! 03-12 (J.M. Molines ) encapsulation into lib_mpp, coding rules of lbc_lnk 4551 !!---------------------------------------------------------------------- 4552 !! * Arguments 4553 CHARACTER(len=1), INTENT( in ) :: & 4554 cd_type ! nature of pt3d grid-points 4555 ! ! = T , U , V , F or W gridpoints 4556 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT( inout ) :: & 4557 pt3d ! 3D array on which the boundary condition is applied 4558 REAL(wp), INTENT( in ) :: & 4559 psgn ! control of the sign change 4560 ! ! = -1. , the sign is changed if north fold boundary 4561 ! ! = 1. , the sign is kept if north fold boundary 4562 4563 !! * Local declarations 4564 INTEGER :: ji, jj, jk, jr, jproc 4565 INTEGER :: ierr 4566 INTEGER :: ildi,ilei,iilb 4567 INTEGER :: ijpj,ijpjm1,ij,ijt,iju 4568 INTEGER :: itaille 4569 REAL(wp), DIMENSION(jpiglo,4,jpk) :: ztab 4570 REAL(wp), DIMENSION(jpi,4,jpk,jpni) :: znorthgloio 4571 REAL(wp), DIMENSION(jpi,4,jpk) :: znorthloc 4572 !!---------------------------------------------------------------------- 4573 4574 ! If we get in this routine it s because : North fold condition and mpp with more 4575 ! than one proc across i : we deal only with the North condition 4576 4577 ! 0. Sign setting 4578 ! --------------- 4579 4580 ijpj=4 4581 ijpjm1=3 4582 4583 ! put in znorthloc the last 4 jlines of pt3d 4584 DO jk = 1, jpk 4585 DO jj = nlcj - ijpj +1, nlcj 4586 ij = jj - nlcj + ijpj 4587 znorthloc(:,ij,jk) = pt3d(:,jj,jk) 4588 END DO 4589 END DO 4590 4591 4592 IF (npolj /= 0 ) THEN 4593 ! Build in proc 0 of ncomm_north the znorthgloio 4594 znorthgloio(:,:,:,:) = 0_wp 4595 4596 #ifdef key_mpp_shmem 4597 not done : compiler error 4598 #elif defined key_mpp_mpi 4599 itaille=jpi*jpk*ijpj 4600 CALL MPI_GATHER(znorthloc,itaille,MPI_DOUBLE_PRECISION,znorthgloio,itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 4601 #endif 4602 4603 ENDIF 4604 4605 IF (narea == north_root+1 ) THEN 4606 ! recover the global north array 4607 ztab(:,:,:) = 0_wp 4608 4609 DO jr = 1, ndim_rank_north 4610 jproc = nrank_north(jr) + 1 4611 ildi = nldit (jproc) 4612 ilei = nleit (jproc) 4613 iilb = nimppt(jproc) 4614 DO jk = 1, jpk 4615 DO jj = 1, 4 4616 DO ji = ildi, ilei 4617 ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) 4618 END DO 4619 END DO 4620 END DO 4621 END DO 4622 4623 4624 ! Horizontal slab 4625 ! =============== 4626 4627 DO jk = 1, jpk 4628 4629 4630 ! 2. North-Fold boundary conditions 4631 ! ---------------------------------- 4632 4633 SELECT CASE ( npolj ) 4634 4635 CASE ( 3, 4 ) ! * North fold T-point pivot 4636 4637 ztab( 1 ,ijpj,jk) = 0.e0 4638 ztab(jpiglo,ijpj,jk) = 0.e0 4639 4640 SELECT CASE ( cd_type ) 4641 4642 CASE ( 'T' , 'S' , 'W' ) ! T-, W-point 4643 DO ji = 2, jpiglo 4644 ijt = jpiglo-ji+2 4645 ztab(ji,ijpj,jk) = psgn * ztab(ijt,ijpj-2,jk) 4646 END DO 4647 DO ji = jpiglo/2+1, jpiglo 4648 ijt = jpiglo-ji+2 4649 ztab(ji,ijpjm1,jk) = psgn * ztab(ijt,ijpjm1,jk) 4650 END DO 4651 4652 CASE ( 'U' ) ! 
U-point 4653 DO ji = 1, jpiglo-1 4654 iju = jpiglo-ji+1 4655 ztab(ji,ijpj,jk) = psgn * ztab(iju,ijpj-2,jk) 4656 END DO 4657 DO ji = jpiglo/2, jpiglo-1 4658 iju = jpiglo-ji+1 4659 ztab(ji,ijpjm1,jk) = psgn * ztab(iju,ijpjm1,jk) 4660 END DO 4661 4662 CASE ( 'V' ) ! V-point 4663 DO ji = 2, jpiglo 4664 ijt = jpiglo-ji+2 4665 ztab(ji,ijpj-1,jk) = psgn * ztab(ijt,ijpj-2,jk) 4666 ztab(ji,ijpj ,jk) = psgn * ztab(ijt,ijpj-3,jk) 4667 END DO 4668 4669 CASE ( 'F' , 'G' ) ! F-point 4670 DO ji = 1, jpiglo-1 4671 iju = jpiglo-ji+1 4672 ztab(ji,ijpj-1,jk) = psgn * ztab(iju,ijpj-2,jk) 4673 ztab(ji,ijpj ,jk) = psgn * ztab(iju,ijpj-3,jk) 4674 END DO 4675 4676 END SELECT 4677 4678 CASE ( 5, 6 ) ! * North fold F-point pivot 4679 4680 ztab( 1 ,ijpj,jk) = 0.e0 4681 ztab(jpiglo,ijpj,jk) = 0.e0 4682 4683 SELECT CASE ( cd_type ) 4684 4685 CASE ( 'T' , 'S' , 'W' ) ! T-, W-point 4686 DO ji = 1, jpiglo 4687 ijt = jpiglo-ji+1 4688 ztab(ji,ijpj,jk) = psgn * ztab(ijt,ijpj-1,jk) 4689 END DO 4690 4691 CASE ( 'U' ) ! U-point 4692 DO ji = 1, jpiglo-1 4693 iju = jpiglo-ji 4694 ztab(ji,ijpj,jk) = psgn * ztab(iju,ijpj-1,jk) 4695 END DO 4696 4697 CASE ( 'V' ) ! V-point 4698 DO ji = 1, jpiglo 4699 ijt = jpiglo-ji+1 4700 ztab(ji,ijpj,jk) = psgn * ztab(ijt,ijpj-2,jk) 4701 END DO 4702 DO ji = jpiglo/2+1, jpiglo 4703 ijt = jpiglo-ji+1 4704 ztab(ji,ijpjm1,jk) = psgn * ztab(ijt,ijpjm1,jk) 4705 END DO 4706 4707 CASE ( 'F' , 'G' ) ! F-point 4708 DO ji = 1, jpiglo-1 4709 iju = jpiglo-ji 4710 ztab(ji,ijpj ,jk) = psgn * ztab(iju,ijpj-2,jk) 4711 END DO 4712 DO ji = jpiglo/2+1, jpiglo-1 4713 iju = jpiglo-ji 4714 ztab(ji,ijpjm1,jk) = psgn * ztab(iju,ijpjm1,jk) 4715 END DO 4716 4717 END SELECT 4718 4719 CASE DEFAULT ! * closed 4720 4721 SELECT CASE ( cd_type) 4722 4723 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 4724 ztab(:, 1 ,jk) = 0.e0 4725 ztab(:,ijpj,jk) = 0.e0 4726 4727 CASE ( 'F' ) ! F-point 4728 ztab(:,ijpj,jk) = 0.e0 4729 4730 END SELECT 4731 4732 END SELECT 4733 4734 ! End of slab 4735 ! =========== 4736 4737 END DO 4738 4739 !! Scatter back to pt3d 4740 DO jr = 1, ndim_rank_north 4741 jproc=nrank_north(jr)+1 4742 ildi=nldit (jproc) 4743 ilei=nleit (jproc) 4744 iilb=nimppt(jproc) 4745 DO jk= 1, jpk 4746 DO jj=1,ijpj 4747 DO ji=ildi,ilei 4748 znorthgloio(ji,jj,jk,jr)=ztab(ji+iilb-1,jj,jk) 4749 END DO 4750 END DO 4751 END DO 4752 END DO 4753 4754 ENDIF ! only done on proc 0 of ncomm_north 4755 4756 #ifdef key_mpp_shmem 4757 not done yet in shmem : compiler error 4758 #elif key_mpp_mpi 4759 IF ( npolj /= 0 ) THEN 4760 itaille=jpi*jpk*ijpj 4761 CALL MPI_SCATTER(znorthgloio,itaille,MPI_DOUBLE_PRECISION,znorthloc,itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 4762 ENDIF 4763 #endif 4764 4765 ! put in the last ijpj jlines of pt3d znorthloc 4766 DO jk = 1 , jpk 4767 DO jj = nlcj - ijpj + 1 , nlcj 4768 ij = jj - nlcj + ijpj 4769 pt3d(:,jj,jk)= znorthloc(:,ij,jk) 4770 END DO 4771 END DO 4772 4773 END SUBROUTINE mpp_lbc_north_3d 4774 4775 4776 SUBROUTINE mpp_lbc_north_2d ( pt2d, cd_type, psgn) 4777 !!--------------------------------------------------------------------- 4778 !! *** routine mpp_lbc_north_2d *** 4779 !! 4780 !! ** Purpose : 4781 !! Ensure proper north fold horizontal bondary condition in mpp configuration 4782 !! in case of jpn1 > 1 (for 2d array ) 4783 !! 4784 !! ** Method : 4785 !! Gather the 4 northern lines of the global domain on 1 processor and 4786 !! apply lbc north-fold on this sub array. Then scatter the fold array 4787 !! back to the processors. 4788 !! 4789 !! History : 4790 !! 8.5 ! 03-09 (J.M. 
Molines ) For mpp folding condition at north 4791 !! from lbc routine 4792 !! 9.0 ! 03-12 (J.M. Molines ) encapsulation into lib_mpp, coding rules of lbc_lnk 4793 !!---------------------------------------------------------------------- 4794 4795 !! * Arguments 4796 CHARACTER(len=1), INTENT( in ) :: & 4797 cd_type ! nature of pt2d grid-points 4798 ! ! = T , U , V , F or W gridpoints 4799 REAL(wp), DIMENSION(jpi,jpj), INTENT( inout ) :: & 4800 pt2d ! 2D array on which the boundary condition is applied 4801 REAL(wp), INTENT( in ) :: & 4802 psgn ! control of the sign change 4803 ! ! = -1. , the sign is changed if north fold boundary 4804 ! ! = 1. , the sign is kept if north fold boundary 4805 4806 4807 !! * Local declarations 4808 4809 INTEGER :: ji, jj, jr, jproc 4810 INTEGER :: ierr 4811 INTEGER :: ildi,ilei,iilb 4812 INTEGER :: ijpj,ijpjm1,ij,ijt,iju 4813 INTEGER :: itaille 4814 4815 REAL(wp), DIMENSION(jpiglo,4) :: ztab 4816 REAL(wp), DIMENSION(jpi,4,jpni) :: znorthgloio 4817 REAL(wp), DIMENSION(jpi,4) :: znorthloc 4818 !!---------------------------------------------------------------------- 4819 !! OPA 8.5, LODYC-IPSL (2002) 4820 !!---------------------------------------------------------------------- 4821 ! If we get in this routine it s because : North fold condition and mpp with more 4822 ! than one proc across i : we deal only with the North condition 4823 4824 ! 0. Sign setting 4825 ! --------------- 4826 4827 ijpj=4 4828 ijpjm1=3 4829 4830 4831 ! put in znorthloc the last 4 jlines of pt2d 4832 DO jj = nlcj - ijpj +1, nlcj 4833 ij = jj - nlcj + ijpj 4834 znorthloc(:,ij)=pt2d(:,jj) 4835 END DO 4836 4837 IF (npolj /= 0 ) THEN 4838 ! Build in proc 0 of ncomm_north the znorthgloio 4839 znorthgloio(:,:,:) = 0_wp 4840 #ifdef key_mpp_shmem 4841 not done : compiler error 4842 #elif defined key_mpp_mpi 4843 itaille=jpi*ijpj 4844 CALL MPI_GATHER(znorthloc,itaille,MPI_DOUBLE_PRECISION,znorthgloio,itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 4845 #endif 4846 ENDIF 4847 4848 IF (narea == north_root+1 ) THEN 4849 ! recover the global north array 4850 ztab(:,:) = 0_wp 4851 4852 DO jr = 1, ndim_rank_north 4853 jproc=nrank_north(jr)+1 4854 ildi=nldit (jproc) 4855 ilei=nleit (jproc) 4856 iilb=nimppt(jproc) 4857 DO jj=1,4 4858 DO ji=ildi,ilei 4859 ztab(ji+iilb-1,jj)=znorthgloio(ji,jj,jr) 4860 END DO 4861 END DO 4862 END DO 4863 4864 4865 ! 2. North-Fold boundary conditions 4866 ! ---------------------------------- 4867 4868 SELECT CASE ( npolj ) 4869 4870 CASE ( 3, 4 ) ! * North fold T-point pivot 4871 4872 ztab( 1 ,ijpj) = 0.e0 4873 ztab(jpiglo,ijpj) = 0.e0 4874 4875 SELECT CASE ( cd_type ) 4876 4877 CASE ( 'T' , 'W' , 'S' ) ! T-, W-point 4878 DO ji = 2, jpiglo 4879 ijt = jpiglo-ji+2 4880 ztab(ji,ijpj) = psgn * ztab(ijt,ijpj-2) 4881 END DO 4882 DO ji = jpiglo/2+1, jpiglo 4883 ijt = jpiglo-ji+2 4884 ztab(ji,ijpjm1) = psgn * ztab(ijt,ijpjm1) 4885 END DO 4886 4887 CASE ( 'U' ) ! U-point 4888 DO ji = 1, jpiglo-1 4889 iju = jpiglo-ji+1 4890 ztab(ji,ijpj) = psgn * ztab(iju,ijpj-2) 4891 END DO 4892 DO ji = jpiglo/2, jpiglo-1 4893 iju = jpiglo-ji+1 4894 ztab(ji,ijpjm1) = psgn * ztab(iju,ijpjm1) 4895 END DO 4896 4897 CASE ( 'V' ) ! V-point 4898 DO ji = 2, jpiglo 4899 ijt = jpiglo-ji+2 4900 ztab(ji,ijpj-1) = psgn * ztab(ijt,ijpj-2) 4901 ztab(ji,ijpj ) = psgn * ztab(ijt,ijpj-3) 4902 END DO 4903 4904 CASE ( 'F' , 'G' ) ! F-point 4905 DO ji = 1, jpiglo-1 4906 iju = jpiglo-ji+1 4907 ztab(ji,ijpj-1) = psgn * ztab(iju,ijpj-2) 4908 ztab(ji,ijpj ) = psgn * ztab(iju,ijpj-3) 4909 END DO 4910 4911 CASE ( 'I' ) ! 
ice U-V point 4912 ztab(2,ijpj) = psgn * ztab(3,ijpj-1) 4913 DO ji = 3, jpiglo 4914 iju = jpiglo - ji + 3 4915 ztab(ji,ijpj) = psgn * ztab(iju,ijpj-1) 4916 END DO 4917 4918 END SELECT 4919 4920 CASE ( 5, 6 ) ! * North fold F-point pivot 4921 4922 ztab( 1 ,ijpj) = 0.e0 4923 ztab(jpiglo,ijpj) = 0.e0 4924 4925 SELECT CASE ( cd_type ) 4926 4927 CASE ( 'T' , 'W' ,'S' ) ! T-, W-point 4928 DO ji = 1, jpiglo 4929 ijt = jpiglo-ji+1 4930 ztab(ji,ijpj) = psgn * ztab(ijt,ijpj-1) 4931 END DO 4932 4933 CASE ( 'U' ) ! U-point 4934 DO ji = 1, jpiglo-1 4935 iju = jpiglo-ji 4936 ztab(ji,ijpj) = psgn * ztab(iju,ijpj-1) 4937 END DO 4938 4939 CASE ( 'V' ) ! V-point 4940 DO ji = 1, jpiglo 4941 ijt = jpiglo-ji+1 4942 ztab(ji,ijpj) = psgn * ztab(ijt,ijpj-2) 4943 END DO 4944 DO ji = jpiglo/2+1, jpiglo 4945 ijt = jpiglo-ji+1 4946 ztab(ji,ijpjm1) = psgn * ztab(ijt,ijpjm1) 4947 END DO 4948 4949 CASE ( 'F' , 'G' ) ! F-point 4950 DO ji = 1, jpiglo-1 4951 iju = jpiglo-ji 4952 ztab(ji,ijpj ) = psgn * ztab(iju,ijpj-2) 4953 END DO 4954 DO ji = jpiglo/2+1, jpiglo-1 4955 iju = jpiglo-ji 4956 ztab(ji,ijpjm1) = psgn * ztab(iju,ijpjm1) 4957 END DO 4958 4959 CASE ( 'I' ) ! ice U-V point 4960 ztab( 2 ,ijpj) = 0.e0 4961 DO ji = 2 , jpiglo-1 4962 ijt = jpiglo - ji + 2 4963 ztab(ji,ijpj)= 0.5 * ( ztab(ji,ijpj-1) + psgn * ztab(ijt,ijpj-1) ) 4964 END DO 4965 4966 END SELECT 4967 4968 CASE DEFAULT ! * closed : the code probably never go through 4969 4970 SELECT CASE ( cd_type) 4971 4972 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 4973 ztab(:, 1 ) = 0.e0 4974 ztab(:,ijpj) = 0.e0 4975 4976 CASE ( 'F' ) ! F-point 4977 ztab(:,ijpj) = 0.e0 4978 4979 CASE ( 'I' ) ! ice U-V point 4980 ztab(:, 1 ) = 0.e0 4981 ztab(:,ijpj) = 0.e0 4982 4983 END SELECT 4984 4985 END SELECT 4986 4987 ! End of slab 4988 ! =========== 4989 4990 !! Scatter back to pt2d 4991 DO jr = 1, ndim_rank_north 4992 jproc=nrank_north(jr)+1 4993 ildi=nldit (jproc) 4994 ilei=nleit (jproc) 4995 iilb=nimppt(jproc) 4996 DO jj=1,ijpj 4997 DO ji=ildi,ilei 4998 znorthgloio(ji,jj,jr)=ztab(ji+iilb-1,jj) 4999 END DO 2049 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2050 !! in mpp configuration in case of jpn1 > 1 2051 !! 2052 !! ** Method : North fold condition and mpp with more than one proc 2053 !! in i-direction require a specific treatment. We gather 2054 !! the 4 northern lines of the global domain on 1 processor 2055 !! and apply lbc north-fold on this sub array. Then we 2056 !! scatter the north fold array back to the processors. 2057 !! 2058 !!---------------------------------------------------------------------- 2059 REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied 2060 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 2061 ! ! = T , U , V , F or W gridpoints 2062 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2063 !! ! = 1. , the sign is kept 2064 INTEGER :: ji, jj, jr 2065 INTEGER :: ierr, itaille, ildi, ilei, iilb 2066 INTEGER :: ijpj, ijpjm1, ij, iproc 2067 REAL(wp), DIMENSION(jpiglo,4,jpk) :: ztab 2068 REAL(wp), DIMENSION(jpi ,4,jpk) :: znorthloc 2069 REAL(wp), DIMENSION(jpi ,4,jpk,jpni) :: znorthgloio 2070 !!---------------------------------------------------------------------- 2071 ! 2072 ijpj = 4 2073 ijpjm1 = 3 2074 ! 2075 DO jj = nlcj - ijpj +1, nlcj ! put in znorthloc the last 4 jlines of pt3d 2076 ij = jj - nlcj + ijpj 2077 znorthloc(:,ij,:) = pt3d(:,jj,:) 2078 END DO 2079 ! 2080 ! ! 
Build in procs of ncomm_north the znorthgloio 2081 itaille = jpi * jpk * ijpj 2082 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2083 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2084 ! 2085 ! ! recover the global north array 2086 DO jr = 1, ndim_rank_north 2087 iproc = nrank_north(jr) + 1 2088 ildi = nldit (iproc) 2089 ilei = nleit (iproc) 2090 iilb = nimppt(iproc) 2091 DO jj = 1, 4 2092 DO ji = ildi, ilei 2093 ztab(ji+iilb-1,jj,:) = znorthgloio(ji,jj,:,jr) 5000 2094 END DO 5001 2095 END DO 5002 5003 ENDIF ! only done on proc 0 of ncomm_north 5004 5005 #ifdef key_mpp_shmem 5006 not done yet in shmem : compiler error 5007 #elif key_mpp_mpi 5008 IF ( npolj /= 0 ) THEN 5009 itaille=jpi*ijpj 5010 CALL MPI_SCATTER(znorthgloio,itaille,MPI_DOUBLE_PRECISION,znorthloc,itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 5011 ENDIF 5012 #endif 5013 5014 ! put in the last ijpj jlines of pt2d znorthloc 5015 DO jj = nlcj - ijpj + 1 , nlcj 2096 END DO 2097 ! 2098 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2099 ! 2100 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d 5016 2101 ij = jj - nlcj + ijpj 5017 pt2d(:,jj)= znorthloc(:,ij) 2102 DO ji= 1, nlci 2103 pt3d(ji,jj,:) = ztab(ji+nimpp-1,ij,:) 2104 END DO 5018 2105 END DO 5019 2106 ! 2107 END SUBROUTINE mpp_lbc_north_3d 2108 2109 2110 SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn) 2111 !!--------------------------------------------------------------------- 2112 !! *** routine mpp_lbc_north_2d *** 2113 !! 2114 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2115 !! in mpp configuration in case of jpn1 > 1 (for 2d array ) 2116 !! 2117 !! ** Method : North fold condition and mpp with more than one proc 2118 !! in i-direction require a specific treatment. We gather 2119 !! the 4 northern lines of the global domain on 1 processor 2120 !! and apply lbc north-fold on this sub array. Then we 2121 !! scatter the north fold array back to the processors. 2122 !! 2123 !!---------------------------------------------------------------------- 2124 REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 3D array on which the b.c. is applied 2125 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 2126 ! ! = T , U , V , F or W gridpoints 2127 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold 2128 !! ! = 1. , the sign is kept 2129 INTEGER :: ji, jj, jr 2130 INTEGER :: ierr, itaille, ildi, ilei, iilb 2131 INTEGER :: ijpj, ijpjm1, ij, iproc 2132 REAL(wp), DIMENSION(jpiglo,4) :: ztab 2133 REAL(wp), DIMENSION(jpi ,4) :: znorthloc 2134 REAL(wp), DIMENSION(jpi ,4,jpni) :: znorthgloio 2135 !!---------------------------------------------------------------------- 2136 ! 2137 ijpj = 4 2138 ijpjm1 = 3 2139 ! 2140 DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d 2141 ij = jj - nlcj + ijpj 2142 znorthloc(:,ij) = pt2d(:,jj) 2143 END DO 2144 2145 ! ! Build in procs of ncomm_north the znorthgloio 2146 itaille = jpi * ijpj 2147 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2148 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2149 ! 2150 DO jr = 1, ndim_rank_north ! recover the global north array 2151 iproc = nrank_north(jr) + 1 2152 ildi=nldit (iproc) 2153 ilei=nleit (iproc) 2154 iilb=nimppt(iproc) 2155 DO jj = 1, 4 2156 DO ji = ildi, ilei 2157 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 2158 END DO 2159 END DO 2160 END DO 2161 ! 2162 CALL lbc_nfd( ztab, cd_type, psgn ) ! 
North fold boundary condition 2163 ! 2164 ! 2165 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2166 ij = jj - nlcj + ijpj 2167 DO ji = 1, nlci 2168 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2169 END DO 2170 END DO 2171 ! 5020 2172 END SUBROUTINE mpp_lbc_north_2d 5021 2173 5022 2174 5023 SUBROUTINE mpp_lbc_north_e ( pt2d, cd_type, psgn) 5024 !!--------------------------------------------------------------------- 5025 !! *** routine mpp_lbc_north_2d *** 5026 !! 5027 !! ** Purpose : 5028 !! Ensure proper north fold horizontal bondary condition in mpp configuration 5029 !! in case of jpn1 > 1 (for 2d array with outer extra halo) 5030 !! 5031 !! ** Method : 5032 !! Gather the 4+2*jpr2dj northern lines of the global domain on 1 processor and 5033 !! apply lbc north-fold on this sub array. Then scatter the fold array 5034 !! back to the processors. 5035 !! 5036 !! History : 5037 !! 8.5 ! 03-09 (J.M. Molines ) For mpp folding condition at north 5038 !! from lbc routine 5039 !! 9.0 ! 03-12 (J.M. Molines ) encapsulation into lib_mpp, coding rules of lbc_lnk 5040 !! 9.0 ! 05-09 (R. Benshila ) adapt mpp_lbc_north_2d 5041 !!---------------------------------------------------------------------- 5042 5043 !! * Arguments 5044 CHARACTER(len=1), INTENT( in ) :: & 5045 cd_type ! nature of pt2d grid-points 5046 ! ! = T , U , V , F or W gridpoints 5047 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT( inout ) :: & 5048 pt2d ! 2D array on which the boundary condition is applied 5049 REAL(wp), INTENT( in ) :: & 5050 psgn ! control of the sign change 5051 ! ! = -1. , the sign is changed if north fold boundary 5052 ! ! = 1. , the sign is kept if north fold boundary 5053 5054 5055 !! * Local declarations 5056 5057 INTEGER :: ji, jj, jr, jproc, jl 5058 INTEGER :: ierr 5059 INTEGER :: ildi,ilei,iilb 5060 INTEGER :: ijpj,ijpjm1,ij,ijt,iju, iprecj 5061 INTEGER :: itaille 5062 5063 REAL(wp), DIMENSION(jpiglo,1-jpr2dj:4+jpr2dj) :: ztab 5064 REAL(wp), DIMENSION(jpi,1-jpr2dj:4+jpr2dj,jpni) :: znorthgloio 5065 REAL(wp), DIMENSION(jpi,1-jpr2dj:4+jpr2dj) :: znorthloc 5066 5067 ! If we get in this routine it s because : North fold condition and mpp with more 5068 ! than one proc across i : we deal only with the North condition 5069 5070 ! 0. Sign setting 5071 ! --------------- 5072 5073 ijpj=4 5074 ijpjm1=3 5075 iprecj = jpr2dj+jprecj 5076 5077 ! put in znorthloc the last 4 jlines of pt2d 5078 DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj 5079 ij = jj - nlcj + ijpj 5080 znorthloc(:,ij)=pt2d(1:jpi,jj) 5081 END DO 5082 5083 IF (npolj /= 0 ) THEN 5084 ! Build in proc 0 of ncomm_north the znorthgloio 5085 znorthgloio(:,:,:) = 0_wp 5086 #ifdef key_mpp_shmem 5087 not done : compiler error 5088 #elif defined key_mpp_mpi 5089 itaille=jpi*(ijpj+2*jpr2dj) 5090 CALL MPI_GATHER(znorthloc(1,1-jpr2dj),itaille,MPI_DOUBLE_PRECISION, & 5091 & znorthgloio(1,1-jpr2dj,1),itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 5092 #endif 5093 ENDIF 5094 5095 IF (narea == north_root+1 ) THEN 5096 ! recover the global north array 5097 ztab(:,:) = 0_wp 5098 5099 DO jr = 1, ndim_rank_north 5100 jproc=nrank_north(jr)+1 5101 ildi=nldit (jproc) 5102 ilei=nleit (jproc) 5103 iilb=nimppt(jproc) 5104 DO jj=1-jpr2dj,ijpj+jpr2dj 5105 DO ji=ildi,ilei 5106 ztab(ji+iilb-1,jj)=znorthgloio(ji,jj,jr) 5107 END DO 5108 END DO 5109 END DO 5110 5111 5112 ! 2. North-Fold boundary conditions 5113 ! ---------------------------------- 5114 5115 SELECT CASE ( npolj ) 5116 5117 CASE ( 3, 4 ) ! 
* North fold T-point pivot 5118 5119 ztab( 1 ,ijpj:ijpj+jpr2dj) = 0.e0 5120 ztab(jpiglo,ijpj:ijpj+jpr2dj) = 0.e0 5121 5122 SELECT CASE ( cd_type ) 5123 5124 CASE ( 'T' , 'W' , 'S' ) ! T-, W-point 5125 DO jl =0, iprecj-1 5126 DO ji = 2, jpiglo 5127 ijt = jpiglo-ji+2 5128 ztab(ji,ijpj+jl) = psgn * ztab(ijt,ijpj-2-jl) 5129 END DO 5130 END DO 5131 DO ji = jpiglo/2+1, jpiglo 5132 ijt = jpiglo-ji+2 5133 ztab(ji,ijpjm1) = psgn * ztab(ijt,ijpjm1) 5134 END DO 5135 5136 CASE ( 'U' ) ! U-point 5137 DO jl =0, iprecj-1 5138 DO ji = 1, jpiglo-1 5139 iju = jpiglo-ji+1 5140 ztab(ji,ijpj+jl) = psgn * ztab(iju,ijpj-2-jl) 5141 END DO 5142 END DO 5143 DO ji = jpiglo/2, jpiglo-1 5144 iju = jpiglo-ji+1 5145 ztab(ji,ijpjm1) = psgn * ztab(iju,ijpjm1) 5146 END DO 5147 5148 CASE ( 'V' ) ! V-point 5149 DO jl =-1, iprecj-1 5150 DO ji = 2, jpiglo 5151 ijt = jpiglo-ji+2 5152 ztab(ji,ijpj+jl) = psgn * ztab(ijt,ijpj-3-jl) 5153 END DO 2175 SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn) 2176 !!--------------------------------------------------------------------- 2177 !! *** routine mpp_lbc_north_2d *** 2178 !! 2179 !! ** Purpose : Ensure proper north fold horizontal bondary condition 2180 !! in mpp configuration in case of jpn1 > 1 and for 2d 2181 !! array with outer extra halo 2182 !! 2183 !! ** Method : North fold condition and mpp with more than one proc 2184 !! in i-direction require a specific treatment. We gather 2185 !! the 4+2*jpr2dj northern lines of the global domain on 1 2186 !! processor and apply lbc north-fold on this sub array. 2187 !! Then we scatter the north fold array back to the processors. 2188 !! 2189 !!---------------------------------------------------------------------- 2190 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo 2191 CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points 2192 ! ! = T , U , V , F or W -points 2193 REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the 2194 !! ! north fold, = 1. otherwise 2195 INTEGER :: ji, jj, jr 2196 INTEGER :: ierr, itaille, ildi, ilei, iilb 2197 INTEGER :: ijpj, ij, iproc 2198 REAL(wp), DIMENSION(jpiglo,4+2*jpr2dj) :: ztab 2199 REAL(wp), DIMENSION(jpi ,4+2*jpr2dj) :: znorthloc 2200 REAL(wp), DIMENSION(jpi ,4+2*jpr2dj,jpni) :: znorthgloio 2201 !!---------------------------------------------------------------------- 2202 ! 2203 ijpj=4 2204 2205 ij=0 2206 ! put in znorthloc the last 4 jlines of pt2d 2207 DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj 2208 ij = ij + 1 2209 DO ji = 1, jpi 2210 znorthloc(ji,ij)=pt2d(ji,jj) 2211 END DO 2212 END DO 2213 ! 2214 itaille = jpi * ( ijpj + 2 * jpr2dj ) 2215 CALL MPI_ALLGATHER( znorthloc(1,1) , itaille, MPI_DOUBLE_PRECISION, & 2216 & znorthgloio(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2217 ! 2218 DO jr = 1, ndim_rank_north ! recover the global north array 2219 iproc = nrank_north(jr) + 1 2220 ildi = nldit (iproc) 2221 ilei = nleit (iproc) 2222 iilb = nimppt(iproc) 2223 DO jj = 1, ijpj+2*jpr2dj 2224 DO ji = ildi, ilei 2225 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 5154 2226 END DO 5155 5156 CASE ( 'F' , 'G' ) ! F-point 5157 DO jl =-1, iprecj-1 5158 DO ji = 1, jpiglo-1 5159 iju = jpiglo-ji+1 5160 ztab(ji,ijpj+jl) = psgn * ztab(iju,ijpj-3-jl) 5161 END DO 5162 END DO 5163 5164 CASE ( 'I' ) ! 
ice U-V point 5165 DO jl =0, iprecj-1 5166 ztab(2,ijpj+jl) = psgn * ztab(3,ijpj-1+jl) 5167 DO ji = 3, jpiglo 5168 iju = jpiglo - ji + 3 5169 ztab(ji,ijpj+jl) = psgn * ztab(iju,ijpj-1-jl) 5170 END DO 5171 END DO 5172 5173 END SELECT 5174 5175 CASE ( 5, 6 ) ! * North fold F-point pivot 5176 5177 ztab( 1 ,ijpj:ijpj+jpr2dj) = 0.e0 5178 ztab(jpiglo,ijpj:ijpj+jpr2dj) = 0.e0 5179 5180 SELECT CASE ( cd_type ) 5181 5182 CASE ( 'T' , 'W' ,'S' ) ! T-, W-point 5183 DO jl = 0, iprecj-1 5184 DO ji = 1, jpiglo 5185 ijt = jpiglo-ji+1 5186 ztab(ji,ijpj+jl) = psgn * ztab(ijt,ijpj-1-jl) 5187 END DO 5188 END DO 5189 5190 CASE ( 'U' ) ! U-point 5191 DO jl = 0, iprecj-1 5192 DO ji = 1, jpiglo-1 5193 iju = jpiglo-ji 5194 ztab(ji,ijpj+jl) = psgn * ztab(iju,ijpj-1-jl) 5195 END DO 5196 END DO 5197 5198 CASE ( 'V' ) ! V-point 5199 DO jl = 0, iprecj-1 5200 DO ji = 1, jpiglo 5201 ijt = jpiglo-ji+1 5202 ztab(ji,ijpj+jl) = psgn * ztab(ijt,ijpj-2-jl) 5203 END DO 5204 END DO 5205 DO ji = jpiglo/2+1, jpiglo 5206 ijt = jpiglo-ji+1 5207 ztab(ji,ijpjm1) = psgn * ztab(ijt,ijpjm1) 5208 END DO 5209 5210 CASE ( 'F' , 'G' ) ! F-point 5211 DO jl = 0, iprecj-1 5212 DO ji = 1, jpiglo-1 5213 iju = jpiglo-ji 5214 ztab(ji,ijpj+jl) = psgn * ztab(iju,ijpj-2-jl) 5215 END DO 5216 END DO 5217 DO ji = jpiglo/2+1, jpiglo-1 5218 iju = jpiglo-ji 5219 ztab(ji,ijpjm1) = psgn * ztab(iju,ijpjm1) 5220 END DO 5221 5222 CASE ( 'I' ) ! ice U-V point 5223 ztab( 2 ,ijpj:ijpj+jpr2dj) = 0.e0 5224 DO jl = 0, jpr2dj 5225 DO ji = 2 , jpiglo-1 5226 ijt = jpiglo - ji + 2 5227 ztab(ji,ijpj+jl)= 0.5 * ( ztab(ji,ijpj-1-jl) + psgn * ztab(ijt,ijpj-1-jl) ) 5228 END DO 5229 END DO 5230 5231 END SELECT 5232 5233 CASE DEFAULT ! * closed : the code probably never go through 5234 5235 SELECT CASE ( cd_type) 5236 5237 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 5238 ztab(:, 1:1-jpr2dj ) = 0.e0 5239 ztab(:,ijpj:ijpj+jpr2dj) = 0.e0 5240 5241 CASE ( 'F' ) ! F-point 5242 ztab(:,ijpj:ijpj+jpr2dj) = 0.e0 5243 5244 CASE ( 'I' ) ! ice U-V point 5245 ztab(:, 1:1-jpr2dj ) = 0.e0 5246 ztab(:,ijpj:ijpj+jpr2dj) = 0.e0 5247 5248 END SELECT 5249 5250 END SELECT 5251 5252 ! End of slab 5253 ! =========== 5254 5255 !! Scatter back to pt2d 5256 DO jr = 1, ndim_rank_north 5257 jproc=nrank_north(jr)+1 5258 ildi=nldit (jproc) 5259 ilei=nleit (jproc) 5260 iilb=nimppt(jproc) 5261 DO jj=1-jpr2dj,ijpj+jpr2dj 5262 DO ji=ildi,ilei 5263 znorthgloio(ji,jj,jr)=ztab(ji+iilb-1,jj) 5264 END DO 5265 END DO 5266 END DO 5267 5268 ENDIF ! only done on proc 0 of ncomm_north 5269 5270 #ifdef key_mpp_shmem 5271 not done yet in shmem : compiler error 5272 #elif key_mpp_mpi 5273 IF ( npolj /= 0 ) THEN 5274 itaille=jpi*(ijpj+2*jpr2dj) 5275 CALL MPI_SCATTER(znorthgloio(1,1-jpr2dj,1),itaille,MPI_DOUBLE_PRECISION, & 5276 & znorthloc(1,1-jpr2dj),itaille,MPI_DOUBLE_PRECISION,0,ncomm_north,ierr) 5277 ENDIF 5278 #endif 5279 5280 ! put in the last ijpj jlines of pt2d znorthloc 5281 DO jj = nlcj - ijpj -jpr2dj + 1 , nlcj +jpr2dj 5282 ij = jj - nlcj + ijpj 5283 pt2d(1:jpi,jj)= znorthloc(:,ij) 2227 END DO 5284 2228 END DO 5285 2229 2230 2231 ! 2. North-Fold boundary conditions 2232 ! ---------------------------------- 2233 CALL lbc_nfd( ztab(:,:), cd_type, psgn, pr2dj = jpr2dj ) 2234 2235 ij = jpr2dj 2236 !! Scatter back to pt2d 2237 DO jj = nlcj - ijpj + 1 , nlcj +jpr2dj 2238 ij = ij +1 2239 DO ji= 1, nlci 2240 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2241 END DO 2242 END DO 2243 ! 
5286 2244 END SUBROUTINE mpp_lbc_north_e 5287 2245 5288 SUBROUTINE mpi_init_opa(code) 5289 !!--------------------------------------------------------------------- 5290 !! *** routine mpp_init.opa *** 5291 !! 5292 !! ** Purpose :: export and attach a MPI buffer for bsend 5293 !! 5294 !! ** Method :: define buffer size in namelist, if 0 no buffer attachment 5295 !! but classical mpi_init 5296 !! 5297 !! History :: 01/11 :: IDRIS initial version for IBM only 5298 !! 08/04 :: R. Benshila, generalisation 5299 !! 5300 !!--------------------------------------------------------------------- 5301 2246 2247 SUBROUTINE mpi_init_opa( code ) 2248 !!--------------------------------------------------------------------- 2249 !! *** routine mpp_init.opa *** 2250 !! 2251 !! ** Purpose :: export and attach a MPI buffer for bsend 2252 !! 2253 !! ** Method :: define buffer size in namelist, if 0 no buffer attachment 2254 !! but classical mpi_init 2255 !! 2256 !! History :: 01/11 :: IDRIS initial version for IBM only 2257 !! 08/04 :: R. Benshila, generalisation 2258 !!--------------------------------------------------------------------- 5302 2259 INTEGER :: code, ierr 5303 2260 LOGICAL :: mpi_was_called 5304 5305 ! MPI initialization5306 CALL mpi_initialized( mpi_was_called, code)2261 !!--------------------------------------------------------------------- 2262 ! 2263 CALL mpi_initialized( mpi_was_called, code ) ! MPI initialization 5307 2264 IF ( code /= MPI_SUCCESS ) THEN 5308 CALL ctl_stop( ' lib_mpp: Error in routine mpi_initialized' )5309 CALL mpi_abort( mpi_comm_world, code, ierr )2265 CALL ctl_stop( ' lib_mpp: Error in routine mpi_initialized' ) 2266 CALL mpi_abort( mpi_comm_world, code, ierr ) 5310 2267 ENDIF 5311 5312 IF 5313 CALL mpi_init( code)5314 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code )2268 ! 2269 IF( .NOT. mpi_was_called ) THEN 2270 CALL mpi_init( code ) 2271 CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code ) 5315 2272 IF ( code /= MPI_SUCCESS ) THEN 5316 2273 CALL ctl_stop( ' lib_mpp: Error in routine mpi_comm_dup' ) … … 5318 2275 ENDIF 5319 2276 ENDIF 5320 2277 ! 5321 2278 IF( nn_buffer > 0 ) THEN 5322 2279 IF ( lwp ) WRITE(numout,*) 'mpi_bsend, buffer allocation of : ', nn_buffer 5323 5324 2280 ! Buffer allocation and attachment 5325 ALLOCATE( tampon(nn_buffer))5326 CALL mpi_buffer_attach( tampon,nn_buffer,code)2281 ALLOCATE( tampon(nn_buffer) ) 2282 CALL mpi_buffer_attach( tampon, nn_buffer,code ) 5327 2283 ENDIF 5328 2284 ! 5329 2285 END SUBROUTINE mpi_init_opa 5330 2286 … … 5348 2304 MODULE PROCEDURE mppobc_1d, mppobc_2d, mppobc_3d, mppobc_4d 5349 2305 END INTERFACE 5350 INTERFACE mpp_minloc5351 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d5352 END INTERFACE5353 INTERFACE mpp_maxloc5354 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d5355 END INTERFACE2306 INTERFACE mpp_minloc 2307 MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d 2308 END INTERFACE 2309 INTERFACE mpp_maxloc 2310 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 2311 END INTERFACE 5356 2312 5357 2313 … … 5455 2411 5456 2412 SUBROUTINE mppobc_1d( parr, kd1, kd2, kl, kk, ktype, kij ) 5457 INTEGER :: kd1, kd2, kl , kk, ktype, kij 5458 REAL, DIMENSION(:) :: parr ! variable array 5459 WRITE(*,*) 'mppobc: You should not have seen this print! error?', & 5460 & parr(1), kd1, kd2, kl, kk, ktype, kij 2413 INTEGER :: kd1, kd2, kl , kk, ktype, kij 2414 REAL, DIMENSION(:) :: parr ! variable array 2415 WRITE(*,*) 'mppobc: You should not have seen this print! 
error?', parr(1), kd1, kd2, kl, kk, ktype, kij 5461 2416 END SUBROUTINE mppobc_1d 5462 2417 5463 2418 SUBROUTINE mppobc_2d( parr, kd1, kd2, kl, kk, ktype, kij ) 5464 INTEGER :: kd1, kd2, kl , kk, ktype, kij 5465 REAL, DIMENSION(:,:) :: parr ! variable array 5466 WRITE(*,*) 'mppobc: You should not have seen this print! error?', & 5467 & parr(1,1), kd1, kd2, kl, kk, ktype, kij 2419 INTEGER :: kd1, kd2, kl , kk, ktype, kij 2420 REAL, DIMENSION(:,:) :: parr ! variable array 2421 WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1), kd1, kd2, kl, kk, ktype, kij 5468 2422 END SUBROUTINE mppobc_2d 5469 2423 5470 2424 SUBROUTINE mppobc_3d( parr, kd1, kd2, kl, kk, ktype, kij ) 5471 INTEGER :: kd1, kd2, kl , kk, ktype, kij 5472 REAL, DIMENSION(:,:,:) :: parr ! variable array 5473 WRITE(*,*) 'mppobc: You should not have seen this print! error?', & 5474 & parr(1,1,1), kd1, kd2, kl, kk, ktype, kij 2425 INTEGER :: kd1, kd2, kl , kk, ktype, kij 2426 REAL, DIMENSION(:,:,:) :: parr ! variable array 2427 WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1), kd1, kd2, kl, kk, ktype, kij 5475 2428 END SUBROUTINE mppobc_3d 5476 2429 5477 2430 SUBROUTINE mppobc_4d( parr, kd1, kd2, kl, kk, ktype, kij ) 5478 INTEGER :: kd1, kd2, kl , kk, ktype, kij 5479 REAL, DIMENSION(:,:,:,:) :: parr ! variable array 5480 WRITE(*,*) 'mppobc: You should not have seen this print! error?', & 5481 & parr(1,1,1,1), kd1, kd2, kl, kk, ktype, kij 2431 INTEGER :: kd1, kd2, kl , kk, ktype, kij 2432 REAL, DIMENSION(:,:,:,:) :: parr ! variable array 2433 WRITE(*,*) 'mppobc: You should not have seen this print! error?', parr(1,1,1,1), kd1, kd2, kl, kk, ktype, kij 5482 2434 END SUBROUTINE mppobc_4d 5483 5484 5485 SUBROUTINE mpplnks( parr ) ! Dummy routine5486 REAL, DIMENSION(:,:) :: parr5487 WRITE(*,*) 'mpplnks: You should not have seen this print! error?', parr(1,1)5488 END SUBROUTINE mpplnks5489 2435 5490 2436 SUBROUTINE mppisl_a_int( karr, kdim ) … … 5510 2456 END SUBROUTINE mppisl_real 5511 2457 5512 SUBROUTINE mpp_minloc2d 2458 SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj ) 5513 2459 REAL :: pmin 5514 2460 REAL , DIMENSION (:,:) :: ptab, pmask 5515 2461 INTEGER :: ki, kj 5516 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmin, ki, kj 5517 WRITE(*,*) ' " ": " " ', ptab(1,1), pmask(1,1) 2462 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1) 5518 2463 END SUBROUTINE mpp_minloc2d 5519 2464 5520 SUBROUTINE mpp_minloc3d 2465 SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk ) 5521 2466 REAL :: pmin 5522 2467 REAL , DIMENSION (:,:,:) :: ptab, pmask 5523 2468 INTEGER :: ki, kj, kk 5524 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmin, ki, kj, kk 5525 WRITE(*,*) ' " ": " " ', ptab(1,1,1), pmask(1,1,1) 2469 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1) 5526 2470 END SUBROUTINE mpp_minloc3d 5527 2471 5528 SUBROUTINE mpp_maxloc2d 2472 SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) 5529 2473 REAL :: pmax 5530 2474 REAL , DIMENSION (:,:) :: ptab, pmask 5531 2475 INTEGER :: ki, kj 5532 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmax, ki, kj 5533 WRITE(*,*) ' " ": " " ', ptab(1,1), pmask(1,1) 2476 WRITE(*,*) 'mppisl_real: You should not have seen this print! 
error?', pmax, ki, kj, ptab(1,1), pmask(1,1) 5534 2477 END SUBROUTINE mpp_maxloc2d 5535 2478 5536 SUBROUTINE mpp_maxloc3d 2479 SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) 5537 2480 REAL :: pmax 5538 2481 REAL , DIMENSION (:,:,:) :: ptab, pmask 5539 2482 INTEGER :: ki, kj, kk 5540 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmax, ki, kj, kk 5541 WRITE(*,*) ' " ": " " ', ptab(1,1,1), pmask(1,1,1) 2483 WRITE(*,*) 'mppisl_real: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1) 5542 2484 END SUBROUTINE mpp_maxloc3d 5543 2485 … … 5546 2488 END SUBROUTINE mppstop 5547 2489 5548 SUBROUTINE mpp_ini_ice( kcom)2490 SUBROUTINE mpp_ini_ice( kcom ) 5549 2491 INTEGER :: kcom 5550 WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom2492 WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom 5551 2493 END SUBROUTINE mpp_ini_ice 5552 2494 5553 SUBROUTINE mpp_comm_free( kcom)2495 SUBROUTINE mpp_comm_free( kcom ) 5554 2496 INTEGER :: kcom 5555 WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom2497 WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom 5556 2498 END SUBROUTINE mpp_comm_free 5557 2499
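The rewritten mpp_lbc_north_2d/3d routines above replace the old MPI_GATHER / fold-on-proc-0 / MPI_SCATTER sequence with a single MPI_ALLGATHER over ncomm_north followed by a local call to lbc_nfd: every northern processor ends up holding the assembled northern rows, so no return scatter is needed. Below is a minimal, self-contained sketch of that allgather step, assuming a one-dimensional row decomposition with made-up tile sizes and a dummy field; it is not the NEMO routine itself, only an illustration of the pattern.

   PROGRAM allgather_north_sketch
      USE mpi
      IMPLICIT NONE
      INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)
      INTEGER, PARAMETER :: jpi = 8, jpj = 6, ijpj = 4        ! hypothetical tile sizes
      INTEGER :: ierr, irank, isize, itaille, jj, ij
      REAL(wp), ALLOCATABLE :: pt2d(:,:), znorthloc(:,:), znorthglo(:,:,:)
      !
      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, irank, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, isize, ierr )
      !
      ALLOCATE( pt2d(jpi,jpj), znorthloc(jpi,ijpj), znorthglo(jpi,ijpj,isize) )
      pt2d(:,:) = REAL( irank, wp )                            ! dummy field
      !
      DO jj = jpj - ijpj + 1, jpj                              ! copy the last ijpj rows
         ij = jj - jpj + ijpj
         znorthloc(:,ij) = pt2d(:,jj)
      END DO
      !
      itaille = jpi * ijpj                                     ! block size per process
      CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthglo , itaille, MPI_DOUBLE_PRECISION,   &
         &                MPI_COMM_WORLD, ierr )
      !
      ! the real routine now rebuilds the jpiglo-wide array from the gathered
      ! blocks and calls lbc_nfd on it; since every rank holds znorthglo, the
      ! folded rows can be copied straight back into pt2d without MPI_SCATTER.
      IF( irank == 0 )   WRITE(*,*) 'gathered north blocks from', isize, 'processes'
      !
      DEALLOCATE( pt2d, znorthloc, znorthglo )
      CALL MPI_FINALIZE( ierr )
   END PROGRAM allgather_north_sketch

The cost of MPI_ALLGATHER is that every member of ncomm_north receives all blocks, but the exchanged slab is only 4 (or 4+2*jpr2dj) rows deep, and the change removes both the root-only branch and the return scatter of the previous version.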
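mpp_ini_north and mpp_ini_ice build their restricted communicators with the same three MPI calls: extract the world group, include the selected ranks, and create a communicator over that group. The sketch below shows the sequence in isolation; the "even ranks" selection is an arbitrary stand-in for the nrank_north / nrank_ice lists computed in the routines above, and the variable names are simplified.

   PROGRAM subcomm_sketch
      USE mpi
      IMPLICIT NONE
      INTEGER :: ierr, irank, isize, ii, jproc
      INTEGER :: ngrp_world, ngrp_sub, ncomm_sub
      INTEGER, ALLOCATABLE :: nrank_sub(:)
      !
      CALL MPI_INIT( ierr )
      CALL MPI_COMM_RANK( MPI_COMM_WORLD, irank, ierr )
      CALL MPI_COMM_SIZE( MPI_COMM_WORLD, isize, ierr )
      !
      ALLOCATE( nrank_sub( (isize+1)/2 ) )
      ii = 0
      DO jproc = 0, isize-1, 2            ! pick the even ranks as the sub-group
         ii = ii + 1
         nrank_sub(ii) = jproc
      END DO
      !
      CALL MPI_COMM_GROUP ( MPI_COMM_WORLD, ngrp_world, ierr )            ! world group
      CALL MPI_GROUP_INCL ( ngrp_world, ii, nrank_sub, ngrp_sub, ierr )   ! sub-group
      CALL MPI_COMM_CREATE( MPI_COMM_WORLD, ngrp_sub, ncomm_sub, ierr )   ! sub-communicator
      !
      ! ranks outside the group receive MPI_COMM_NULL and must not use ncomm_sub
      IF( ncomm_sub /= MPI_COMM_NULL ) THEN
         CALL MPI_COMM_RANK( ncomm_sub, irank, ierr )
         WRITE(*,*) 'rank in sub-communicator:', irank
      ENDIF
      !
      DEALLOCATE( nrank_sub )
      CALL MPI_FINALIZE( ierr )
   END PROGRAM subcomm_sketch

Note that MPI_COMM_CREATE is collective over the parent communicator, so every rank calls it, and ranks outside the group get MPI_COMM_NULL back; this is why ncomm_north and ncomm_ice must only be used on the processors listed in nrank_north / nrank_ice.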