MODULE lib_mpp
   !!======================================================================
   !!                       ***  MODULE  lib_mpp  ***
   !! Ocean numerics:  massively parallel processing library
   !!=====================================================================
   !! History :  OPA  !  1994  (M. Guyon, J. Escobar, M. Imbard)  Original code
   !!            7.0  !  1997  (A.M. Treguier)  SHMEM additions
   !!            8.0  !  1998  (M. Imbard, J. Escobar, L. Colombet)  SHMEM and MPI
   !!                 !  1998  (J.M. Molines)  Open boundary conditions
   !!   NEMO     1.0  !  2003  (J.-M. Molines, G. Madec)  F90, free form
   !!                 !  2003  (J.M. Molines)  add mpp_ini_north(_3d,_2d)
   !!             -   !  2004  (R. Bourdalle Badie)  isend option in mpi
   !!                 !  2004  (J.M. Molines)  minloc, maxloc
   !!             -   !  2005  (G. Madec, S. Masson)  npolj=5,6 F-point & ice cases
   !!             -   !  2005  (R. Redler)  Replacement of MPI_COMM_WORLD except for MPI_Abort
   !!             -   !  2005  (R. Benshila, G. Madec)  add extra halo case
   !!             -   !  2008  (R. Benshila)  add mpp_ini_ice
   !!            3.2  !  2009  (R. Benshila)  SHMEM suppression, north fold in lbc_nfd
   !!            3.2  !  2009  (O. Marti)     add mpp_ini_znl
   !!            4.0  !  2011  (G. Madec)  move ctl_ routines from in_out_manager
   !!            3.5  !  2012  (S.Mocavero, I. Epicoco)  Add 'mpp_lnk_bdy_3d', 'mpp_lnk_obc_3d',
   !!                          'mpp_lnk_bdy_2d' and 'mpp_lnk_obc_2d' routines and update
   !!                          the mppobc routine to optimize the BDY and OBC communications
   !!            3.5  !  2013  (C. Ethe, G. Madec)  message passing arrays as local variables
   !!            3.5  !  2013  (S.Mocavero, I.Epicoco - CMCC)  north fold optimizations
   !!            3.6  !  2015  (O. Tintó and M. Castrillo - BSC)  Added 'mpp_lnk_2d_multiple', 'mpp_lbc_north_2d_multiple', 'mpp_max_multiple'
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!   ctl_stop      : stop the run and print the error message(s)
   !!   ctl_warn      : print warning message(s) without stopping the run
   !!   ctl_opn       : open a file and check that the required file is available
   !!   ctl_nam       : print information when an error occurs while reading a namelist
   !!   get_unit      : return the index of an unused logical unit
   !!----------------------------------------------------------------------
#if   defined key_mpp_mpi
   !!----------------------------------------------------------------------
   !!   'key_mpp_mpi'             MPI massively parallel processing library
   !!----------------------------------------------------------------------
   !!   lib_mpp_alloc     : allocate mpp arrays
   !!   mynode            : identify the processor unit
   !!   mpp_lnk           : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d)
   !!   mpp_lnk_3d_gather : message passing management for two 3D arrays
   !!   mpp_lnk_e         : interface (defined in lbclnk) for message passing of 2d array with extra halo (mpp_lnk_2d_e)
   !!   mpp_lnk_icb       : interface for message passing of 2d arrays with extra halo for icebergs (mpp_lnk_2d_icb)
   !!   mpprecv           : point-to-point receive
   !!   mppsend           : point-to-point send
   !!   mppscatter        : scatter a field from one processor to all the others
   !!   mppgather         : gather a field from all the processors onto one
   !!   mpp_min           : generic interface for mppmin_int , mppmin_a_int , mppmin_real, mppmin_a_real
   !!   mpp_max           : generic interface for mppmax_int , mppmax_a_int , mppmax_real, mppmax_a_real
   !!   mpp_sum           : generic interface for mppsum_int , mppsum_a_int , mppsum_real, mppsum_a_real
   !!   mpp_minloc        : location of the global minimum
   !!   mpp_maxloc        : location of the global maximum
   !!   mppsync           : MPI barrier synchronisation
   !!   mppstop           : stop all MPI processes
   !!   mpp_ini_north     : initialisation of north fold
   !!   mpp_lbc_north     : north fold processors gathering
   !!   mpp_lbc_north_e   : variant of mpp_lbc_north for extra outer halo
   !!   mpp_lbc_north_icb : variant of mpp_lbc_north for extra outer halo with icebergs
   !!----------------------------------------------------------------------
   USE dom_oce          ! ocean space and time domain
   USE lbcnfd           ! north fold treatment
   USE in_out_manager   ! I/O manager
   USE wrk_nemo         ! work arrays

   IMPLICIT NONE
   PRIVATE

   PUBLIC   ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam
   PUBLIC   mynode, mppstop, mppsync, mpp_comm_free
   PUBLIC   mpp_ini_north, mpp_lbc_north, mpp_lbc_north_e
   PUBLIC   mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc
   PUBLIC   mpp_max_multiple
   PUBLIC   mpp_lnk_3d, mpp_lnk_3d_gather, mpp_lnk_2d, mpp_lnk_2d_e
   PUBLIC   mpp_lnk_2d_9, mpp_lnk_2d_multiple
   PUBLIC   mppscatter, mppgather
   PUBLIC   mpp_ini_ice, mpp_ini_znl
   PUBLIC   mppsize
   PUBLIC   mppsend, mpprecv   ! needed by TAM and ICB routines
   PUBLIC   mpp_lnk_bdy_2d, mpp_lnk_bdy_3d
   PUBLIC   mpp_lbc_north_icb, mpp_lnk_2d_icb
   PUBLIC   mpprank

   TYPE arrayptr
      REAL, DIMENSION(:,:), POINTER ::   pt2d
   END TYPE arrayptr
   PUBLIC   arrayptr

   !! * Interfaces
   !! define generic interfaces for these routines as they are sometimes called
   !! with scalar arguments instead of array arguments, which causes problems
   !! for the compilation on AIX systems as well as NEC and SGI. OK on COMPAQ.
   INTERFACE mpp_min
      MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real
   END INTERFACE
   INTERFACE mpp_max
      MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real
   END INTERFACE
   INTERFACE mpp_sum
      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real,   &
         &             mppsum_realdd, mppsum_a_realdd
   END INTERFACE
   INTERFACE mpp_lbc_north
      MODULE PROCEDURE mpp_lbc_north_3d, mpp_lbc_north_2d
   END INTERFACE
   INTERFACE mpp_minloc
      MODULE PROCEDURE mpp_minloc2d, mpp_minloc3d
   END INTERFACE
   INTERFACE mpp_maxloc
      MODULE PROCEDURE mpp_maxloc2d, mpp_maxloc3d
   END INTERFACE

   INTERFACE mpp_max_multiple
      MODULE PROCEDURE mppmax_real_multiple
   END INTERFACE
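
   !! A note on use: the generic names above dispatch on the rank and type of their
   !! arguments, so callers reduce scalars and arrays with the same syntax.
   !! Minimal sketch (illustrative only; the variable names are hypothetical):
   !!      REAL(wp)               ::   zmax    ! local maximum
   !!      REAL(wp), DIMENSION(4) ::   zsum4   ! four partial sums
   !!      CALL mpp_max( zmax )                ! resolves to mppmax_real
   !!      CALL mpp_sum( zsum4, 4 )            ! resolves to mppsum_a_real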

   !! ========================= !!
   !!  MPI  variable definition !!
   !! ========================= !!
!$AGRIF_DO_NOT_TREAT
   INCLUDE 'mpif.h'
!$AGRIF_END_DO_NOT_TREAT

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .TRUE.    !: mpp flag

   INTEGER, PARAMETER         ::   nprocmax = 2**10   ! maximum dimension (required to be a power of 2)

   INTEGER ::   mppsize   ! number of processes
   INTEGER ::   mpprank   ! process number  [ 0 - size-1 ]
!$AGRIF_DO_NOT_TREAT
   INTEGER, PUBLIC ::   mpi_comm_opa   ! opa local communicator
!$AGRIF_END_DO_NOT_TREAT

   INTEGER ::   MPI_SUMDD

   ! variables used in case of sea-ice
   INTEGER, PUBLIC ::   ncomm_ice       !: communicator made by the processors with sea-ice (public so that it can be freed in limthd)
   INTEGER ::   ngrp_iworld     ! group ID for the world processors (for rheology)
   INTEGER ::   ngrp_ice        ! group ID for the ice processors (for rheology)
   INTEGER ::   ndim_rank_ice   ! number of 'ice' processors
   INTEGER ::   n_ice_root      ! number (in the comm_ice) of proc 0 in the ice comm
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE ::   nrank_ice   ! dimension ndim_rank_ice

   ! variables used for zonal integration
   INTEGER, PUBLIC ::   ncomm_znl       !: communicator made by the processors on the same zonal average
   LOGICAL, PUBLIC ::   l_znl_root      ! True on the 'left'most processor on the same row
   INTEGER ::   ngrp_znl        ! group ID for the znl processors
   INTEGER ::   ndim_rank_znl   ! number of processors on the same zonal average
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE ::   nrank_znl   ! dimension ndim_rank_znl, number of the procs into the same znl domain

   ! North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM)
   INTEGER, PUBLIC ::   ngrp_world        ! group ID for the world processors
   INTEGER, PUBLIC ::   ngrp_opa          ! group ID for the opa processors
   INTEGER, PUBLIC ::   ngrp_north        ! group ID for the northern processors (to be fold)
   INTEGER, PUBLIC ::   ncomm_north       ! communicator made by the processors belonging to ngrp_north
   INTEGER, PUBLIC ::   ndim_rank_north   ! number of 'sea' processors in the northern line (can be /= jpni !)
   INTEGER, PUBLIC ::   njmppmax          ! value of njmpp for the processors of the northern line
   INTEGER, PUBLIC ::   north_root        ! number (in the comm_opa) of proc 0 in the northern comm
   INTEGER, DIMENSION(:), ALLOCATABLE, SAVE, PUBLIC ::   nrank_north   ! dimension ndim_rank_north

   ! Type of send : standard, buffered, immediate
   CHARACTER(len=1), PUBLIC ::   cn_mpi_send         ! type of mpi send/receive (S=standard, B=bsend, I=isend)
   LOGICAL         , PUBLIC ::   l_isend = .FALSE.   ! isend use indicator (T if cn_mpi_send='I')
   INTEGER         , PUBLIC ::   nn_buffer           ! size of the buffer in case of mpi_bsend

   REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE ::   tampon   ! buffer in case of bsend

   LOGICAL, PUBLIC ::   ln_nnogather                 ! namelist control of northfold comms
   LOGICAL, PUBLIC ::   l_north_nogather = .FALSE.   ! internal control of northfold comms
   INTEGER, PUBLIC ::   ityp
   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id: lib_mpp.F90 8537 2017-09-19 05:46:09Z gm $
   !! Software governed by the CeCILL licence     (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------
CONTAINS


   FUNCTION mynode( ldtxt, ldname, kumnam_ref, kumnam_cfg, kumond, kstop, localComm )
      !!----------------------------------------------------------------------
      !!                  ***  routine mynode  ***
      !!
      !! ** Purpose :   Identify the processor unit (MPI rank)
      !!----------------------------------------------------------------------
      CHARACTER(len=*), DIMENSION(:), INTENT(  out) ::   ldtxt
      CHARACTER(len=*)              , INTENT(in   ) ::   ldname
      INTEGER                       , INTENT(in   ) ::   kumnam_ref   ! logical unit for reference namelist
      INTEGER                       , INTENT(in   ) ::   kumnam_cfg   ! logical unit for configuration namelist
      INTEGER                       , INTENT(inout) ::   kumond       ! logical unit for namelist output
      INTEGER                       , INTENT(inout) ::   kstop        ! stop indicator
      INTEGER, OPTIONAL             , INTENT(in   ) ::   localComm
      !
      INTEGER ::   mynode, ierr, code, ji, ii, ios
      LOGICAL ::   mpi_was_called
      !
      NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij, ln_nnogather
      !!----------------------------------------------------------------------
      !
      ii = 1
      WRITE(ldtxt(ii),*)                                                                  ;   ii = ii + 1
      WRITE(ldtxt(ii),*) 'mynode : mpi initialisation'                                    ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '~~~~~~ '                                                        ;   ii = ii + 1
      !

      REWIND( kumnam_ref )              ! Namelist nammpp in reference namelist: mpi variables
      READ  ( kumnam_ref, nammpp, IOSTAT = ios, ERR = 901)
901   IF( ios /= 0 )   CALL ctl_nam ( ios , 'nammpp in reference namelist', lwp )

      REWIND( kumnam_cfg )              ! Namelist nammpp in configuration namelist: mpi variables
      READ  ( kumnam_cfg, nammpp, IOSTAT = ios, ERR = 902 )
902   IF( ios /= 0 )   CALL ctl_nam ( ios , 'nammpp in configuration namelist', lwp )

      !                                 ! control print
      WRITE(ldtxt(ii),*) '   Namelist nammpp'                                             ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '      mpi send type               cn_mpi_send = ', cn_mpi_send  ;   ii = ii + 1
      WRITE(ldtxt(ii),*) '      size in bytes of exported buffer   nn_buffer = ', nn_buffer   ;   ii = ii + 1

#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN
         jpni  = Agrif_Parent(jpni )
         jpnj  = Agrif_Parent(jpnj )
         jpnij = Agrif_Parent(jpnij)
      ENDIF
#endif

      IF( jpnij < 1 ) THEN
         ! If jpnij is not specified in the namelist then we calculate it - this
         ! means there will be no land cutting out.
         jpnij = jpni * jpnj
      END IF

      IF( (jpni < 1) .OR. (jpnj < 1) ) THEN
         WRITE(ldtxt(ii),*) '      jpni, jpnj and jpnij will be calculated automatically' ;   ii = ii + 1
      ELSE
         WRITE(ldtxt(ii),*) '      processor grid extent in i         jpni = ', jpni      ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '      processor grid extent in j         jpnj = ', jpnj      ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '      number of local domains           jpnij = ', jpnij     ;   ii = ii + 1
      END IF

      WRITE(ldtxt(ii),*) '      avoid use of mpi_allgather at the north fold  ln_nnogather = ', ln_nnogather   ;   ii = ii + 1

      CALL mpi_initialized ( mpi_was_called, code )
      IF( code /= MPI_SUCCESS ) THEN
         DO ji = 1, SIZE(ldtxt)
            IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)      ! control print of mynode
         END DO
         WRITE(*, cform_err)
         WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
         CALL mpi_abort( mpi_comm_world, code, ierr )
      ENDIF

      IF( mpi_was_called ) THEN
         !
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Standard blocking mpi send (send)'             ;   ii = ii + 1
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Buffer blocking mpi send (bsend)'              ;   ii = ii + 1
            IF( Agrif_Root() )   CALL mpi_init_opa( ldtxt, ii, ierr )
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(ii),*) '           Immediate non-blocking send (isend)'           ;   ii = ii + 1
            l_isend = .TRUE.
         CASE DEFAULT
            WRITE(ldtxt(ii),cform_err)                                                    ;   ii = ii + 1
            WRITE(ldtxt(ii),*) '           bad value for cn_mpi_send = ', cn_mpi_send     ;   ii = ii + 1
            kstop = kstop + 1
         END SELECT
      ELSE IF ( PRESENT(localComm) .AND. .NOT. mpi_was_called ) THEN
         WRITE(ldtxt(ii),*) ' lib_mpp: You cannot provide a local communicator '          ;   ii = ii + 1
         WRITE(ldtxt(ii),*) '          without calling MPI_Init before ! '                ;   ii = ii + 1
         kstop = kstop + 1
      ELSE
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Standard blocking mpi send (send)'             ;   ii = ii + 1
            CALL mpi_init( ierr )
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(ii),*) '           Buffer blocking mpi send (bsend)'              ;   ii = ii + 1
            IF( Agrif_Root() )   CALL mpi_init_opa( ldtxt, ii, ierr )
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(ii),*) '           Immediate non-blocking send (isend)'           ;   ii = ii + 1
            l_isend = .TRUE.
            CALL mpi_init( ierr )
         CASE DEFAULT
            WRITE(ldtxt(ii),cform_err)                                                    ;   ii = ii + 1
            WRITE(ldtxt(ii),*) '           bad value for cn_mpi_send = ', cn_mpi_send     ;   ii = ii + 1
            kstop = kstop + 1
         END SELECT
         !
      ENDIF

      IF( PRESENT(localComm) ) THEN
         IF( Agrif_Root() ) THEN
            mpi_comm_opa = localComm
         ENDIF
      ELSE
         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
         IF( code /= MPI_SUCCESS ) THEN
            DO ji = 1, SIZE(ldtxt)
               IF( TRIM(ldtxt(ji)) /= '' )   WRITE(*,*) ldtxt(ji)   ! control print of mynode
            END DO
            WRITE(*, cform_err)
            WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
            CALL mpi_abort( mpi_comm_world, code, ierr )
         ENDIF
      ENDIF

#if defined key_agrif
      IF( Agrif_Root() ) THEN
         CALL Agrif_MPI_Init(mpi_comm_opa)
      ELSE
         CALL Agrif_MPI_set_grid_comm(mpi_comm_opa)
      ENDIF
#endif

      CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
      CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
      mynode = mpprank

      IF( mynode == 0 ) THEN
         CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., 1 )
         WRITE(kumond, nammpp)
      ENDIF
      !
      CALL MPI_OP_CREATE(DDPDD_MPI, .TRUE., MPI_SUMDD, ierr)
      !
   END FUNCTION mynode
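
   !! Minimal calling sketch for mynode (illustrative only: the buffer size, unit
   !! numbers and namelist output file name are hypothetical; the real call sits in
   !! nemogcm.F90 during initialisation):
   !!      CHARACTER(len=200), DIMENSION(50) ::   cltxt   ! messages printed later by the master
   !!      INTEGER ::   inum_ref, inum_cfg, inum_out, istop, irank
   !!      istop = 0
   !!      irank = mynode( cltxt, 'output.namelist.dyn', inum_ref, inum_cfg, inum_out, istop )
   !!      IF( istop /= 0 )   CALL ctl_stop( 'STOP', 'mynode: MPI initialisation failed' )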

   SUBROUTINE mpp_lnk_3d( ptab, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_3d  ***
      !!
      !! ** Purpose :   Message passing management
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !! ** Action  :   ptab with updated values at its periphery
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) ::   ptab      ! 3D array on which the boundary condition is applied
      CHARACTER(len=1)                , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      !                                                              ! = T , U , V , F , W points
      REAL(wp)                        , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      !                                                              ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL      , INTENT(in   ) ::   cd_mpp    ! fill the overlap area only
      REAL(wp)        , OPTIONAL      , INTENT(in   ) ::   pval      ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jk, jl             ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ns, zt3sn   ! 3d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE ::   zt3ew, zt3we   ! 3d for east-west & west-east

      !!----------------------------------------------------------------------

      ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2),   &
         &      zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2)  )

      !
      IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
      ELSE                         ;   zland = 0.e0      ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with existing values
         !
         ! WARNING ptab is defined only between nld and nle
         DO jk = 1, jpk
            DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
               ptab(nldi  :nlei  , jj          ,jk) = ptab(nldi:nlei,     nlej,jk)
               ptab(1     :nldi-1, jj          ,jk) = ptab(nldi     ,     nlej,jk)
               ptab(nlei+1:nlci  , jj          ,jk) = ptab(     nlei,     nlej,jk)
            END DO
            DO ji = nlci+1, jpi                 ! added column(s) (full)
               ptab(ji           ,nldj  :nlej  ,jk) = ptab(     nlei,nldj:nlej,jk)
               ptab(ji           ,1     :nldj-1,jk) = ptab(     nlei,nldj     ,jk)
               ptab(ji           ,nlej+1:jpj   ,jk) = ptab(     nlei,     nlej,jk)
            END DO
         END DO
         !
      ELSE                              ! standard close or cyclic treatment
         !
         !                                   ! East-West boundaries
         !                                        !* Cyclic east-west
         IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
            ptab( 1 ,:,:) = ptab(jpim1,:,:)
            ptab(jpi,:,:) = ptab(  2  ,:,:)
         ELSE                                     !* closed
            IF( .NOT. cd_type == 'F' )   ptab(1:jpreci,:,:) = zland   ! west except F-point
            ptab(nlci-jpreci+1:jpi,:,:) = zland                       ! east
         ENDIF
         !                                   ! North-South boundaries (always closed)
         IF( .NOT. cd_type == 'F' )   ptab(:,1:jprecj,:) = zland      ! south except F-point
         ptab(:,nlcj-jprecj+1:jpj,:) = zland                          ! north
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. closed case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:)
            zt3we(:,jl,:,1) = ptab(iihom +jl,:,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj * jpk
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci-jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
            ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            ptab(jl      ,:,:) = zt3we(:,jl,:,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:)
            zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi * jpk
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
      CASE ( 0 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
      CASE ( 1 )
         CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj-jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            ptab(:,jl      ,:) = zt3sn(:,jl,:,2)
            ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            ptab(:,jl,:) = zt3sn(:,jl,:,2)
         END DO
      END SELECT


      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( ptab, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( ptab, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we )
      !
   END SUBROUTINE mpp_lnk_3d
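
   !! Typical use of the 3D halo exchange above (an illustrative sketch only: the
   !! field name is hypothetical, and model code normally goes through the lbc_lnk
   !! generic interface of lbclnk.F90 rather than calling mpp_lnk_3d directly):
   !!      REAL(wp), DIMENSION(jpi,jpj,jpk) ::   zt3d           ! local field with halos
   !!      CALL mpp_lnk_3d( zt3d, 'T', 1._wp )                  ! fill halos of a T-point scalar field
   !!      CALL mpp_lnk_3d( zt3d, 'T', 1._wp, pval = 999._wp )  ! same, with 999 on closed boundaries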

   SUBROUTINE mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_multiple  ***
      !!
      !! ** Purpose :   Message passing management for multiple 2d arrays
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------

      INTEGER ::   num_fields
      TYPE( arrayptr ), DIMENSION(:) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(:), INTENT(in   ) ::   type_array   ! define the nature of ptab array grid-points
      !                                                               ! = T , U , V , F , W and I points
      REAL(wp)        , DIMENSION(:), INTENT(in   ) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      !                                                               ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL    , INTENT(in   ) ::   cd_mpp       ! fill the overlap area only
      REAL(wp)        , OPTIONAL    , INTENT(in   ) ::   pval         ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   ii                         ! dummy loop index over the fields
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend

      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ns, zt2sn   ! 2d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ew, zt2we   ! 2d for east-west & west-east

      !!----------------------------------------------------------------------

      ALLOCATE( zt2ns(jpi,jprecj,2*num_fields), zt2sn(jpi,jprecj,2*num_fields),   &
         &      zt2ew(jpj,jpreci,2*num_fields), zt2we(jpj,jpreci,2*num_fields)  )

      !
      IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
      ELSE                         ;   zland = 0.e0      ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      !
      ! loop over the fields
      DO ii = 1 , num_fields
         IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with existing values
            !
            ! WARNING pt2d is defined only between nld and nle
            DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
               pt2d_array(ii)%pt2d(nldi  :nlei  , jj) = pt2d_array(ii)%pt2d(nldi:nlei, nlej)
               pt2d_array(ii)%pt2d(1     :nldi-1, jj) = pt2d_array(ii)%pt2d(nldi     , nlej)
               pt2d_array(ii)%pt2d(nlei+1:nlci  , jj) = pt2d_array(ii)%pt2d(     nlei, nlej)
            END DO
            DO ji = nlci+1, jpi                 ! added column(s) (full)
               pt2d_array(ii)%pt2d(ji, nldj  :nlej  ) = pt2d_array(ii)%pt2d(nlei, nldj:nlej)
               pt2d_array(ii)%pt2d(ji, 1     :nldj-1) = pt2d_array(ii)%pt2d(nlei, nldj     )
               pt2d_array(ii)%pt2d(ji, nlej+1:jpj   ) = pt2d_array(ii)%pt2d(nlei,      nlej)
            END DO
            !
         ELSE                              ! standard close or cyclic treatment
            !
            !                                   ! East-West boundaries
            IF( nbondi == 2 .AND.   &                ! Cyclic east-west
               &    (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
               pt2d_array(ii)%pt2d(  1  , : ) = pt2d_array(ii)%pt2d( jpim1, : )   ! west
               pt2d_array(ii)%pt2d( jpi , : ) = pt2d_array(ii)%pt2d(   2  , : )   ! east
            ELSE                                     ! closed
               IF( .NOT. type_array(ii) == 'F' )   pt2d_array(ii)%pt2d(1:jpreci,:) = zland   ! west except F-point
               pt2d_array(ii)%pt2d(nlci-jpreci+1:jpi,:) = zland                              ! east
            ENDIF
            !                                   ! North-South boundaries (always closed)
            IF( .NOT. type_array(ii) == 'F' )   pt2d_array(ii)%pt2d(:,1:jprecj) = zland      ! south except F-point
            pt2d_array(ii)%pt2d(:,nlcj-jprecj+1:jpj) = zland                                 ! north
            !
         ENDIF
      END DO

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      DO ii = 1 , num_fields
         SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
         CASE ( -1, 0, 1 )                ! all except 2 (i.e. closed case)
            iihom = nlci-nreci
            DO jl = 1, jpreci
               zt2ew( : , jl , ii ) = pt2d_array(ii)%pt2d( jpreci+jl , : )
               zt2we( : , jl , ii ) = pt2d_array(ii)%pt2d( iihom +jl , : )
            END DO
         END SELECT
      END DO
      !
      !                           ! Migrations
      imigr = jpreci * jpj
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt2we(1,1,1), num_fields*imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt2ew(1,1,num_fields+1), num_fields*imigr, noea )
         CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, zt2ew(1,1,1), num_fields*imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt2we(1,1,num_fields+1), num_fields*imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !

      DO ii = 1 , num_fields
         SELECT CASE ( nbondi )
         CASE ( -1 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii)
            END DO
         CASE ( 0 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( jl       , : ) = zt2we(:,jl,num_fields+ii)
               pt2d_array(ii)%pt2d( iihom+jl , : ) = zt2ew(:,jl,num_fields+ii)
            END DO
         CASE ( 1 )
            DO jl = 1, jpreci
               pt2d_array(ii)%pt2d( jl , : ) = zt2we(:,jl,num_fields+ii)
            END DO
         END SELECT
      END DO

      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      ! loop over the fields
      DO ii = 1 , num_fields
         IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
            ijhom = nlcj-nrecj
            DO jl = 1, jprecj
               zt2sn(:,jl , ii) = pt2d_array(ii)%pt2d( : , ijhom +jl )
               zt2ns(:,jl , ii) = pt2d_array(ii)%pt2d( : , jprecj+jl )
            END DO
         ENDIF
      END DO
      !
      !                           ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 )
         CALL mppsend( 4, zt2sn(1,1,1), num_fields*imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt2ns(1,1,num_fields+1), num_fields*imigr, nono )
         CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, zt2ns(1,1,1), num_fields*imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt2sn(1,1,num_fields+1), num_fields*imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !

      DO ii = 1 , num_fields
         SELECT CASE ( nbondj )
         CASE ( -1 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii )
            END DO
         CASE ( 0 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , jl       ) = zt2sn( : , jl , num_fields+ii )
               pt2d_array(ii)%pt2d( : , ijhom+jl ) = zt2ns( : , jl , num_fields+ii )
            END DO
         CASE ( 1 )
            DO jl = 1, jprecj
               pt2d_array(ii)%pt2d( : , jl ) = zt2sn( : , jl , num_fields+ii )
            END DO
         END SELECT
      END DO

      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )
            DO ii = 1 , num_fields
               CALL lbc_nfd( pt2d_array(ii)%pt2d( : , : ), type_array(ii), psgn_array(ii) )   ! only 1 northern proc, no mpp
            END DO
         CASE DEFAULT   ;   CALL mpp_lbc_north_2d_multiple( pt2d_array, type_array, psgn_array, num_fields )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !

      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
      !
   END SUBROUTINE mpp_lnk_2d_multiple
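
   !! Grouping several 2D exchanges into one call reduces the number of MPI messages.
   !! Hedged sketch of direct use (the field names zu2d and zv2d are hypothetical
   !! TARGET arrays of shape (jpi,jpj); in practice the wrapper mpp_lnk_2d_9 below
   !! fills these structures through load_array):
   !!      TYPE(arrayptr)  , DIMENSION(9) ::   zptrs
   !!      CHARACTER(len=1), DIMENSION(9) ::   ctypes
   !!      REAL(wp)        , DIMENSION(9) ::   zsgns
   !!      INTEGER ::   ifields
   !!      ifields = 0
   !!      CALL load_array( zu2d, 'U', -1._wp, zptrs, ctypes, zsgns, ifields )
   !!      CALL load_array( zv2d, 'V', -1._wp, zptrs, ctypes, zsgns, ifields )
   !!      CALL mpp_lnk_2d_multiple( zptrs, ctypes, zsgns, ifields )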


   SUBROUTINE load_array( pt2d, cd_type, psgn, pt2d_array, type_array, psgn_array, num_fields )
      !!---------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), TARGET, INTENT(inout) ::   pt2d      ! 2D array to be added to the exchange list
      CHARACTER(len=1)                    , INTENT(in   ) ::   cd_type   ! define the nature of the ptab array grid-points
      REAL(wp)                            , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      TYPE(arrayptr)  , DIMENSION(9) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(9) ::   type_array   ! define the nature of the ptab array grid-points
      REAL(wp)        , DIMENSION(9) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      INTEGER                             , INTENT(inout) ::   num_fields
      !!---------------------------------------------------------------------
      num_fields = num_fields + 1
      pt2d_array(num_fields)%pt2d => pt2d
      type_array(num_fields)      =  cd_type
      psgn_array(num_fields)      =  psgn
   END SUBROUTINE load_array


   SUBROUTINE mpp_lnk_2d_9( pt2dA, cd_typeA, psgnA, pt2dB, cd_typeB, psgnB, pt2dC, cd_typeC, psgnC   &
      &                   , pt2dD, cd_typeD, psgnD, pt2dE, cd_typeE, psgnE, pt2dF, cd_typeF, psgnF   &
      &                   , pt2dG, cd_typeG, psgnG, pt2dH, cd_typeH, psgnH, pt2dI, cd_typeI, psgnI, cd_mpp, pval )
      !!---------------------------------------------------------------------
      ! Up to nine 2D arrays on which the boundary condition is applied
      REAL(wp), DIMENSION(jpi,jpj), TARGET          , INTENT(inout) ::   pt2dA
      REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) ::   pt2dB, pt2dC, pt2dD, pt2dE
      REAL(wp), DIMENSION(jpi,jpj), TARGET, OPTIONAL, INTENT(inout) ::   pt2dF, pt2dG, pt2dH, pt2dI
      ! define the nature of the ptab array grid-points
      CHARACTER(len=1)                              , INTENT(in   ) ::   cd_typeA
      CHARACTER(len=1)                    , OPTIONAL, INTENT(in   ) ::   cd_typeB, cd_typeC, cd_typeD, cd_typeE
      CHARACTER(len=1)                    , OPTIONAL, INTENT(in   ) ::   cd_typeF, cd_typeG, cd_typeH, cd_typeI
      ! =-1 the sign change across the north fold boundary
      REAL(wp)                                      , INTENT(in   ) ::   psgnA
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   psgnB, psgnC, psgnD, psgnE
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   psgnF, psgnG, psgnH, psgnI
      CHARACTER(len=3)                    , OPTIONAL, INTENT(in   ) ::   cd_mpp   ! fill the overlap area only
      REAL(wp)                            , OPTIONAL, INTENT(in   ) ::   pval     ! background value (used at closed boundaries)
      !!
      TYPE(arrayptr)  , DIMENSION(9) ::   pt2d_array
      CHARACTER(len=1), DIMENSION(9) ::   type_array   ! define the nature of the ptab array grid-points
      !                                                ! = T , U , V , F , W and I points
      REAL(wp)        , DIMENSION(9) ::   psgn_array   ! =-1 the sign change across the north fold boundary
      INTEGER ::   num_fields
      !!---------------------------------------------------------------------

      num_fields = 0

      !! Load the first array
      CALL load_array( pt2dA, cd_typeA, psgnA, pt2d_array, type_array, psgn_array, num_fields )

      !! Check whether more arrays were provided
      IF( PRESENT(psgnB) )   CALL load_array( pt2dB, cd_typeB, psgnB, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnC) )   CALL load_array( pt2dC, cd_typeC, psgnC, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnD) )   CALL load_array( pt2dD, cd_typeD, psgnD, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnE) )   CALL load_array( pt2dE, cd_typeE, psgnE, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnF) )   CALL load_array( pt2dF, cd_typeF, psgnF, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnG) )   CALL load_array( pt2dG, cd_typeG, psgnG, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnH) )   CALL load_array( pt2dH, cd_typeH, psgnH, pt2d_array, type_array, psgn_array, num_fields )
      IF( PRESENT(psgnI) )   CALL load_array( pt2dI, cd_typeI, psgnI, pt2d_array, type_array, psgn_array, num_fields )

      CALL mpp_lnk_2d_multiple( pt2d_array, type_array, psgn_array, num_fields, cd_mpp, pval )
   END SUBROUTINE mpp_lnk_2d_9
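
   !! Hedged usage sketch for the wrapper above (the two field names are
   !! hypothetical): a single call exchanges both components with one set of
   !! MPI messages instead of two separate mpp_lnk_2d calls.
   !!      CALL mpp_lnk_2d_9( zu2d, 'U', -1._wp, zv2d, 'V', -1._wp )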


   SUBROUTINE mpp_lnk_2d( pt2d, cd_type, psgn, cd_mpp, pval )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d  ***
      !!
      !! ** Purpose :   Message passing management for 2d array
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!      between processors following neighboring subdomains.
      !!            domain parameters
      !!                    nlci   : first dimension of the local subdomain
      !!                    nlcj   : second dimension of the local subdomain
      !!                    nbondi : mark for "east-west local boundary"
      !!                    nbondj : mark for "north-south local boundary"
      !!                    noea   : number for local neighboring processors
      !!                    nowe   : number for local neighboring processors
      !!                    noso   : number for local neighboring processors
      !!                    nono   : number for local neighboring processors
      !!
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) ::   pt2d      ! 2D array on which the boundary condition is applied
      CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! define the nature of ptab array grid-points
      !                                                          ! = T , U , V , F , W and I points
      REAL(wp)                    , INTENT(in   ) ::   psgn      ! =-1 the sign change across the north fold boundary
      !                                                          ! = 1. , the sign is kept
      CHARACTER(len=3), OPTIONAL  , INTENT(in   ) ::   cd_mpp    ! fill the overlap area only
      REAL(wp)        , OPTIONAL  , INTENT(in   ) ::   pval      ! background value (used at closed boundaries)
      !!
      INTEGER  ::   ji, jj, jl                 ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom        ! temporary integers
      INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
      REAL(wp) ::   zland
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ns, zt2sn   ! 2d for north-south & south-north
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   zt2ew, zt2we   ! 2d for east-west & west-east

      !!----------------------------------------------------------------------

      ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2),   &
         &      zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2)  )

      !
      IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
      ELSE                         ;   zland = 0.e0      ! zero by default
      ENDIF

      ! 1. standard boundary treatment
      ! ------------------------------
      !
      IF( PRESENT( cd_mpp ) ) THEN      ! only fill added line/row with existing values
         !
         ! WARNING pt2d is defined only between nld and nle
         DO jj = nlcj+1, jpj                 ! added line(s)   (inner only)
            pt2d(nldi  :nlei  , jj          ) = pt2d(nldi:nlei,     nlej)
            pt2d(1     :nldi-1, jj          ) = pt2d(nldi     ,     nlej)
            pt2d(nlei+1:nlci  , jj          ) = pt2d(     nlei,     nlej)
         END DO
         DO ji = nlci+1, jpi                 ! added column(s) (full)
            pt2d(ji           ,nldj  :nlej  ) = pt2d(     nlei,nldj:nlej)
            pt2d(ji           ,1     :nldj-1) = pt2d(     nlei,nldj     )
            pt2d(ji           ,nlej+1:jpj   ) = pt2d(     nlei,     nlej)
         END DO
         !
      ELSE                              ! standard close or cyclic treatment
         !
         !                                   ! East-West boundaries
         IF( nbondi == 2 .AND.   &                ! Cyclic east-west
            &    (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN
            pt2d( 1 ,:) = pt2d(jpim1,:)                               ! west
            pt2d(jpi,:) = pt2d(  2  ,:)                               ! east
         ELSE                                     ! closed
            IF( .NOT. cd_type == 'F' )   pt2d(1:jpreci,:) = zland     ! west except F-point
            pt2d(nlci-jpreci+1:jpi,:) = zland                         ! east
         ENDIF
         !                                   ! North-South boundaries (always closed)
         IF( .NOT. cd_type == 'F' )   pt2d(:,1:jprecj) = zland        ! south except F-point
         pt2d(:,nlcj-jprecj+1:jpj) = zland                            ! north
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. closed case)
         iihom = nlci-nreci
         DO jl = 1, jpreci
            zt2ew(:,jl,1) = pt2d(jpreci+jl,:)
            zt2we(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      !                           ! Migrations
      imigr = jpreci * jpj
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      iihom = nlci - jpreci
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, jpreci
            pt2d(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = zt2we(:,jl,2)
            pt2d(iihom+jl,:) = zt2ew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jpreci
            pt2d(jl      ,:) = zt2we(:,jl,2)
         END DO
      END SELECT


      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = nlcj-nrecj
         DO jl = 1, jprecj
            zt2sn(:,jl,1) = pt2d(:,ijhom +jl)
            zt2ns(:,jl,1) = pt2d(:,jprecj+jl)
         END DO
      ENDIF
      !
      !                           ! Migrations
      imigr = jprecj * jpi
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      !                           ! Write Dirichlet lateral conditions
      ijhom = nlcj - jprecj
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, jprecj
            pt2d(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = zt2sn(:,jl,2)
            pt2d(:,ijhom+jl) = zt2ns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, jprecj
            pt2d(:,jl      ) = zt2sn(:,jl,2)
         END DO
      END SELECT


      ! 4. north fold treatment
      ! -----------------------
      !
      IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd      ( pt2d, cd_type, psgn )   ! only 1 northern proc, no mpp
         CASE DEFAULT   ;   CALL mpp_lbc_north( pt2d, cd_type, psgn )   ! for all northern procs.
         END SELECT
         !
      ENDIF
      !
      DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we )
      !
   END SUBROUTINE mpp_lnk_2d
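
   !! The 2D exchange above follows the same pattern as mpp_lnk_3d: tags 1/2 label
   !! the east-west message pair and tags 3/4 the north-south pair, while nbondi and
   !! nbondj select which sides actually take part in the MPI exchange (-1 or 1 for
   !! one side only, 0 for both, 2 for none).  Illustrative call on a hypothetical field:
   !!      CALL mpp_lnk_2d( zssh, 'T', 1._wp )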
---|
1026 | |
---|
1027 | |
---|
1028 | SUBROUTINE mpp_lnk_3d_gather( ptab1, cd_type1, ptab2, cd_type2, psgn ) |
---|
1029 | !!---------------------------------------------------------------------- |
---|
1030 | !! *** routine mpp_lnk_3d_gather *** |
---|
1031 | !! |
---|
1032 | !! ** Purpose : Message passing manadgement for two 3D arrays |
---|
1033 | !! |
---|
1034 | !! ** Method : Use mppsend and mpprecv function for passing mask |
---|
1035 | !! between processors following neighboring subdomains. |
---|
1036 | !! domain parameters |
---|
1037 | !! nlci : first dimension of the local subdomain |
---|
1038 | !! nlcj : second dimension of the local subdomain |
---|
1039 | !! nbondi : mark for "east-west local boundary" |
---|
1040 | !! nbondj : mark for "north-south local boundary" |
---|
1041 | !! noea : number for local neighboring processors |
---|
1042 | !! nowe : number for local neighboring processors |
---|
1043 | !! noso : number for local neighboring processors |
---|
1044 | !! nono : number for local neighboring processors |
---|
1045 | !! |
---|
1046 | !! ** Action : ptab1 and ptab2 with update value at its periphery |
---|
1047 | !! |
---|
1048 | !!---------------------------------------------------------------------- |
---|
1049 | REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab1 ! first and second 3D array on which |
---|
1050 | REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab2 ! the boundary condition is applied |
---|
1051 | CHARACTER(len=1) , INTENT(in ) :: cd_type1 ! nature of ptab1 and ptab2 arrays |
---|
1052 | CHARACTER(len=1) , INTENT(in ) :: cd_type2 ! i.e. grid-points = T , U , V , F or W points |
---|
1053 | REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary |
---|
1054 | !! ! = 1. , the sign is kept |
---|
1055 | INTEGER :: jl ! dummy loop indices |
---|
1056 | INTEGER :: imigr, iihom, ijhom ! temporary integers |
---|
1057 | INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend |
---|
1058 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend |
---|
1059 | ! |
---|
1060 | REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ns, zt4sn ! 2 x 3d for north-south & south-north |
---|
1061 | REAL(wp), DIMENSION(:,:,:,:,:), ALLOCATABLE :: zt4ew, zt4we ! 2 x 3d for east-west & west-east |
---|
1062 | |
---|
1063 | !!---------------------------------------------------------------------- |
---|
1064 | ALLOCATE( zt4ns(jpi,jprecj,jpk,2,2), zt4sn(jpi,jprecj,jpk,2,2) , & |
---|
1065 | & zt4ew(jpj,jpreci,jpk,2,2), zt4we(jpj,jpreci,jpk,2,2) ) |
---|
1066 | |
---|
1067 | |
---|
1068 | ! 1. standard boundary treatment |
---|
1069 | ! ------------------------------ |
---|
1070 | ! ! East-West boundaries |
---|
1071 | ! !* Cyclic east-west |
---|
1072 | IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN |
---|
1073 | ptab1( 1 ,:,:) = ptab1(jpim1,:,:) |
---|
1074 | ptab1(jpi,:,:) = ptab1( 2 ,:,:) |
---|
1075 | ptab2( 1 ,:,:) = ptab2(jpim1,:,:) |
---|
1076 | ptab2(jpi,:,:) = ptab2( 2 ,:,:) |
---|
1077 | ELSE !* closed |
---|
1078 | IF( .NOT. cd_type1 == 'F' ) ptab1( 1 :jpreci,:,:) = 0.e0 ! south except at F-point |
---|
1079 | IF( .NOT. cd_type2 == 'F' ) ptab2( 1 :jpreci,:,:) = 0.e0 |
---|
1080 | ptab1(nlci-jpreci+1:jpi ,:,:) = 0.e0 ! north |
---|
1081 | ptab2(nlci-jpreci+1:jpi ,:,:) = 0.e0 |
---|
1082 | ENDIF |
---|
1083 | |
---|
1084 | |
---|
1085 | ! ! North-South boundaries |
---|
1086 | IF( .NOT. cd_type1 == 'F' ) ptab1(:, 1 :jprecj,:) = 0.e0 ! south except at F-point |
---|
1087 | IF( .NOT. cd_type2 == 'F' ) ptab2(:, 1 :jprecj,:) = 0.e0 |
---|
1088 | ptab1(:,nlcj-jprecj+1:jpj ,:) = 0.e0 ! north |
---|
1089 | ptab2(:,nlcj-jprecj+1:jpj ,:) = 0.e0 |
---|
1090 | |
---|
1091 | |
---|
1092 | ! 2. East and west directions exchange |
---|
1093 | ! ------------------------------------ |
---|
1094 | ! we play with the neigbours AND the row number because of the periodicity |
---|
1095 | ! |
---|
1096 | SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions |
---|
1097 | CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) |
---|
1098 | iihom = nlci-nreci |
---|
1099 | DO jl = 1, jpreci |
---|
1100 | zt4ew(:,jl,:,1,1) = ptab1(jpreci+jl,:,:) |
---|
1101 | zt4we(:,jl,:,1,1) = ptab1(iihom +jl,:,:) |
---|
1102 | zt4ew(:,jl,:,2,1) = ptab2(jpreci+jl,:,:) |
---|
1103 | zt4we(:,jl,:,2,1) = ptab2(iihom +jl,:,:) |
---|
1104 | END DO |
---|
1105 | END SELECT |
---|
1106 | ! |
---|
1107 | ! ! Migrations |
---|
1108 | imigr = jpreci * jpj * jpk *2 |
---|
1109 | ! |
---|
1110 | SELECT CASE ( nbondi ) |
---|
1111 | CASE ( -1 ) |
---|
1112 | CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req1 ) |
---|
1113 | CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) |
---|
1114 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1115 | CASE ( 0 ) |
---|
1116 | CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) |
---|
1117 | CALL mppsend( 2, zt4we(1,1,1,1,1), imigr, noea, ml_req2 ) |
---|
1118 | CALL mpprecv( 1, zt4ew(1,1,1,1,2), imigr, noea ) |
---|
1119 | CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) |
---|
1120 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1121 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
1122 | CASE ( 1 ) |
---|
1123 | CALL mppsend( 1, zt4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) |
---|
1124 | CALL mpprecv( 2, zt4we(1,1,1,1,2), imigr, nowe ) |
---|
1125 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1126 | END SELECT |
---|
1127 | ! |
---|
1128 | ! ! Write Dirichlet lateral conditions |
---|
1129 | iihom = nlci - jpreci |
---|
1130 | ! |
---|
1131 | SELECT CASE ( nbondi ) |
---|
1132 | CASE ( -1 ) |
---|
1133 | DO jl = 1, jpreci |
---|
1134 | ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) |
---|
1135 | ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) |
---|
1136 | END DO |
---|
1137 | CASE ( 0 ) |
---|
1138 | DO jl = 1, jpreci |
---|
1139 | ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) |
---|
1140 | ptab1(iihom+jl,:,:) = zt4ew(:,jl,:,1,2) |
---|
1141 | ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) |
---|
1142 | ptab2(iihom+jl,:,:) = zt4ew(:,jl,:,2,2) |
---|
1143 | END DO |
---|
1144 | CASE ( 1 ) |
---|
1145 | DO jl = 1, jpreci |
---|
1146 | ptab1(jl ,:,:) = zt4we(:,jl,:,1,2) |
---|
1147 | ptab2(jl ,:,:) = zt4we(:,jl,:,2,2) |
---|
1148 | END DO |
---|
1149 | END SELECT |
---|
1150 | |
---|
1151 | |
---|
1152 | ! 3. North and south directions |
---|
1153 | ! ----------------------------- |
---|
1154 | ! always closed : we play only with the neighbours |
---|
1155 | ! |
---|
1156 | IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions |
---|
1157 | ijhom = nlcj - nrecj |
---|
1158 | DO jl = 1, jprecj |
---|
1159 | zt4sn(:,jl,:,1,1) = ptab1(:,ijhom +jl,:) |
---|
1160 | zt4ns(:,jl,:,1,1) = ptab1(:,jprecj+jl,:) |
---|
1161 | zt4sn(:,jl,:,2,1) = ptab2(:,ijhom +jl,:) |
---|
1162 | zt4ns(:,jl,:,2,1) = ptab2(:,jprecj+jl,:) |
---|
1163 | END DO |
---|
1164 | ENDIF |
---|
1165 | ! |
---|
1166 | ! ! Migrations |
---|
1167 | imigr = jprecj * jpi * jpk * 2 |
---|
1168 | ! |
---|
1169 | SELECT CASE ( nbondj ) |
---|
1170 | CASE ( -1 ) |
---|
1171 | CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req1 ) |
---|
1172 | CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) |
---|
1173 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1174 | CASE ( 0 ) |
---|
1175 | CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) |
---|
1176 | CALL mppsend( 4, zt4sn(1,1,1,1,1), imigr, nono, ml_req2 ) |
---|
1177 | CALL mpprecv( 3, zt4ns(1,1,1,1,2), imigr, nono ) |
---|
1178 | CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) |
---|
1179 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1180 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
1181 | CASE ( 1 ) |
---|
1182 | CALL mppsend( 3, zt4ns(1,1,1,1,1), imigr, noso, ml_req1 ) |
---|
1183 | CALL mpprecv( 4, zt4sn(1,1,1,1,2), imigr, noso ) |
---|
1184 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
1185 | END SELECT |
---|
1186 | ! |
---|
1187 | ! ! Write Dirichlet lateral conditions |
---|
1188 | ijhom = nlcj - jprecj |
---|
1189 | ! |
---|
1190 | SELECT CASE ( nbondj ) |
---|
1191 | CASE ( -1 ) |
---|
1192 | DO jl = 1, jprecj |
---|
1193 | ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) |
---|
1194 | ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) |
---|
1195 | END DO |
---|
1196 | CASE ( 0 ) |
---|
1197 | DO jl = 1, jprecj |
---|
1198 | ptab1(:,jl ,:) = zt4sn(:,jl,:,1,2) |
---|
1199 | ptab1(:,ijhom+jl,:) = zt4ns(:,jl,:,1,2) |
---|
1200 | ptab2(:,jl ,:) = zt4sn(:,jl,:,2,2) |
---|
1201 | ptab2(:,ijhom+jl,:) = zt4ns(:,jl,:,2,2) |
---|
1202 | END DO |
---|
1203 | CASE ( 1 ) |
---|
1204 | DO jl = 1, jprecj |
---|
1205 | ptab1(:,jl,:) = zt4sn(:,jl,:,1,2) |
---|
1206 | ptab2(:,jl,:) = zt4sn(:,jl,:,2,2) |
---|
1207 | END DO |
---|
1208 | END SELECT |
---|
1209 | |
---|
1210 | |
---|
1211 | ! 4. north fold treatment |
---|
1212 | ! ----------------------- |
---|
1213 | IF( npolj /= 0 ) THEN |
---|
1214 | ! |
---|
1215 | SELECT CASE ( jpni ) |
---|
1216 | CASE ( 1 ) |
---|
1217 | CALL lbc_nfd ( ptab1, cd_type1, psgn ) ! only for northern procs. |
---|
1218 | CALL lbc_nfd ( ptab2, cd_type2, psgn ) |
---|
1219 | CASE DEFAULT |
---|
1220 | CALL mpp_lbc_north( ptab1, cd_type1, psgn ) ! for all northern procs. |
---|
1221 | CALL mpp_lbc_north (ptab2, cd_type2, psgn) |
---|
1222 | END SELECT |
---|
1223 | ! |
---|
1224 | ENDIF |
---|
1225 | ! |
---|
1226 | DEALLOCATE( zt4ns, zt4sn, zt4ew, zt4we ) |
---|
1227 | ! |
---|
1228 | END SUBROUTINE mpp_lnk_3d_gather |
---|
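!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): a caller holding two 3D fields on
!! the same halo layout could exchange them in one pass, assuming the
!! dummy-argument order ptab1, cd_type1, ptab2, cd_type2, psgn used inside
!! this routine; zu3d, zv3d and the 'U','V' grid-point codes are illustrative
!! assumptions.
!!
!!    CALL mpp_lnk_3d_gather( zu3d, 'U', zv3d, 'V', -1._wp )
!!
!! Compared with two separate mpp_lnk_3d calls this halves the number of
!! messages, since both fields are packed into the zt4* buffers above.
!!----------------------------------------------------------------------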
1229 | |
---|
1230 | |
---|
1231 | SUBROUTINE mpp_lnk_2d_e( pt2d, cd_type, psgn, jpri, jprj ) |
---|
1232 | !!---------------------------------------------------------------------- |
---|
1233 | !! *** routine mpp_lnk_2d_e *** |
---|
1234 | !! |
---|
1235 | !! ** Purpose : Message passing management for 2d array (with halo) |
---|
1236 | !! |
---|
1237 | !! ** Method : Use mppsend and mpprecv function for passing mask |
---|
1238 | !! between processors following neighboring subdomains. |
---|
1239 | !! domain parameters |
---|
1240 | !! nlci : first dimension of the local subdomain |
---|
1241 | !! nlcj : second dimension of the local subdomain |
---|
1242 | !! jpri : number of rows for extra outer halo |
---|
1243 | !! jprj : number of columns for extra outer halo |
---|
1244 | !! nbondi : mark for "east-west local boundary" |
---|
1245 | !! nbondj : mark for "north-south local boundary" |
---|
1246 | !! noea : number for local neighboring processors |
---|
1247 | !! nowe : number for local neighboring processors |
---|
1248 | !! noso : number for local neighboring processors |
---|
1249 | !! nono : number for local neighboring processors |
---|
1250 | !! |
---|
1251 | !!---------------------------------------------------------------------- |
---|
1252 | INTEGER , INTENT(in ) :: jpri |
---|
1253 | INTEGER , INTENT(in ) :: jprj |
---|
1254 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo |
---|
1255 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points |
---|
1256 | ! ! = T , U , V , F , W and I points |
---|
1257 | REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the |
---|
1258 | !! ! north boundary, = 1. otherwise |
---|
1259 | INTEGER :: jl ! dummy loop indices |
---|
1260 | INTEGER :: imigr, iihom, ijhom ! temporary integers |
---|
1261 | INTEGER :: ipreci, iprecj ! temporary integers |
---|
1262 | INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend |
---|
1263 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend |
---|
1264 | !! |
---|
1265 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns |
---|
1266 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn |
---|
1267 | REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe |
---|
1268 | REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew |
---|
1269 | !!---------------------------------------------------------------------- |
---|
1270 | |
---|
1271 | ipreci = jpreci + jpri ! take into account outer extra 2D overlap area |
---|
1272 | iprecj = jprecj + jprj |
---|
1273 | |
---|
1274 | |
---|
1275 | ! 1. standard boundary treatment |
---|
1276 | ! ------------------------------ |
---|
1277 | ! Order matters Here !!!! |
---|
1278 | ! |
---|
1279 | ! !* North-South boundaries (always closed) |
---|
1280 | IF( .NOT. cd_type == 'F' ) pt2d(:, 1-jprj : jprecj ) = 0.e0 ! south except at F-point |
---|
1281 | pt2d(:,nlcj-jprecj+1:jpj+jprj) = 0.e0 ! north |
---|
1282 | |
---|
1283 | ! ! East-West boundaries |
---|
1284 | ! !* Cyclic east-west |
---|
1285 | IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN |
---|
1286 | pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east |
---|
1287 | pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west |
---|
1288 | ! |
---|
1289 | ELSE !* closed |
---|
1290 | IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! west except at F-point |
---|
1291 | pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! east |
---|
1292 | ENDIF |
---|
1293 | ! |
---|
1294 | |
---|
1295 | ! north fold treatment |
---|
1296 | ! ----------------------- |
---|
1297 | IF( npolj /= 0 ) THEN |
---|
1298 | ! |
---|
1299 | SELECT CASE ( jpni ) |
---|
1300 | CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj ) |
---|
1301 | CASE DEFAULT ; CALL mpp_lbc_north_e( pt2d , cd_type, psgn ) |
---|
1302 | END SELECT |
---|
1303 | ! |
---|
1304 | ENDIF |
---|
1305 | |
---|
1306 | ! 2. East and west directions exchange |
---|
1307 | ! ------------------------------------ |
---|
1308 | ! we play with the neighbours AND the row number because of the periodicity |
---|
1309 | ! |
---|
1310 | SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions |
---|
1311 | CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case) |
---|
1312 | iihom = nlci-nreci-jpri |
---|
1313 | DO jl = 1, ipreci |
---|
1314 | r2dew(:,jl,1) = pt2d(jpreci+jl,:) |
---|
1315 | r2dwe(:,jl,1) = pt2d(iihom +jl,:) |
---|
1316 | END DO |
---|
1317 | END SELECT |
---|
1318 | ! |
---|
1319 | ! ! Migrations |
---|
1320 | imigr = ipreci * ( jpj + 2*jprj) |
---|
1321 | ! |
---|
1322 | SELECT CASE ( nbondi ) |
---|
1323 | CASE ( -1 ) |
---|
1324 | CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 ) |
---|
1325 | CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) |
---|
1326 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1327 | CASE ( 0 ) |
---|
1328 | CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) |
---|
1329 | CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 ) |
---|
1330 | CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) |
---|
1331 | CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) |
---|
1332 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1333 | IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) |
---|
1334 | CASE ( 1 ) |
---|
1335 | CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) |
---|
1336 | CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) |
---|
1337 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1338 | END SELECT |
---|
1339 | ! |
---|
1340 | ! ! Write Dirichlet lateral conditions |
---|
1341 | iihom = nlci - jpreci |
---|
1342 | ! |
---|
1343 | SELECT CASE ( nbondi ) |
---|
1344 | CASE ( -1 ) |
---|
1345 | DO jl = 1, ipreci |
---|
1346 | pt2d(iihom+jl,:) = r2dew(:,jl,2) |
---|
1347 | END DO |
---|
1348 | CASE ( 0 ) |
---|
1349 | DO jl = 1, ipreci |
---|
1350 | pt2d(jl-jpri,:) = r2dwe(:,jl,2) |
---|
1351 | pt2d( iihom+jl,:) = r2dew(:,jl,2) |
---|
1352 | END DO |
---|
1353 | CASE ( 1 ) |
---|
1354 | DO jl = 1, ipreci |
---|
1355 | pt2d(jl-jpri,:) = r2dwe(:,jl,2) |
---|
1356 | END DO |
---|
1357 | END SELECT |
---|
1358 | |
---|
1359 | |
---|
1360 | ! 3. North and south directions |
---|
1361 | ! ----------------------------- |
---|
1362 | ! always closed : we play only with the neighbours |
---|
1363 | ! |
---|
1364 | IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions |
---|
1365 | ijhom = nlcj-nrecj-jprj |
---|
1366 | DO jl = 1, iprecj |
---|
1367 | r2dsn(:,jl,1) = pt2d(:,ijhom +jl) |
---|
1368 | r2dns(:,jl,1) = pt2d(:,jprecj+jl) |
---|
1369 | END DO |
---|
1370 | ENDIF |
---|
1371 | ! |
---|
1372 | ! ! Migrations |
---|
1373 | imigr = iprecj * ( jpi + 2*jpri ) |
---|
1374 | ! |
---|
1375 | SELECT CASE ( nbondj ) |
---|
1376 | CASE ( -1 ) |
---|
1377 | CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 ) |
---|
1378 | CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) |
---|
1379 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1380 | CASE ( 0 ) |
---|
1381 | CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) |
---|
1382 | CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 ) |
---|
1383 | CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) |
---|
1384 | CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) |
---|
1385 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1386 | IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) |
---|
1387 | CASE ( 1 ) |
---|
1388 | CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) |
---|
1389 | CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) |
---|
1390 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
1391 | END SELECT |
---|
1392 | ! |
---|
1393 | ! ! Write Dirichlet lateral conditions |
---|
1394 | ijhom = nlcj - jprecj |
---|
1395 | ! |
---|
1396 | SELECT CASE ( nbondj ) |
---|
1397 | CASE ( -1 ) |
---|
1398 | DO jl = 1, iprecj |
---|
1399 | pt2d(:,ijhom+jl) = r2dns(:,jl,2) |
---|
1400 | END DO |
---|
1401 | CASE ( 0 ) |
---|
1402 | DO jl = 1, iprecj |
---|
1403 | pt2d(:,jl-jprj) = r2dsn(:,jl,2) |
---|
1404 | pt2d(:,ijhom+jl ) = r2dns(:,jl,2) |
---|
1405 | END DO |
---|
1406 | CASE ( 1 ) |
---|
1407 | DO jl = 1, iprecj |
---|
1408 | pt2d(:,jl-jprj) = r2dsn(:,jl,2) |
---|
1409 | END DO |
---|
1410 | END SELECT |
---|
1411 | |
---|
1412 | END SUBROUTINE mpp_lnk_2d_e |
---|
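!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): a 2D field carrying an extra outer
!! halo of ihalo rows and columns would be exchanged as below; ihalo and
!! zfld_e are assumed names, only the argument list of mpp_lnk_2d_e above is
!! taken from this routine.
!!
!!    INTEGER , PARAMETER :: ihalo = 2
!!    REAL(wp), DIMENSION(1-ihalo:jpi+ihalo,1-ihalo:jpj+ihalo) :: zfld_e
!!    CALL mpp_lnk_2d_e( zfld_e, 'T', 1._wp, ihalo, ihalo )
!!----------------------------------------------------------------------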
1413 | |
---|
1414 | |
---|
1415 | SUBROUTINE mppsend( ktyp, pmess, kbytes, kdest, md_req ) |
---|
1416 | !!---------------------------------------------------------------------- |
---|
1417 | !! *** routine mppsend *** |
---|
1418 | !! |
---|
1419 | !! ** Purpose : Send message passing array |
---|
1420 | !! |
---|
1421 | !!---------------------------------------------------------------------- |
---|
1422 | REAL(wp), INTENT(inout) :: pmess(*) ! array of real |
---|
1423 | INTEGER , INTENT(in ) :: kbytes ! size of the array pmess |
---|
1424 | INTEGER , INTENT(in ) :: kdest ! receive process number |
---|
1425 | INTEGER , INTENT(in ) :: ktyp ! tag of the message |
---|
1426 | INTEGER , INTENT(in ) :: md_req ! argument for isend |
---|
1427 | !! |
---|
1428 | INTEGER :: iflag |
---|
1429 | !!---------------------------------------------------------------------- |
---|
1430 | ! |
---|
1431 | SELECT CASE ( cn_mpi_send ) |
---|
1432 | CASE ( 'S' ) ! Standard mpi send (blocking) |
---|
1433 | CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag ) |
---|
1434 | CASE ( 'B' ) ! Buffer mpi send (blocking) |
---|
1435 | CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa , iflag ) |
---|
1436 | CASE ( 'I' ) ! Immediate mpi send (non-blocking send) |
---|
1437 | ! be careful, one more argument here: the MPI request identifier |
---|
1438 | CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_opa, md_req, iflag ) |
---|
1439 | END SELECT |
---|
1440 | ! |
---|
1441 | END SUBROUTINE mppsend |
---|
1442 | |
---|
1443 | |
---|
1444 | SUBROUTINE mpprecv( ktyp, pmess, kbytes, ksource ) |
---|
1445 | !!---------------------------------------------------------------------- |
---|
1446 | !! *** routine mpprecv *** |
---|
1447 | !! |
---|
1448 | !! ** Purpose : Receive message passing array |
---|
1449 | !! |
---|
1450 | !!---------------------------------------------------------------------- |
---|
1451 | REAL(wp), INTENT(inout) :: pmess(*) ! array of real |
---|
1452 | INTEGER , INTENT(in ) :: kbytes ! size of the array pmess |
---|
1453 | INTEGER , INTENT(in ) :: ktyp ! Tag of the received message |
---|
1454 | INTEGER, OPTIONAL, INTENT(in) :: ksource ! source process number |
---|
1455 | !! |
---|
1456 | INTEGER :: istatus(mpi_status_size) |
---|
1457 | INTEGER :: iflag |
---|
1458 | INTEGER :: use_source |
---|
1459 | !!---------------------------------------------------------------------- |
---|
1460 | ! |
---|
1461 | |
---|
1462 | ! If a specific process number has been passed to the receive call, |
---|
1463 | ! use that one. Default is to use mpi_any_source |
---|
1464 | use_source=mpi_any_source |
---|
1465 | if(present(ksource)) then |
---|
1466 | use_source=ksource |
---|
1467 | end if |
---|
1468 | |
---|
1469 | CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_opa, istatus, iflag ) |
---|
1470 | ! |
---|
1471 | END SUBROUTINE mpprecv |
---|
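!!----------------------------------------------------------------------
!! Hedged pairing sketch (illustration only): mppsend and mpprecv are used in
!! matched pairs, the tag selecting the leg of the exchange (1/2 east-west,
!! 3/4 north-south in the link routines above). A typical exchange with an
!! eastern neighbour, assuming buffers zbuf_snd/zbuf_rcv of imigr elements
!! and cn_mpi_send = 'I' (non-blocking isend), would read:
!!
!!    CALL mppsend( 2, zbuf_snd(1), imigr, noea, ml_req1 )
!!    CALL mpprecv( 1, zbuf_rcv(1), imigr, noea )
!!    IF( l_isend )   CALL mpi_wait( ml_req1, ml_stat, ml_err )
!!----------------------------------------------------------------------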
1472 | |
---|
1473 | |
---|
1474 | SUBROUTINE mppgather( ptab, kp, pio ) |
---|
1475 | !!---------------------------------------------------------------------- |
---|
1476 | !! *** routine mppgather *** |
---|
1477 | !! |
---|
1478 | !! ** Purpose : Transfer between a local subdomain array and a work |
---|
1479 | !! array which is distributed following the vertical level. |
---|
1480 | !! |
---|
1481 | !!---------------------------------------------------------------------- |
---|
1482 | REAL(wp), DIMENSION(jpi,jpj), INTENT(in ) :: ptab ! subdomain input array |
---|
1483 | INTEGER , INTENT(in ) :: kp ! root process of the gather |
---|
1484 | REAL(wp), DIMENSION(jpi,jpj,jpnij), INTENT( out) :: pio ! gathered array (complete on root kp) |
---|
1485 | !! |
---|
1486 | INTEGER :: itaille, ierror ! temporary integer |
---|
1487 | !!--------------------------------------------------------------------- |
---|
1488 | ! |
---|
1489 | itaille = jpi * jpj |
---|
1490 | CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , & |
---|
1491 | & mpi_double_precision, kp , mpi_comm_opa, ierror ) |
---|
1492 | ! |
---|
1493 | END SUBROUTINE mppgather |
---|
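!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): gathering one 2D subdomain field
!! per process onto root rank 0; zloc and zglo are assumed local array names.
!!
!!    REAL(wp), DIMENSION(jpi,jpj)       :: zloc
!!    REAL(wp), DIMENSION(jpi,jpj,jpnij) :: zglo
!!    CALL mppgather( zloc, 0, zglo )   ! zglo is complete on rank 0 only
!!----------------------------------------------------------------------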
1494 | |
---|
1495 | |
---|
1496 | SUBROUTINE mppscatter( pio, kp, ptab ) |
---|
1497 | !!---------------------------------------------------------------------- |
---|
1498 | !! *** routine mppscatter *** |
---|
1499 | !! |
---|
1500 | !! ** Purpose : Transfer between a work array which is distributed |
---|
1501 | !! following the vertical level and the local subdomain array. |
---|
1502 | !! |
---|
1503 | !!---------------------------------------------------------------------- |
---|
1504 | REAL(wp), DIMENSION(jpi,jpj,jpnij) :: pio ! output array |
---|
1505 | INTEGER :: kp ! root process of the scatter |
---|
1506 | REAL(wp), DIMENSION(jpi,jpj) :: ptab ! subdomain array input |
---|
1507 | !! |
---|
1508 | INTEGER :: itaille, ierror ! temporary integer |
---|
1509 | !!--------------------------------------------------------------------- |
---|
1510 | ! |
---|
1511 | itaille=jpi*jpj |
---|
1512 | ! |
---|
1513 | CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , & |
---|
1514 | & mpi_double_precision, kp , mpi_comm_opa, ierror ) |
---|
1515 | ! |
---|
1516 | END SUBROUTINE mppscatter |
---|
1517 | |
---|
1518 | |
---|
1519 | SUBROUTINE mppmax_a_int( ktab, kdim, kcom ) |
---|
1520 | !!---------------------------------------------------------------------- |
---|
1521 | !! *** routine mppmax_a_int *** |
---|
1522 | !! |
---|
1523 | !! ** Purpose : Find maximum value in an integer layout array |
---|
1524 | !! |
---|
1525 | !!---------------------------------------------------------------------- |
---|
1526 | INTEGER , INTENT(in ) :: kdim ! size of array |
---|
1527 | INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array |
---|
1528 | INTEGER , INTENT(in ), OPTIONAL :: kcom ! |
---|
1529 | !! |
---|
1530 | INTEGER :: ierror, localcomm ! temporary integer |
---|
1531 | INTEGER, DIMENSION(kdim) :: iwork |
---|
1532 | !!---------------------------------------------------------------------- |
---|
1533 | ! |
---|
1534 | localcomm = mpi_comm_opa |
---|
1535 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1536 | ! |
---|
1537 | CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, localcomm, ierror ) |
---|
1538 | ! |
---|
1539 | ktab(:) = iwork(:) |
---|
1540 | ! |
---|
1541 | END SUBROUTINE mppmax_a_int |
---|
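!!----------------------------------------------------------------------
!! Hedged note (illustration only): the mppmax_*, mppmin_* and mppsum_*
!! routines below all follow this in-place mpi_allreduce pattern. A global
!! maximum of a small integer work array (iwrk is an assumed name) would be
!! obtained on every process with:
!!
!!    INTEGER, DIMENSION(3) :: iwrk
!!    CALL mppmax_a_int( iwrk, 3 )              ! over mpi_comm_opa
!!    CALL mppmax_a_int( iwrk, 3, ncomm_znl )   ! or over a sub-communicator
!!----------------------------------------------------------------------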
1542 | |
---|
1543 | |
---|
1544 | SUBROUTINE mppmax_int( ktab, kcom ) |
---|
1545 | !!---------------------------------------------------------------------- |
---|
1546 | !! *** routine mppmax_int *** |
---|
1547 | !! |
---|
1548 | !! ** Purpose : Find maximum value of an integer scalar over all processors |
---|
1549 | !! |
---|
1550 | !!---------------------------------------------------------------------- |
---|
1551 | INTEGER, INTENT(inout) :: ktab ! scalar to be reduced (in place) |
---|
1552 | INTEGER, INTENT(in ), OPTIONAL :: kcom ! optional communicator |
---|
1553 | !! |
---|
1554 | INTEGER :: ierror, iwork, localcomm ! temporary integer |
---|
1555 | !!---------------------------------------------------------------------- |
---|
1556 | ! |
---|
1557 | localcomm = mpi_comm_opa |
---|
1558 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1559 | ! |
---|
1560 | CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, localcomm, ierror) |
---|
1561 | ! |
---|
1562 | ktab = iwork |
---|
1563 | ! |
---|
1564 | END SUBROUTINE mppmax_int |
---|
1565 | |
---|
1566 | |
---|
1567 | SUBROUTINE mppmin_a_int( ktab, kdim, kcom ) |
---|
1568 | !!---------------------------------------------------------------------- |
---|
1569 | !! *** routine mppmin_a_int *** |
---|
1570 | !! |
---|
1571 | !! ** Purpose : Find minimum value in an integer layout array |
---|
1572 | !! |
---|
1573 | !!---------------------------------------------------------------------- |
---|
1574 | INTEGER , INTENT( in ) :: kdim ! size of array |
---|
1575 | INTEGER , INTENT(inout), DIMENSION(kdim) :: ktab ! input array |
---|
1576 | INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator |
---|
1577 | !! |
---|
1578 | INTEGER :: ierror, localcomm ! temporary integer |
---|
1579 | INTEGER, DIMENSION(kdim) :: iwork |
---|
1580 | !!---------------------------------------------------------------------- |
---|
1581 | ! |
---|
1582 | localcomm = mpi_comm_opa |
---|
1583 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1584 | ! |
---|
1585 | CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, localcomm, ierror ) |
---|
1586 | ! |
---|
1587 | ktab(:) = iwork(:) |
---|
1588 | ! |
---|
1589 | END SUBROUTINE mppmin_a_int |
---|
1590 | |
---|
1591 | |
---|
1592 | SUBROUTINE mppmin_int( ktab, kcom ) |
---|
1593 | !!---------------------------------------------------------------------- |
---|
1594 | !! *** routine mppmin_int *** |
---|
1595 | !! |
---|
1596 | !! ** Purpose : Find minimum value of an integer scalar over all processors |
---|
1597 | !! |
---|
1598 | !!---------------------------------------------------------------------- |
---|
1599 | INTEGER, INTENT(inout) :: ktab ! scalar to be reduced (in place) |
---|
1600 | INTEGER , INTENT( in ), OPTIONAL :: kcom ! optional communicator |
---|
1601 | !! |
---|
1602 | INTEGER :: ierror, iwork, localcomm |
---|
1603 | !!---------------------------------------------------------------------- |
---|
1604 | ! |
---|
1605 | localcomm = mpi_comm_opa |
---|
1606 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1607 | ! |
---|
1608 | CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, localcomm, ierror ) |
---|
1609 | ! |
---|
1610 | ktab = iwork |
---|
1611 | ! |
---|
1612 | END SUBROUTINE mppmin_int |
---|
1613 | |
---|
1614 | |
---|
1615 | SUBROUTINE mppsum_a_int( ktab, kdim ) |
---|
1616 | !!---------------------------------------------------------------------- |
---|
1617 | !! *** routine mppsum_a_int *** |
---|
1618 | !! |
---|
1619 | !! ** Purpose : Global integer sum, 1D array case |
---|
1620 | !! |
---|
1621 | !!---------------------------------------------------------------------- |
---|
1622 | INTEGER, INTENT(in ) :: kdim ! size of the array |
---|
1623 | INTEGER, INTENT(inout), DIMENSION (kdim) :: ktab ! array to be summed (in place) |
---|
1624 | !! |
---|
1625 | INTEGER :: ierror |
---|
1626 | INTEGER, DIMENSION (kdim) :: iwork |
---|
1627 | !!---------------------------------------------------------------------- |
---|
1628 | ! |
---|
1629 | CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) |
---|
1630 | ! |
---|
1631 | ktab(:) = iwork(:) |
---|
1632 | ! |
---|
1633 | END SUBROUTINE mppsum_a_int |
---|
1634 | |
---|
1635 | |
---|
1636 | SUBROUTINE mppsum_int( ktab ) |
---|
1637 | !!---------------------------------------------------------------------- |
---|
1638 | !! *** routine mppsum_int *** |
---|
1639 | !! |
---|
1640 | !! ** Purpose : Global integer sum |
---|
1641 | !! |
---|
1642 | !!---------------------------------------------------------------------- |
---|
1643 | INTEGER, INTENT(inout) :: ktab |
---|
1644 | !! |
---|
1645 | INTEGER :: ierror, iwork |
---|
1646 | !!---------------------------------------------------------------------- |
---|
1647 | ! |
---|
1648 | CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_opa, ierror ) |
---|
1649 | ! |
---|
1650 | ktab = iwork |
---|
1651 | ! |
---|
1652 | END SUBROUTINE mppsum_int |
---|
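!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): counting grid points that meet a
!! local criterion across all subdomains; zmask and icount are assumed names.
!!
!!    icount = COUNT( zmask(:,:) > 0.5_wp )   ! local count on this subdomain
!!    CALL mppsum_int( icount )               ! icount now holds the global count
!!----------------------------------------------------------------------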
1653 | |
---|
1654 | |
---|
1655 | SUBROUTINE mppmax_a_real( ptab, kdim, kcom ) |
---|
1656 | !!---------------------------------------------------------------------- |
---|
1657 | !! *** routine mppmax_a_real *** |
---|
1658 | !! |
---|
1659 | !! ** Purpose : Maximum of a real array over all processors |
---|
1660 | !! |
---|
1661 | !!---------------------------------------------------------------------- |
---|
1662 | INTEGER , INTENT(in ) :: kdim |
---|
1663 | REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab |
---|
1664 | INTEGER , INTENT(in ), OPTIONAL :: kcom |
---|
1665 | !! |
---|
1666 | INTEGER :: ierror, localcomm |
---|
1667 | REAL(wp), DIMENSION(kdim) :: zwork |
---|
1668 | !!---------------------------------------------------------------------- |
---|
1669 | ! |
---|
1670 | localcomm = mpi_comm_opa |
---|
1671 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1672 | ! |
---|
1673 | CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, localcomm, ierror ) |
---|
1674 | ptab(:) = zwork(:) |
---|
1675 | ! |
---|
1676 | END SUBROUTINE mppmax_a_real |
---|
1677 | |
---|
1678 | |
---|
1679 | SUBROUTINE mppmax_real( ptab, kcom ) |
---|
1680 | !!---------------------------------------------------------------------- |
---|
1681 | !! *** routine mppmax_real *** |
---|
1682 | !! |
---|
1683 | !! ** Purpose : Maximum of a real scalar over all processors |
---|
1684 | !! |
---|
1685 | !!---------------------------------------------------------------------- |
---|
1686 | REAL(wp), INTENT(inout) :: ptab ! scalar to be reduced (in place) |
---|
1687 | INTEGER , INTENT(in ), OPTIONAL :: kcom ! optional communicator |
---|
1688 | !! |
---|
1689 | INTEGER :: ierror, localcomm |
---|
1690 | REAL(wp) :: zwork |
---|
1691 | !!---------------------------------------------------------------------- |
---|
1692 | ! |
---|
1693 | localcomm = mpi_comm_opa |
---|
1694 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1695 | ! |
---|
1696 | CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, localcomm, ierror ) |
---|
1697 | ptab = zwork |
---|
1698 | ! |
---|
1699 | END SUBROUTINE mppmax_real |
---|
1700 | |
---|
1701 | SUBROUTINE mppmax_real_multiple( ptab, NUM , kcom ) |
---|
1702 | !!---------------------------------------------------------------------- |
---|
1703 | !! *** routine mppmax_real_multiple *** |
---|
1704 | !! |
---|
1705 | !! ** Purpose : Maximum of a 1D real array (NUM elements) over all processors |
---|
1706 | !! |
---|
1707 | !!---------------------------------------------------------------------- |
---|
1708 | REAL(wp), DIMENSION(:) , INTENT(inout) :: ptab ! 1D array to be reduced (in place) |
---|
1709 | INTEGER , INTENT(in ) :: NUM ! number of elements in ptab |
---|
1710 | INTEGER , INTENT(in ), OPTIONAL :: kcom ! optional communicator |
---|
1711 | !! |
---|
1712 | INTEGER :: ierror, localcomm |
---|
1713 | REAL(wp) , POINTER , DIMENSION(:) :: zwork |
---|
1714 | !!---------------------------------------------------------------------- |
---|
1715 | ! |
---|
1716 | CALL wrk_alloc(NUM , zwork) |
---|
1717 | localcomm = mpi_comm_opa |
---|
1718 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1719 | ! |
---|
1720 | CALL mpi_allreduce( ptab, zwork, NUM, mpi_double_precision, mpi_max, localcomm, ierror ) |
---|
1721 | ptab = zwork |
---|
1722 | CALL wrk_dealloc(NUM , zwork) |
---|
1723 | ! |
---|
1724 | END SUBROUTINE mppmax_real_multiple |
---|
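!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): reducing several scalar maxima in
!! a single call instead of NUM separate mppmax_real calls; zmaxs is an
!! assumed local array (e.g. a few CFL diagnostics).
!!
!!    REAL(wp), DIMENSION(3) :: zmaxs
!!    CALL mppmax_real_multiple( zmaxs, 3 )   ! one allreduce for all three values
!!----------------------------------------------------------------------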
1725 | |
---|
1726 | |
---|
1727 | SUBROUTINE mppmin_a_real( ptab, kdim, kcom ) |
---|
1728 | !!---------------------------------------------------------------------- |
---|
1729 | !! *** routine mppmin_a_real *** |
---|
1730 | !! |
---|
1731 | !! ** Purpose : Minimum of REAL, array case |
---|
1732 | !! |
---|
1733 | !!----------------------------------------------------------------------- |
---|
1734 | INTEGER , INTENT(in ) :: kdim |
---|
1735 | REAL(wp), INTENT(inout), DIMENSION(kdim) :: ptab |
---|
1736 | INTEGER , INTENT(in ), OPTIONAL :: kcom |
---|
1737 | !! |
---|
1738 | INTEGER :: ierror, localcomm |
---|
1739 | REAL(wp), DIMENSION(kdim) :: zwork |
---|
1740 | !!----------------------------------------------------------------------- |
---|
1741 | ! |
---|
1742 | localcomm = mpi_comm_opa |
---|
1743 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1744 | ! |
---|
1745 | CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, localcomm, ierror ) |
---|
1746 | ptab(:) = zwork(:) |
---|
1747 | ! |
---|
1748 | END SUBROUTINE mppmin_a_real |
---|
1749 | |
---|
1750 | |
---|
1751 | SUBROUTINE mppmin_real( ptab, kcom ) |
---|
1752 | !!---------------------------------------------------------------------- |
---|
1753 | !! *** routine mppmin_real *** |
---|
1754 | !! |
---|
1755 | !! ** Purpose : minimum of REAL, scalar case |
---|
1756 | !! |
---|
1757 | !!----------------------------------------------------------------------- |
---|
1758 | REAL(wp), INTENT(inout) :: ptab ! |
---|
1759 | INTEGER , INTENT(in ), OPTIONAL :: kcom |
---|
1760 | !! |
---|
1761 | INTEGER :: ierror |
---|
1762 | REAL(wp) :: zwork |
---|
1763 | INTEGER :: localcomm |
---|
1764 | !!----------------------------------------------------------------------- |
---|
1765 | ! |
---|
1766 | localcomm = mpi_comm_opa |
---|
1767 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1768 | ! |
---|
1769 | CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, localcomm, ierror ) |
---|
1770 | ptab = zwork |
---|
1771 | ! |
---|
1772 | END SUBROUTINE mppmin_real |
---|
1773 | |
---|
1774 | |
---|
1775 | SUBROUTINE mppsum_a_real( ptab, kdim, kcom ) |
---|
1776 | !!---------------------------------------------------------------------- |
---|
1777 | !! *** routine mppsum_a_real *** |
---|
1778 | !! |
---|
1779 | !! ** Purpose : global sum, REAL ARRAY argument case |
---|
1780 | !! |
---|
1781 | !!----------------------------------------------------------------------- |
---|
1782 | INTEGER , INTENT( in ) :: kdim ! size of ptab |
---|
1783 | REAL(wp), DIMENSION(kdim), INTENT( inout ) :: ptab ! input array |
---|
1784 | INTEGER , INTENT( in ), OPTIONAL :: kcom |
---|
1785 | !! |
---|
1786 | INTEGER :: ierror ! temporary integer |
---|
1787 | INTEGER :: localcomm |
---|
1788 | REAL(wp), DIMENSION(kdim) :: zwork ! temporary workspace |
---|
1789 | !!----------------------------------------------------------------------- |
---|
1790 | ! |
---|
1791 | localcomm = mpi_comm_opa |
---|
1792 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1793 | ! |
---|
1794 | CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, localcomm, ierror ) |
---|
1795 | ptab(:) = zwork(:) |
---|
1796 | ! |
---|
1797 | END SUBROUTINE mppsum_a_real |
---|
1798 | |
---|
1799 | |
---|
1800 | SUBROUTINE mppsum_real( ptab, kcom ) |
---|
1801 | !!---------------------------------------------------------------------- |
---|
1802 | !! *** routine mppsum_real *** |
---|
1803 | !! |
---|
1804 | !! ** Purpose : global sum, SCALAR argument case |
---|
1805 | !! |
---|
1806 | !!----------------------------------------------------------------------- |
---|
1807 | REAL(wp), INTENT(inout) :: ptab ! input scalar |
---|
1808 | INTEGER , INTENT(in ), OPTIONAL :: kcom |
---|
1809 | !! |
---|
1810 | INTEGER :: ierror, localcomm |
---|
1811 | REAL(wp) :: zwork |
---|
1812 | !!----------------------------------------------------------------------- |
---|
1813 | ! |
---|
1814 | localcomm = mpi_comm_opa |
---|
1815 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1816 | ! |
---|
1817 | CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, localcomm, ierror ) |
---|
1818 | ptab = zwork |
---|
1819 | ! |
---|
1820 | END SUBROUTINE mppsum_real |
---|
1821 | |
---|
1822 | SUBROUTINE mppsum_realdd( ytab, kcom ) |
---|
1823 | !!---------------------------------------------------------------------- |
---|
1824 | !! *** routine mppsum_realdd *** |
---|
1825 | !! |
---|
1826 | !! ** Purpose : global sum in Massively Parallel Processing |
---|
1827 | !! SCALAR argument case for double-double precision |
---|
1828 | !! |
---|
1829 | !!----------------------------------------------------------------------- |
---|
1830 | COMPLEX(wp), INTENT(inout) :: ytab ! input scalar |
---|
1831 | INTEGER , INTENT( in ), OPTIONAL :: kcom |
---|
1832 | |
---|
1833 | !! * Local variables (MPI version) |
---|
1834 | INTEGER :: ierror |
---|
1835 | INTEGER :: localcomm |
---|
1836 | COMPLEX(wp) :: zwork |
---|
1837 | |
---|
1838 | localcomm = mpi_comm_opa |
---|
1839 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1840 | |
---|
1841 | ! reduce local sums into global sum |
---|
1842 | CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX, & |
---|
1843 | MPI_SUMDD,localcomm,ierror) |
---|
1844 | ytab = zwork |
---|
1845 | |
---|
1846 | END SUBROUTINE mppsum_realdd |
---|
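!!----------------------------------------------------------------------
!! Hedged note (illustration only): the double-double technique keeps a sum
!! and its compensation term in the real and imaginary parts of a COMPLEX(wp)
!! value; MPI_SUMDD is assumed to be a user-defined operator (created
!! elsewhere with MPI_OP_CREATE) that adds such pairs without dropping the
!! compensation term. A global sum of a local contribution zlocal (assumed
!! name) would then read:
!!
!!    ytmp = CMPLX( zlocal, 0._wp, wp )
!!    CALL mppsum_realdd( ytmp )
!!    zglobal = REAL( ytmp, wp )
!!----------------------------------------------------------------------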
1847 | |
---|
1848 | |
---|
1849 | SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom ) |
---|
1850 | !!---------------------------------------------------------------------- |
---|
1851 | !! *** routine mppsum_a_realdd *** |
---|
1852 | !! |
---|
1853 | !! ** Purpose : global sum in Massively Parallel Processing |
---|
1854 | !! COMPLEX ARRAY case for double-double precision |
---|
1855 | !! |
---|
1856 | !!----------------------------------------------------------------------- |
---|
1857 | INTEGER , INTENT( in ) :: kdim ! size of ytab |
---|
1858 | COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) :: ytab ! input array |
---|
1859 | INTEGER , INTENT( in ), OPTIONAL :: kcom |
---|
1860 | |
---|
1861 | !! * Local variables (MPI version) |
---|
1862 | INTEGER :: ierror ! temporary integer |
---|
1863 | INTEGER :: localcomm |
---|
1864 | COMPLEX(wp), DIMENSION(kdim) :: zwork ! temporary workspace |
---|
1865 | |
---|
1866 | localcomm = mpi_comm_opa |
---|
1867 | IF( PRESENT(kcom) ) localcomm = kcom |
---|
1868 | |
---|
1869 | CALL MPI_ALLREDUCE (ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, & |
---|
1870 | MPI_SUMDD,localcomm,ierror) |
---|
1871 | ytab(:) = zwork(:) |
---|
1872 | |
---|
1873 | END SUBROUTINE mppsum_a_realdd |
---|
1874 | |
---|
1875 | SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj ) |
---|
1876 | !!------------------------------------------------------------------------ |
---|
1877 | !! *** routine mpp_minloc *** |
---|
1878 | !! |
---|
1879 | !! ** Purpose : Compute the global minimum of an array ptab |
---|
1880 | !! and also give its global position |
---|
1881 | !! |
---|
1882 | !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC |
---|
1883 | !! |
---|
1884 | !!-------------------------------------------------------------------------- |
---|
1885 | REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array |
---|
1886 | REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask |
---|
1887 | REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab |
---|
1888 | INTEGER , INTENT( out) :: ki, kj ! index of minimum in global frame |
---|
1889 | !! |
---|
1890 | INTEGER , DIMENSION(2) :: ilocs |
---|
1891 | INTEGER :: ierror |
---|
1892 | REAL(wp) :: zmin ! local minimum |
---|
1893 | REAL(wp), DIMENSION(2,1) :: zain, zaout |
---|
1894 | !!----------------------------------------------------------------------- |
---|
1895 | ! |
---|
1896 | zmin = MINVAL( ptab(:,:) , mask= pmask == 1.e0 ) |
---|
1897 | ilocs = MINLOC( ptab(:,:) , mask= pmask == 1.e0 ) |
---|
1898 | ! |
---|
1899 | ki = ilocs(1) + nimpp - 1 |
---|
1900 | kj = ilocs(2) + njmpp - 1 |
---|
1901 | ! |
---|
1902 | zain(1,:)=zmin |
---|
1903 | zain(2,:)=ki+10000.*kj |
---|
1904 | ! |
---|
1905 | CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) |
---|
1906 | ! |
---|
1907 | pmin = zaout(1,1) |
---|
1908 | kj = INT(zaout(2,1)/10000.) |
---|
1909 | ki = INT(zaout(2,1) - 10000.*kj ) |
---|
1910 | ! |
---|
1911 | END SUBROUTINE mpp_minloc2d |
---|
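!!----------------------------------------------------------------------
!! Hedged worked example (illustration only) of the index packing used above:
!! (ki,kj) is folded into one real as ki + 10000*kj so that MPI_MINLOC can
!! carry the location together with the value. For ki = 37, kj = 152 the
!! packed key is 37 + 10000*152 = 1520037; decoding recovers
!! kj = INT(1520037/10000.) = 152 and ki = 1520037 - 10000*152 = 37.
!! This implicitly assumes the global i-index stays below 10000.
!!----------------------------------------------------------------------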
1912 | |
---|
1913 | |
---|
1914 | SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj ,kk) |
---|
1915 | !!------------------------------------------------------------------------ |
---|
1916 | !! *** routine mpp_minloc *** |
---|
1917 | !! |
---|
1918 | !! ** Purpose : Compute the global minimum of an array ptab |
---|
1919 | !! and also give its global position |
---|
1920 | !! |
---|
1921 | !! ** Method : Use MPI_ALLREDUCE with MPI_MINLOC |
---|
1922 | !! |
---|
1923 | !!-------------------------------------------------------------------------- |
---|
1924 | REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 3D array |
---|
1925 | REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local 3D mask |
---|
1926 | REAL(wp) , INTENT( out) :: pmin ! Global minimum of ptab |
---|
1927 | INTEGER , INTENT( out) :: ki, kj, kk ! index of minimum in global frame |
---|
1928 | !! |
---|
1929 | INTEGER :: ierror |
---|
1930 | REAL(wp) :: zmin ! local minimum |
---|
1931 | INTEGER , DIMENSION(3) :: ilocs |
---|
1932 | REAL(wp), DIMENSION(2,1) :: zain, zaout |
---|
1933 | !!----------------------------------------------------------------------- |
---|
1934 | ! |
---|
1935 | zmin = MINVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) |
---|
1936 | ilocs = MINLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) |
---|
1937 | ! |
---|
1938 | ki = ilocs(1) + nimpp - 1 |
---|
1939 | kj = ilocs(2) + njmpp - 1 |
---|
1940 | kk = ilocs(3) |
---|
1941 | ! |
---|
1942 | zain(1,:)=zmin |
---|
1943 | zain(2,:)=ki+10000.*kj+100000000.*kk |
---|
1944 | ! |
---|
1945 | CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror) |
---|
1946 | ! |
---|
1947 | pmin = zaout(1,1) |
---|
1948 | kk = INT( zaout(2,1) / 100000000. ) |
---|
1949 | kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 |
---|
1950 | ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) |
---|
1951 | ! |
---|
1952 | END SUBROUTINE mpp_minloc3d |
---|
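!!----------------------------------------------------------------------
!! Hedged worked example (illustration only) of the 3D packing used above:
!! ki + 10000*kj + 100000000*kk. For ki = 37, kj = 152, kk = 12 the key is
!! 37 + 1520000 + 1200000000 = 1201520037; decoding gives
!! kk = INT(1201520037/100000000.) = 12, then kj = 152 and ki = 37 as in the
!! 2D case. The global i and j indices are thus assumed to stay below 10000,
!! and the packed key must remain exactly representable in wp.
!!----------------------------------------------------------------------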
1953 | |
---|
1954 | |
---|
1955 | SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) |
---|
1956 | !!------------------------------------------------------------------------ |
---|
1957 | !! *** routine mpp_maxloc *** |
---|
1958 | !! |
---|
1959 | !! ** Purpose : Compute the global maximum of an array ptab |
---|
1960 | !! and also give its global position |
---|
1961 | !! |
---|
1962 | !! ** Method : Use MPI_ALLREDUCE with MPI_MAXLOC |
---|
1963 | !! |
---|
1964 | !!-------------------------------------------------------------------------- |
---|
1965 | REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: ptab ! Local 2D array |
---|
1966 | REAL(wp), DIMENSION (jpi,jpj), INTENT(in ) :: pmask ! Local mask |
---|
1967 | REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab |
---|
1968 | INTEGER , INTENT( out) :: ki, kj ! index of maximum in global frame |
---|
1969 | !! |
---|
1970 | INTEGER :: ierror |
---|
1971 | INTEGER, DIMENSION (2) :: ilocs |
---|
1972 | REAL(wp) :: zmax ! local maximum |
---|
1973 | REAL(wp), DIMENSION(2,1) :: zain, zaout |
---|
1974 | !!----------------------------------------------------------------------- |
---|
1975 | ! |
---|
1976 | zmax = MAXVAL( ptab(:,:) , mask= pmask == 1.e0 ) |
---|
1977 | ilocs = MAXLOC( ptab(:,:) , mask= pmask == 1.e0 ) |
---|
1978 | ! |
---|
1979 | ki = ilocs(1) + nimpp - 1 |
---|
1980 | kj = ilocs(2) + njmpp - 1 |
---|
1981 | ! |
---|
1982 | zain(1,:) = zmax |
---|
1983 | zain(2,:) = ki + 10000. * kj |
---|
1984 | ! |
---|
1985 | CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) |
---|
1986 | ! |
---|
1987 | pmax = zaout(1,1) |
---|
1988 | kj = INT( zaout(2,1) / 10000. ) |
---|
1989 | ki = INT( zaout(2,1) - 10000.* kj ) |
---|
1990 | ! |
---|
1991 | END SUBROUTINE mpp_maxloc2d |
---|
1992 | |
---|
1993 | |
---|
1994 | SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) |
---|
1995 | !!------------------------------------------------------------------------ |
---|
1996 | !! *** routine mpp_maxloc *** |
---|
1997 | !! |
---|
1998 | !! ** Purpose : Compute the global maximum of an array ptab |
---|
1999 | !! and also give its global position |
---|
2000 | !! |
---|
2001 | !! ** Method : Use MPI_ALLREDUCE with MPI_MAXLOC |
---|
2002 | !! |
---|
2003 | !!-------------------------------------------------------------------------- |
---|
2004 | REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: ptab ! Local 3D array |
---|
2005 | REAL(wp), DIMENSION (jpi,jpj,jpk), INTENT(in ) :: pmask ! Local 3D mask |
---|
2006 | REAL(wp) , INTENT( out) :: pmax ! Global maximum of ptab |
---|
2007 | INTEGER , INTENT( out) :: ki, kj, kk ! index of maximum in global frame |
---|
2008 | !! |
---|
2009 | REAL(wp) :: zmax ! local maximum |
---|
2010 | REAL(wp), DIMENSION(2,1) :: zain, zaout |
---|
2011 | INTEGER , DIMENSION(3) :: ilocs |
---|
2012 | INTEGER :: ierror |
---|
2013 | !!----------------------------------------------------------------------- |
---|
2014 | ! |
---|
2015 | zmax = MAXVAL( ptab(:,:,:) , mask= pmask == 1.e0 ) |
---|
2016 | ilocs = MAXLOC( ptab(:,:,:) , mask= pmask == 1.e0 ) |
---|
2017 | ! |
---|
2018 | ki = ilocs(1) + nimpp - 1 |
---|
2019 | kj = ilocs(2) + njmpp - 1 |
---|
2020 | kk = ilocs(3) |
---|
2021 | ! |
---|
2022 | zain(1,:)=zmax |
---|
2023 | zain(2,:)=ki+10000.*kj+100000000.*kk |
---|
2024 | ! |
---|
2025 | CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror) |
---|
2026 | ! |
---|
2027 | pmax = zaout(1,1) |
---|
2028 | kk = INT( zaout(2,1) / 100000000. ) |
---|
2029 | kj = INT( zaout(2,1) - kk * 100000000. ) / 10000 |
---|
2030 | ki = INT( zaout(2,1) - kk * 100000000. -kj * 10000. ) |
---|
2031 | ! |
---|
2032 | END SUBROUTINE mpp_maxloc3d |
---|
2033 | |
---|
2034 | |
---|
2035 | SUBROUTINE mppsync() |
---|
2036 | !!---------------------------------------------------------------------- |
---|
2037 | !! *** routine mppsync *** |
---|
2038 | !! |
---|
2039 | !! ** Purpose : Massively parallel processors : synchronisation (MPI barrier) |
---|
2040 | !! |
---|
2041 | !!----------------------------------------------------------------------- |
---|
2042 | INTEGER :: ierror |
---|
2043 | !!----------------------------------------------------------------------- |
---|
2044 | ! |
---|
2045 | CALL mpi_barrier( mpi_comm_opa, ierror ) |
---|
2046 | ! |
---|
2047 | END SUBROUTINE mppsync |
---|
2048 | |
---|
2049 | |
---|
2050 | SUBROUTINE mppstop |
---|
2051 | !!---------------------------------------------------------------------- |
---|
2052 | !! *** routine mppstop *** |
---|
2053 | !! |
---|
2054 | !! ** Purpose : Stop the massively parallel processing method |
---|
2055 | !! |
---|
2056 | !!---------------------------------------------------------------------- |
---|
2057 | INTEGER :: info |
---|
2058 | !!---------------------------------------------------------------------- |
---|
2059 | ! |
---|
2060 | CALL mppsync |
---|
2061 | CALL mpi_finalize( info ) |
---|
2062 | ! |
---|
2063 | END SUBROUTINE mppstop |
---|
2064 | |
---|
2065 | |
---|
2066 | SUBROUTINE mpp_comm_free( kcom ) |
---|
2067 | !!---------------------------------------------------------------------- |
---|
2068 | !!---------------------------------------------------------------------- |
---|
2069 | INTEGER, INTENT(in) :: kcom |
---|
2070 | !! |
---|
2071 | INTEGER :: ierr |
---|
2072 | !!---------------------------------------------------------------------- |
---|
2073 | ! |
---|
2074 | CALL MPI_COMM_FREE(kcom, ierr) |
---|
2075 | ! |
---|
2076 | END SUBROUTINE mpp_comm_free |
---|
2077 | |
---|
2078 | |
---|
2079 | SUBROUTINE mpp_ini_ice( pindic, kumout ) |
---|
2080 | !!---------------------------------------------------------------------- |
---|
2081 | !! *** routine mpp_ini_ice *** |
---|
2082 | !! |
---|
2083 | !! ** Purpose : Initialize special communicator for ice areas |
---|
2084 | !! condition together with global variables needed in the ddmpp folding |
---|
2085 | !! |
---|
2086 | !! ** Method : - Look for ice processors in ice routines |
---|
2087 | !! - Put their number in nrank_ice |
---|
2088 | !! - Create groups for the world processors and the ice processors |
---|
2089 | !! - Create a communicator for ice processors |
---|
2090 | !! |
---|
2091 | !! ** output |
---|
2092 | !! njmppmax = njmpp for northern procs |
---|
2093 | !! ndim_rank_ice = number of processors with ice |
---|
2094 | !! nrank_ice (ndim_rank_ice) = ice processors |
---|
2095 | !! ngrp_iworld = group ID for the world processors |
---|
2096 | !! ngrp_ice = group ID for the ice processors |
---|
2097 | !! ncomm_ice = communicator for the ice procs. |
---|
2098 | !! n_ice_root = number (in the world) of proc 0 in the ice comm. |
---|
2099 | !! |
---|
2100 | !!---------------------------------------------------------------------- |
---|
2101 | INTEGER, INTENT(in) :: pindic |
---|
2102 | INTEGER, INTENT(in) :: kumout ! ocean.output logical unit |
---|
2103 | !! |
---|
2104 | INTEGER :: jjproc |
---|
2105 | INTEGER :: ii, ierr |
---|
2106 | INTEGER, ALLOCATABLE, DIMENSION(:) :: kice |
---|
2107 | INTEGER, ALLOCATABLE, DIMENSION(:) :: zwork |
---|
2108 | !!---------------------------------------------------------------------- |
---|
2109 | ! |
---|
2110 | ! Since this is just an init routine and these arrays are of length jpnij |
---|
2111 | ! then don't use wrk_nemo module - just allocate and deallocate. |
---|
2112 | ALLOCATE( kice(jpnij), zwork(jpnij), STAT=ierr ) |
---|
2113 | IF( ierr /= 0 ) THEN |
---|
2114 | WRITE(kumout, cform_err) |
---|
2115 | WRITE(kumout,*) 'mpp_ini_ice : failed to allocate 2, 1D arrays (jpnij in length)' |
---|
2116 | CALL mppstop |
---|
2117 | ENDIF |
---|
2118 | |
---|
2119 | ! Look for how many procs with sea-ice |
---|
2120 | ! |
---|
2121 | kice = 0 |
---|
2122 | DO jjproc = 1, jpnij |
---|
2123 | IF( jjproc == narea .AND. pindic .GT. 0 ) kice(jjproc) = 1 |
---|
2124 | END DO |
---|
2125 | ! |
---|
2126 | zwork = 0 |
---|
2127 | CALL MPI_ALLREDUCE( kice, zwork, jpnij, mpi_integer, mpi_sum, mpi_comm_opa, ierr ) |
---|
2128 | ndim_rank_ice = SUM( zwork ) |
---|
2129 | |
---|
2130 | ! Allocate the right size to nrank_ice |
---|
2131 | IF( ALLOCATED ( nrank_ice ) ) DEALLOCATE( nrank_ice ) |
---|
2132 | ALLOCATE( nrank_ice(ndim_rank_ice) ) |
---|
2133 | ! |
---|
2134 | ii = 0 |
---|
2135 | nrank_ice = 0 |
---|
2136 | DO jjproc = 1, jpnij |
---|
2137 | IF( zwork(jjproc) == 1) THEN |
---|
2138 | ii = ii + 1 |
---|
2139 | nrank_ice(ii) = jjproc -1 |
---|
2140 | ENDIF |
---|
2141 | END DO |
---|
2142 | |
---|
2143 | ! Create the world group |
---|
2144 | CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_iworld, ierr ) |
---|
2145 | |
---|
2146 | ! Create the ice group from the world group |
---|
2147 | CALL MPI_GROUP_INCL( ngrp_iworld, ndim_rank_ice, nrank_ice, ngrp_ice, ierr ) |
---|
2148 | |
---|
2149 | ! Create the ice communicator , ie the pool of procs with sea-ice |
---|
2150 | CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_ice, ncomm_ice, ierr ) |
---|
2151 | |
---|
2152 | ! Find proc number in the world of proc 0 in the north |
---|
2153 | ! The following line seems to be useless, we just comment & keep it as reminder |
---|
2154 | ! CALL MPI_GROUP_TRANSLATE_RANKS(ngrp_ice,1,0,ngrp_iworld,n_ice_root,ierr) |
---|
2155 | ! |
---|
2156 | CALL MPI_GROUP_FREE(ngrp_ice, ierr) |
---|
2157 | CALL MPI_GROUP_FREE(ngrp_iworld, ierr) |
---|
2158 | |
---|
2159 | DEALLOCATE(kice, zwork) |
---|
2160 | ! |
---|
2161 | END SUBROUTINE mpp_ini_ice |
---|
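!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): once ncomm_ice exists, the generic
!! reduction routines can be restricted to the ice processors through their
!! optional kcom argument; zice_extent is an assumed local diagnostic.
!!
!!    CALL mppsum_real( zice_extent, kcom = ncomm_ice )   ! ice procs only
!!----------------------------------------------------------------------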
2162 | |
---|
2163 | |
---|
2164 | SUBROUTINE mpp_ini_znl( kumout ) |
---|
2165 | !!---------------------------------------------------------------------- |
---|
2166 | !! *** routine mpp_ini_znl *** |
---|
2167 | !! |
---|
2168 | !! ** Purpose : Initialize special communicator for computing zonal sum |
---|
2169 | !! |
---|
2170 | !! ** Method : - Look for processors in the same row |
---|
2171 | !! - Put their number in nrank_znl |
---|
2172 | !! - Create group for the znl processors |
---|
2173 | !! - Create a communicator for znl processors |
---|
2174 | !! - Determine if processor should write znl files |
---|
2175 | !! |
---|
2176 | !! ** output |
---|
2177 | !! ndim_rank_znl = number of processors on the same row |
---|
2178 | !! ngrp_znl = group ID for the znl processors |
---|
2179 | !! ncomm_znl = communicator for the znl procs. |
---|
2180 | !! n_znl_root = number (in the world) of proc 0 in the znl comm. |
---|
2181 | !! |
---|
2182 | !!---------------------------------------------------------------------- |
---|
2183 | INTEGER, INTENT(in) :: kumout ! ocean.output logical units |
---|
2184 | ! |
---|
2185 | INTEGER :: jproc ! dummy loop integer |
---|
2186 | INTEGER :: ierr, ii ! local integer |
---|
2187 | INTEGER, ALLOCATABLE, DIMENSION(:) :: kwork |
---|
2188 | !!---------------------------------------------------------------------- |
---|
2189 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world : ', ngrp_world |
---|
2190 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world |
---|
2191 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_opa : ', mpi_comm_opa |
---|
2192 | ! |
---|
2193 | ALLOCATE( kwork(jpnij), STAT=ierr ) |
---|
2194 | IF( ierr /= 0 ) THEN |
---|
2195 | WRITE(kumout, cform_err) |
---|
2196 | WRITE(kumout,*) 'mpp_ini_znl : failed to allocate 1D array of length jpnij' |
---|
2197 | CALL mppstop |
---|
2198 | ENDIF |
---|
2199 | |
---|
2200 | IF( jpnj == 1 ) THEN |
---|
2201 | ngrp_znl = ngrp_world |
---|
2202 | ncomm_znl = mpi_comm_opa |
---|
2203 | ELSE |
---|
2204 | ! |
---|
2205 | CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_opa, ierr ) |
---|
2206 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork |
---|
2207 | !-$$ CALL flush(numout) |
---|
2208 | ! |
---|
2209 | ! Count number of processors on the same row |
---|
2210 | ndim_rank_znl = 0 |
---|
2211 | DO jproc=1,jpnij |
---|
2212 | IF ( kwork(jproc) == njmpp ) THEN |
---|
2213 | ndim_rank_znl = ndim_rank_znl + 1 |
---|
2214 | ENDIF |
---|
2215 | END DO |
---|
2216 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ndim_rank_znl : ', ndim_rank_znl |
---|
2217 | !-$$ CALL flush(numout) |
---|
2218 | ! Allocate the right size to nrank_znl |
---|
2219 | IF (ALLOCATED (nrank_znl)) DEALLOCATE(nrank_znl) |
---|
2220 | ALLOCATE(nrank_znl(ndim_rank_znl)) |
---|
2221 | ii = 0 |
---|
2222 | nrank_znl (:) = 0 |
---|
2223 | DO jproc=1,jpnij |
---|
2224 | IF ( kwork(jproc) == njmpp) THEN |
---|
2225 | ii = ii + 1 |
---|
2226 | nrank_znl(ii) = jproc -1 |
---|
2227 | ENDIF |
---|
2228 | END DO |
---|
2229 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - nrank_znl : ', nrank_znl |
---|
2230 | !-$$ CALL flush(numout) |
---|
2231 | |
---|
2232 | ! Create the opa group |
---|
2233 | CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_opa,ierr) |
---|
2234 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa |
---|
2235 | !-$$ CALL flush(numout) |
---|
2236 | |
---|
2237 | ! Create the znl group from the opa group |
---|
2238 | CALL MPI_GROUP_INCL ( ngrp_opa, ndim_rank_znl, nrank_znl, ngrp_znl, ierr ) |
---|
2239 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_znl ', ngrp_znl |
---|
2240 | !-$$ CALL flush(numout) |
---|
2241 | |
---|
2242 | ! Create the znl communicator from the opa communicator, ie the pool of procs in the same row |
---|
2243 | CALL MPI_COMM_CREATE ( mpi_comm_opa, ngrp_znl, ncomm_znl, ierr ) |
---|
2244 | !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl |
---|
2245 | !-$$ CALL flush(numout) |
---|
2246 | ! |
---|
2247 | END IF |
---|
2248 | |
---|
2249 | ! Determine if this processor is the first (starting from i=1) on the row |
---|
2250 | IF ( jpni == 1 ) THEN |
---|
2251 | l_znl_root = .TRUE. |
---|
2252 | ELSE |
---|
2253 | l_znl_root = .FALSE. |
---|
2254 | kwork (1) = nimpp |
---|
2255 | CALL mpp_min ( kwork(1), kcom = ncomm_znl) |
---|
2256 | IF ( nimpp == kwork(1)) l_znl_root = .TRUE. |
---|
2257 | END IF |
---|
2258 | |
---|
2259 | DEALLOCATE(kwork) |
---|
2260 | |
---|
2261 | END SUBROUTINE mpp_ini_znl |
---|
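!!----------------------------------------------------------------------
!! Hedged usage sketch (illustration only): ncomm_znl groups the processors of
!! one row, so a zonal sum can be formed with the generic reductions over that
!! communicator; zzonal is an assumed local array of length jpj.
!!
!!    CALL mppsum_a_real( zzonal, jpj, kcom = ncomm_znl )   ! sum along the row
!!    IF( l_znl_root )   WRITE(numout,*) zzonal              ! write once per row
!!----------------------------------------------------------------------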
2262 | |
---|
2263 | |
---|
2264 | SUBROUTINE mpp_ini_north |
---|
2265 | !!---------------------------------------------------------------------- |
---|
2266 | !! *** routine mpp_ini_north *** |
---|
2267 | !! |
---|
2268 | !! ** Purpose : Initialize special communicator for north folding |
---|
2269 | !! condition together with global variables needed in the mpp folding |
---|
2270 | !! |
---|
2271 | !! ** Method : - Look for northern processors |
---|
2272 | !! - Put their number in nrank_north |
---|
2273 | !! - Create groups for the world processors and the north processors |
---|
2274 | !! - Create a communicator for northern processors |
---|
2275 | !! |
---|
2276 | !! ** output |
---|
2277 | !! njmppmax = njmpp for northern procs |
---|
2278 | !! ndim_rank_north = number of processors in the northern line |
---|
2279 | !! nrank_north (ndim_rank_north) = number of the northern procs. |
---|
2280 | !! ngrp_world = group ID for the world processors |
---|
2281 | !! ngrp_north = group ID for the northern processors |
---|
2282 | !! ncomm_north = communicator for the northern procs. |
---|
2283 | !! north_root = number (in the world) of proc 0 in the northern comm. |
---|
2284 | !! |
---|
2285 | !!---------------------------------------------------------------------- |
---|
2286 | INTEGER :: ierr |
---|
2287 | INTEGER :: jjproc |
---|
2288 | INTEGER :: ii, ji |
---|
2289 | !!---------------------------------------------------------------------- |
---|
2290 | ! |
---|
2291 | njmppmax = MAXVAL( njmppt ) |
---|
2292 | ! |
---|
2293 | ! Look for how many procs on the northern boundary |
---|
2294 | ndim_rank_north = 0 |
---|
2295 | DO jjproc = 1, jpnij |
---|
2296 | IF( njmppt(jjproc) == njmppmax ) ndim_rank_north = ndim_rank_north + 1 |
---|
2297 | END DO |
---|
2298 | ! |
---|
2299 | ! Allocate the right size to nrank_north |
---|
2300 | IF (ALLOCATED (nrank_north)) DEALLOCATE(nrank_north) |
---|
2301 | ALLOCATE( nrank_north(ndim_rank_north) ) |
---|
2302 | |
---|
2303 | ! Fill the nrank_north array with proc. number of northern procs. |
---|
2304 | ! Note : the rank start at 0 in MPI |
---|
2305 | ii = 0 |
---|
2306 | DO ji = 1, jpnij |
---|
2307 | IF ( njmppt(ji) == njmppmax ) THEN |
---|
2308 | ii=ii+1 |
---|
2309 | nrank_north(ii)=ji-1 |
---|
2310 | END IF |
---|
2311 | END DO |
---|
2312 | ! |
---|
2313 | ! create the world group |
---|
2314 | CALL MPI_COMM_GROUP( mpi_comm_opa, ngrp_world, ierr ) |
---|
2315 | ! |
---|
2316 | ! Create the North group from the world group |
---|
2317 | CALL MPI_GROUP_INCL( ngrp_world, ndim_rank_north, nrank_north, ngrp_north, ierr ) |
---|
2318 | ! |
---|
2319 | ! Create the North communicator , ie the pool of procs in the north group |
---|
2320 | CALL MPI_COMM_CREATE( mpi_comm_opa, ngrp_north, ncomm_north, ierr ) |
---|
2321 | ! |
---|
2322 | END SUBROUTINE mpp_ini_north |
---|
2323 | |
---|
2324 | |
---|
2325 | SUBROUTINE mpp_lbc_north_3d( pt3d, cd_type, psgn ) |
---|
2326 | !!--------------------------------------------------------------------- |
---|
2327 | !! *** routine mpp_lbc_north_3d *** |
---|
2328 | !! |
---|
2329 | !! ** Purpose : Ensure proper north fold horizontal boundary condition |
---|
2330 | !! in mpp configuration in case of jpni > 1 |
---|
2331 | !! |
---|
2332 | !! ** Method : North fold condition and mpp with more than one proc |
---|
2333 | !! in i-direction require a specific treatment. We gather |
---|
2334 | !! the 4 northern lines of the global domain on 1 processor |
---|
2335 | !! and apply lbc north-fold on this sub array. Then we |
---|
2336 | !! scatter the north fold array back to the processors. |
---|
2337 | !! |
---|
2338 | !!---------------------------------------------------------------------- |
---|
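!! Hedged outline (illustration only) of the flow implemented below, assuming
!! the 4-line fold buffer (ijpj = 4):
!!   1. copy the last 4 j-lines of pt3d into znorthloc
!!   2. either exchange them point-to-point with the north-fold neighbours
!!      (l_north_nogather branch) or MPI_ALLGATHER them over ncomm_north
!!   3. apply the fold (mpp_lbc_nfd or lbc_nfd) on the gathered lines
!!   4. copy the folded lines back into the last 4 j-lines of pt3d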
2339 | REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: pt3d ! 3D array on which the b.c. is applied |
---|
2340 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt3d grid-points |
---|
2341 | ! ! = T , U , V , F or W gridpoints |
---|
2342 | REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold |
---|
2343 | !! ! = 1. , the sign is kept |
---|
2344 | INTEGER :: ji, jj, jr, jk |
---|
2345 | INTEGER :: ierr, itaille, ildi, ilei, iilb |
---|
2346 | INTEGER :: ijpj, ijpjm1, ij, iproc |
---|
2347 | INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather |
---|
2348 | INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather |
---|
2349 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather |
---|
2350 | ! ! Workspace for message transfers avoiding mpi_allgather |
---|
2351 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab |
---|
2352 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk |
---|
2353 | REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio |
---|
2354 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr |
---|
2355 | |
---|
2356 | INTEGER :: istatus(mpi_status_size) |
---|
2357 | INTEGER :: iflag |
---|
2358 | !!---------------------------------------------------------------------- |
---|
2359 | ! |
---|
2360 | ALLOCATE( ztab(jpiglo,4,jpk) , znorthloc(jpi,4,jpk), zfoldwk(jpi,4,jpk), znorthgloio(jpi,4,jpk,jpni) ) |
---|
2361 | ALLOCATE( ztabl(jpi,4,jpk), ztabr(jpi*jpmaxngh, 4, jpk) ) |
---|
2362 | |
---|
2363 | ijpj = 4 |
---|
2364 | ijpjm1 = 3 |
---|
2365 | ! |
---|
2366 | znorthloc(:,:,:) = 0 |
---|
2367 | DO jk = 1, jpk |
---|
2368 | DO jj = nlcj - ijpj +1, nlcj ! put in znorthloc the last 4 jlines of pt3d |
---|
2369 | ij = jj - nlcj + ijpj |
---|
2370 | znorthloc(:,ij,jk) = pt3d(:,jj,jk) |
---|
2371 | END DO |
---|
2372 | END DO |
---|
2373 | ! |
---|
2374 | ! ! Build in procs of ncomm_north the znorthgloio |
---|
2375 | itaille = jpi * jpk * ijpj |
---|
2376 | |
---|
2377 | IF ( l_north_nogather ) THEN |
---|
2378 | ! |
---|
2379 | ztabr(:,:,:) = 0 |
---|
2380 | ztabl(:,:,:) = 0 |
---|
2381 | |
---|
2382 | DO jk = 1, jpk |
---|
2383 | DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array |
---|
2384 | ij = jj - nlcj + ijpj |
---|
2385 | DO ji = nfsloop, nfeloop |
---|
2386 | ztabl(ji,ij,jk) = pt3d(ji,jj,jk) |
---|
2387 | END DO |
---|
2388 | END DO |
---|
2389 | END DO |
---|
2390 | |
---|
2391 | DO jr = 1,nsndto |
---|
2392 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2393 | CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) ) |
---|
2394 | ENDIF |
---|
2395 | END DO |
---|
2396 | DO jr = 1,nsndto |
---|
2397 | iproc = nfipproc(isendto(jr),jpnj) |
---|
2398 | IF(iproc .ne. -1) THEN |
---|
2399 | ilei = nleit (iproc+1) |
---|
2400 | ildi = nldit (iproc+1) |
---|
2401 | iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) |
---|
2402 | ENDIF |
---|
2403 | IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN |
---|
2404 | CALL mpprecv(5, zfoldwk, itaille, iproc) |
---|
2405 | DO jk = 1, jpk |
---|
2406 | DO jj = 1, ijpj |
---|
2407 | DO ji = ildi, ilei |
---|
2408 | ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) |
---|
2409 | END DO |
---|
2410 | END DO |
---|
2411 | END DO |
---|
2412 | ELSE IF (iproc .eq. (narea-1)) THEN |
---|
2413 | DO jk = 1, jpk |
---|
2414 | DO jj = 1, ijpj |
---|
2415 | DO ji = ildi, ilei |
---|
2416 | ztabr(iilb+ji,jj,jk) = pt3d(ji,nlcj-ijpj+jj,jk) |
---|
2417 | END DO |
---|
2418 | END DO |
---|
2419 | END DO |
---|
2420 | ENDIF |
---|
2421 | END DO |
---|
2422 | IF (l_isend) THEN |
---|
2423 | DO jr = 1,nsndto |
---|
2424 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2425 | CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) |
---|
2426 | ENDIF |
---|
2427 | END DO |
---|
2428 | ENDIF |
---|
2429 | CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition |
---|
2430 | DO jk = 1, jpk |
---|
2431 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d |
---|
2432 | ij = jj - nlcj + ijpj |
---|
2433 | DO ji= 1, nlci |
---|
2434 | pt3d(ji,jj,jk) = ztabl(ji,ij,jk) |
---|
2435 | END DO |
---|
2436 | END DO |
---|
2437 | END DO |
---|
2438 | ! |
---|
2439 | |
---|
2440 | ELSE |
---|
2441 | CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & |
---|
2442 | & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) |
---|
2443 | ! |
---|
2444 | ztab(:,:,:) = 0.e0 |
---|
2445 | DO jr = 1, ndim_rank_north ! recover the global north array |
---|
2446 | iproc = nrank_north(jr) + 1 |
---|
2447 | ildi = nldit (iproc) |
---|
2448 | ilei = nleit (iproc) |
---|
2449 | iilb = nimppt(iproc) |
---|
2450 | DO jk = 1, jpk |
---|
2451 | DO jj = 1, ijpj |
---|
2452 | DO ji = ildi, ilei |
---|
2453 | ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) |
---|
2454 | END DO |
---|
2455 | END DO |
---|
2456 | END DO |
---|
2457 | END DO |
---|
2458 | CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition |
---|
2459 | ! |
---|
2460 | DO jk = 1, jpk |
---|
2461 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt3d |
---|
2462 | ij = jj - nlcj + ijpj |
---|
2463 | DO ji= 1, nlci |
---|
2464 | pt3d(ji,jj,jk) = ztab(ji+nimpp-1,ij,jk) |
---|
2465 | END DO |
---|
2466 | END DO |
---|
2467 | END DO |
---|
2468 | ! |
---|
2469 | ENDIF |
---|
2470 | ! |
---|
2471 | ! The ztab array has been either: |
---|
2472 | ! a. Fully populated by the mpi_allgather operation or |
---|
2473 | ! b. Had the active points for this domain and northern neighbours populated |
---|
2474 | ! by peer to peer exchanges |
---|
2475 | ! Either way the array may be folded by lbc_nfd and the result for the span of |
---|
2476 | ! this domain will be identical. |
---|
2477 | ! |
---|
2478 | DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) |
---|
2479 | DEALLOCATE( ztabl, ztabr ) |
---|
2480 | ! |
---|
2481 | END SUBROUTINE mpp_lbc_north_3d |
---|
2482 | |
---|
2483 | |
---|
2484 | SUBROUTINE mpp_lbc_north_2d( pt2d, cd_type, psgn) |
---|
2485 | !!--------------------------------------------------------------------- |
---|
2486 | !! *** routine mpp_lbc_north_2d *** |
---|
2487 | !! |
---|
2488 | !! ** Purpose : Ensure proper north fold horizontal boundary condition |
---|
2489 | !! in mpp configuration in case of jpni > 1 (for 2d array) |
---|
2490 | !! |
---|
2491 | !! ** Method : North fold condition and mpp with more than one proc |
---|
2492 | !! in i-direction require a specific treatment. We gather |
---|
2493 | !! the 4 northern lines of the global domain on 1 processor |
---|
2494 | !! and apply lbc north-fold on this sub array. Then we |
---|
2495 | !! scatter the north fold array back to the processors. |
---|
2496 | !! |
---|
2497 | !!---------------------------------------------------------------------- |
---|
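      ! Illustrative call (sketch only; zssh is a hypothetical 2D T-point field):
      !     CALL mpp_lbc_north_2d( zssh, 'T', 1._wp )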
2498 | REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: pt2d ! 2D array on which the b.c. is applied |
---|
2499 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points |
---|
2500 | ! ! = T , U , V , F or W gridpoints |
---|
2501 | REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the north fold |
---|
2502 | !! ! = 1. , the sign is kept |
---|
2503 | INTEGER :: ji, jj, jr |
---|
2504 | INTEGER :: ierr, itaille, ildi, ilei, iilb |
---|
2505 | INTEGER :: ijpj, ijpjm1, ij, iproc |
---|
2506 | INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather |
---|
2507 | INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather |
---|
2508 | INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather |
---|
2509 | ! ! Workspace for message transfers avoiding mpi_allgather |
---|
2510 | REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab |
---|
2511 | REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: znorthloc, zfoldwk |
---|
2512 | REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio |
---|
2513 | REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztabl, ztabr |
---|
2514 | INTEGER :: istatus(mpi_status_size) |
---|
2515 | INTEGER :: iflag |
---|
2516 | !!---------------------------------------------------------------------- |
---|
2517 | ! |
---|
2518 | ALLOCATE( ztab(jpiglo,4), znorthloc(jpi,4), zfoldwk(jpi,4), znorthgloio(jpi,4,jpni) ) |
---|
2519 | ALLOCATE( ztabl(jpi,4), ztabr(jpi*jpmaxngh, 4) ) |
---|
2520 | ! |
---|
2521 | ijpj = 4 |
---|
2522 | ijpjm1 = 3 |
---|
2523 | ! |
---|
2524 | DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d |
---|
2525 | ij = jj - nlcj + ijpj |
---|
2526 | znorthloc(:,ij) = pt2d(:,jj) |
---|
2527 | END DO |
---|
2528 | |
---|
2529 | ! ! Build in procs of ncomm_north the znorthgloio |
---|
2530 | itaille = jpi * ijpj |
---|
2531 | IF ( l_north_nogather ) THEN |
---|
2532 | ! |
---|
2533 | ! Avoid the use of mpi_allgather by exchanging only with the processes already identified |
---|
2534 | ! (in nemo_northcomms) as being involved in this process' northern boundary exchange |
---|
2535 | ! |
---|
2536 | ztabr(:,:) = 0 |
---|
2537 | ztabl(:,:) = 0 |
---|
2538 | |
---|
2539 | DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array |
---|
2540 | ij = jj - nlcj + ijpj |
---|
2541 | DO ji = nfsloop, nfeloop |
---|
2542 | ztabl(ji,ij) = pt2d(ji,jj) |
---|
2543 | END DO |
---|
2544 | END DO |
---|
2545 | |
---|
2546 | DO jr = 1,nsndto |
---|
2547 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2548 | CALL mppsend(5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) |
---|
2549 | ENDIF |
---|
2550 | END DO |
---|
2551 | DO jr = 1,nsndto |
---|
2552 | iproc = nfipproc(isendto(jr),jpnj) |
---|
2553 | IF(iproc .ne. -1) THEN |
---|
2554 | ilei = nleit (iproc+1) |
---|
2555 | ildi = nldit (iproc+1) |
---|
2556 | iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) |
---|
2557 | ENDIF |
---|
2558 | IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN |
---|
2559 | CALL mpprecv(5, zfoldwk, itaille, iproc) |
---|
2560 | DO jj = 1, ijpj |
---|
2561 | DO ji = ildi, ilei |
---|
2562 | ztabr(iilb+ji,jj) = zfoldwk(ji,jj) |
---|
2563 | END DO |
---|
2564 | END DO |
---|
2565 | ELSE IF (iproc .eq. (narea-1)) THEN |
---|
2566 | DO jj = 1, ijpj |
---|
2567 | DO ji = ildi, ilei |
---|
2568 | ztabr(iilb+ji,jj) = pt2d(ji,nlcj-ijpj+jj) |
---|
2569 | END DO |
---|
2570 | END DO |
---|
2571 | ENDIF |
---|
2572 | END DO |
---|
2573 | IF (l_isend) THEN |
---|
2574 | DO jr = 1,nsndto |
---|
2575 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2576 | CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) |
---|
2577 | ENDIF |
---|
2578 | END DO |
---|
2579 | ENDIF |
---|
2580 | CALL mpp_lbc_nfd( ztabl, ztabr, cd_type, psgn ) ! North fold boundary condition |
---|
2581 | ! |
---|
2582 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d |
---|
2583 | ij = jj - nlcj + ijpj |
---|
2584 | DO ji = 1, nlci |
---|
2585 | pt2d(ji,jj) = ztabl(ji,ij) |
---|
2586 | END DO |
---|
2587 | END DO |
---|
2588 | ! |
---|
2589 | ELSE |
---|
2590 | CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & |
---|
2591 | & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) |
---|
2592 | ! |
---|
2593 | ztab(:,:) = 0.e0 |
---|
2594 | DO jr = 1, ndim_rank_north ! recover the global north array |
---|
2595 | iproc = nrank_north(jr) + 1 |
---|
2596 | ildi = nldit (iproc) |
---|
2597 | ilei = nleit (iproc) |
---|
2598 | iilb = nimppt(iproc) |
---|
2599 | DO jj = 1, ijpj |
---|
2600 | DO ji = ildi, ilei |
---|
2601 | ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) |
---|
2602 | END DO |
---|
2603 | END DO |
---|
2604 | END DO |
---|
2605 | CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition |
---|
2606 | ! |
---|
2607 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d |
---|
2608 | ij = jj - nlcj + ijpj |
---|
2609 | DO ji = 1, nlci |
---|
2610 | pt2d(ji,jj) = ztab(ji+nimpp-1,ij) |
---|
2611 | END DO |
---|
2612 | END DO |
---|
2613 | ! |
---|
2614 | ENDIF |
---|
2615 | DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) |
---|
2616 | DEALLOCATE( ztabl, ztabr ) |
---|
2617 | ! |
---|
2618 | END SUBROUTINE mpp_lbc_north_2d |
---|
2619 | |
---|
2620 | SUBROUTINE mpp_lbc_north_2d_multiple( pt2d_array, cd_type, psgn, num_fields) |
---|
2621 | !!--------------------------------------------------------------------- |
---|
2622 | !! *** routine mpp_lbc_north_2d_multiple *** |
---|
2623 | !! |
---|
2624 | !! ** Purpose : Ensure proper north fold horizontal boundary condition |
---|
2625 | !! in mpp configuration in case of jpni > 1 |
---|
2626 | !! (for multiple 2d arrays ) |
---|
2627 | !! |
---|
2628 | !! ** Method : North fold condition and mpp with more than one proc |
---|
2629 | !! in i-direction require a specific treatment. We gather |
---|
2630 | !! the 4 northern lines of the global domain on 1 processor |
---|
2631 | !! and apply lbc north-fold on this sub array. Then we |
---|
2632 | !! scatter the north fold array back to the processors. |
---|
2633 | !! |
---|
2634 | !!---------------------------------------------------------------------- |
---|
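      ! Illustrative usage (sketch only; zptrs, ztypes, zsgns and the 2D fields are hypothetical).
      ! Each element of pt2d_array must point to one 2D field before the call:
      !     zptrs(1)%pt2d => zu2d   ;   ztypes(1) = 'U'   ;   zsgns(1) = -1._wp
      !     zptrs(2)%pt2d => zv2d   ;   ztypes(2) = 'V'   ;   zsgns(2) = -1._wp
      !     CALL mpp_lbc_north_2d_multiple( zptrs, ztypes, zsgns, 2 )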
2635 | INTEGER , INTENT (in ) :: num_fields ! number of variables contained in pt2d |
---|
2636 | TYPE( arrayptr ), DIMENSION(:) :: pt2d_array |
---|
2637 | CHARACTER(len=1), DIMENSION(:), INTENT(in ) :: cd_type ! nature of pt2d grid-points |
---|
2638 | ! ! = T , U , V , F or W gridpoints |
---|
2639 | REAL(wp), DIMENSION(:), INTENT(in ) :: psgn ! = -1. the sign change across the north fold |
---|
2640 | !! ! = 1. , the sign is kept |
---|
2641 | INTEGER :: ji, jj, jr, jk |
---|
2642 | INTEGER :: ierr, itaille, ildi, ilei, iilb |
---|
2643 | INTEGER :: ijpj, ijpjm1, ij, iproc |
---|
2644 | INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf !for mpi_isend when avoiding mpi_allgather |
---|
2645 | INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather |
---|
2646 | INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather |
---|
2647 | ! ! Workspace for message transfers avoiding mpi_allgather |
---|
2648 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztab |
---|
2649 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk |
---|
2650 | REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: znorthgloio |
---|
2651 | REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl, ztabr |
---|
2652 | INTEGER :: istatus(mpi_status_size) |
---|
2653 | INTEGER :: iflag |
---|
2654 | !!---------------------------------------------------------------------- |
---|
2655 | ! |
---|
2656 | ALLOCATE( ztab(jpiglo,4,num_fields), znorthloc(jpi,4,num_fields), zfoldwk(jpi,4,num_fields), & |
---|
2657 | & znorthgloio(jpi,4,num_fields,jpni) ) ! expanded to 3 dimensions |
---|
2658 | ALLOCATE( ztabl(jpi,4,num_fields), ztabr(jpi*jpmaxngh, 4,num_fields) ) |
---|
2659 | ! |
---|
2660 | ijpj = 4 |
---|
2661 | ijpjm1 = 3 |
---|
2662 | ! |
---|
2663 | |
---|
2664 | DO jk = 1, num_fields |
---|
2665 | DO jj = nlcj-ijpj+1, nlcj ! put in znorthloc the last 4 jlines of pt2d (for every variable) |
---|
2666 | ij = jj - nlcj + ijpj |
---|
2667 | znorthloc(:,ij,jk) = pt2d_array(jk)%pt2d(:,jj) |
---|
2668 | END DO |
---|
2669 | END DO |
---|
2670 | ! ! Build in procs of ncomm_north the znorthgloio |
---|
2671 | itaille = jpi * ijpj |
---|
2672 | |
---|
2673 | IF ( l_north_nogather ) THEN |
---|
2674 | ! |
---|
2675 | ! Avoid the use of mpi_allgather by exchanging only with the processes already identified |
---|
2676 | ! (in nemo_northcomms) as being involved in this process' northern boundary exchange |
---|
2677 | ! |
---|
2678 | ztabr(:,:,:) = 0 |
---|
2679 | ztabl(:,:,:) = 0 |
---|
2680 | |
---|
2681 | DO jk = 1, num_fields |
---|
2682 | DO jj = nlcj-ijpj+1, nlcj ! First put local values into the global array |
---|
2683 | ij = jj - nlcj + ijpj |
---|
2684 | DO ji = nfsloop, nfeloop |
---|
2685 | ztabl(ji,ij,jk) = pt2d_array(jk)%pt2d(ji,jj) |
---|
2686 | END DO |
---|
2687 | END DO |
---|
2688 | END DO |
---|
2689 | |
---|
2690 | DO jr = 1,nsndto |
---|
2691 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2692 | CALL mppsend(5, znorthloc, itaille*num_fields, nfipproc(isendto(jr),jpnj), ml_req_nf(jr)) ! Buffer expanded "num_fields" times |
---|
2693 | ENDIF |
---|
2694 | END DO |
---|
2695 | DO jr = 1,nsndto |
---|
2696 | iproc = nfipproc(isendto(jr),jpnj) |
---|
2697 | IF(iproc .ne. -1) THEN |
---|
2698 | ilei = nleit (iproc+1) |
---|
2699 | ildi = nldit (iproc+1) |
---|
2700 | iilb = nfiimpp(isendto(jr),jpnj) - nfiimpp(isendto(1),jpnj) |
---|
2701 | ENDIF |
---|
2702 | IF((iproc .ne. (narea-1)) .and. (iproc .ne. -1)) THEN |
---|
2703 | CALL mpprecv(5, zfoldwk, itaille*num_fields, iproc) ! Buffer expanded "num_fields" times |
---|
2704 | DO jk = 1 , num_fields |
---|
2705 | DO jj = 1, ijpj |
---|
2706 | DO ji = ildi, ilei |
---|
2707 | ztabr(iilb+ji,jj,jk) = zfoldwk(ji,jj,jk) ! Modified to 3D |
---|
2708 | END DO |
---|
2709 | END DO |
---|
2710 | END DO |
---|
2711 | ELSE IF (iproc .eq. (narea-1)) THEN |
---|
2712 | DO jk = 1, num_fields |
---|
2713 | DO jj = 1, ijpj |
---|
2714 | DO ji = ildi, ilei |
---|
2715 | ztabr(iilb+ji,jj,jk) = pt2d_array(jk)%pt2d(ji,nlcj-ijpj+jj) ! Modified to 3D |
---|
2716 | END DO |
---|
2717 | END DO |
---|
2718 | END DO |
---|
2719 | ENDIF |
---|
2720 | END DO |
---|
2721 | IF (l_isend) THEN |
---|
2722 | DO jr = 1,nsndto |
---|
2723 | IF ((nfipproc(isendto(jr),jpnj) .ne. (narea-1)) .and. (nfipproc(isendto(jr),jpnj) .ne. -1)) THEN |
---|
2724 | CALL mpi_wait(ml_req_nf(jr), ml_stat, ml_err) |
---|
2725 | ENDIF |
---|
2726 | END DO |
---|
2727 | ENDIF |
---|
2728 | ! |
---|
2729 | DO ji = 1, num_fields ! Loop to manage 3D variables |
---|
2730 | CALL mpp_lbc_nfd( ztabl(:,:,ji), ztabr(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition |
---|
2731 | END DO |
---|
2732 | ! |
---|
2733 | DO jk = 1, num_fields |
---|
2734 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d |
---|
2735 | ij = jj - nlcj + ijpj |
---|
2736 | DO ji = 1, nlci |
---|
2737 | pt2d_array(jk)%pt2d(ji,jj) = ztabl(ji,ij,jk) ! Modified to 3D |
---|
2738 | END DO |
---|
2739 | END DO |
---|
2740 | END DO |
---|
2741 | |
---|
2742 | ! |
---|
2743 | ELSE |
---|
2744 | ! |
---|
2745 | CALL MPI_ALLGATHER( znorthloc , itaille*num_fields, MPI_DOUBLE_PRECISION, & |
---|
2746 | & znorthgloio, itaille*num_fields, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) |
---|
2747 | ! |
---|
2748 | ztab(:,:,:) = 0.e0 |
---|
2749 | DO jk = 1, num_fields |
---|
2750 | DO jr = 1, ndim_rank_north ! recover the global north array |
---|
2751 | iproc = nrank_north(jr) + 1 |
---|
2752 | ildi = nldit (iproc) |
---|
2753 | ilei = nleit (iproc) |
---|
2754 | iilb = nimppt(iproc) |
---|
2755 | DO jj = 1, ijpj |
---|
2756 | DO ji = ildi, ilei |
---|
2757 | ztab(ji+iilb-1,jj,jk) = znorthgloio(ji,jj,jk,jr) |
---|
2758 | END DO |
---|
2759 | END DO |
---|
2760 | END DO |
---|
2761 | END DO |
---|
2762 | |
---|
2763 | DO ji = 1, num_fields |
---|
2764 | CALL lbc_nfd( ztab(:,:,ji), cd_type(ji), psgn(ji) ) ! North fold boundary condition |
---|
2765 | END DO |
---|
2766 | ! |
---|
2767 | DO jk = 1, num_fields |
---|
2768 | DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d |
---|
2769 | ij = jj - nlcj + ijpj |
---|
2770 | DO ji = 1, nlci |
---|
2771 | pt2d_array(jk)%pt2d(ji,jj) = ztab(ji+nimpp-1,ij,jk) |
---|
2772 | END DO |
---|
2773 | END DO |
---|
2774 | END DO |
---|
2775 | ! |
---|
2776 | ! |
---|
2777 | ENDIF |
---|
2778 | DEALLOCATE( ztab, znorthloc, zfoldwk, znorthgloio ) |
---|
2779 | DEALLOCATE( ztabl, ztabr ) |
---|
2780 | ! |
---|
2781 | END SUBROUTINE mpp_lbc_north_2d_multiple |
---|
2782 | |
---|
2783 | SUBROUTINE mpp_lbc_north_e( pt2d, cd_type, psgn) |
---|
2784 | !!--------------------------------------------------------------------- |
---|
2785 | !! *** routine mpp_lbc_north_e *** |
---|
2786 | !! |
---|
2787 | !! ** Purpose : Ensure proper north fold horizontal boundary condition |
---|
2788 | !! in mpp configuration in case of jpni > 1 and for 2d |
---|
2789 | !! array with outer extra halo |
---|
2790 | !! |
---|
2791 | !! ** Method : North fold condition and mpp with more than one proc |
---|
2792 | !! in i-direction require a specific treatment. We gather |
---|
2793 | !! the 4+2*jpr2dj northern lines of the global domain on 1 |
---|
2794 | !! processor and apply lbc north-fold on this sub array. |
---|
2795 | !! Then we scatter the north fold array back to the processors. |
---|
2796 | !! |
---|
2797 | !!---------------------------------------------------------------------- |
---|
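      ! Illustrative call (sketch only; zfld is a hypothetical field declared with the same extra
      ! halo, i.e. DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj)):
      !     CALL mpp_lbc_north_e( zfld, 'T', 1._wp )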
2798 | REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,1-jpr2dj:jpj+jpr2dj), INTENT(inout) :: pt2d ! 2D array with extra halo |
---|
2799 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points |
---|
2800 | ! ! = T , U , V , F or W -points |
---|
2801 | REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the |
---|
2802 | !! ! north fold, = 1. otherwise |
---|
2803 | INTEGER :: ji, jj, jr |
---|
2804 | INTEGER :: ierr, itaille, ildi, ilei, iilb |
---|
2805 | INTEGER :: ijpj, ij, iproc |
---|
2806 | ! |
---|
2807 | REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e |
---|
2808 | REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio_e |
---|
2809 | |
---|
2810 | !!---------------------------------------------------------------------- |
---|
2811 | ! |
---|
2812 | ALLOCATE( ztab_e(jpiglo,4+2*jpr2dj), znorthloc_e(jpi,4+2*jpr2dj), znorthgloio_e(jpi,4+2*jpr2dj,jpni) ) |
---|
2813 | |
---|
2814 | ! |
---|
2815 | ijpj=4 |
---|
2816 | ztab_e(:,:) = 0.e0 |
---|
2817 | |
---|
2818 | ij=0 |
---|
2819 | ! put in znorthloc_e the last 4 (+ 2*jpr2dj extra-halo) jlines of pt2d |
---|
2820 | DO jj = nlcj - ijpj + 1 - jpr2dj, nlcj +jpr2dj |
---|
2821 | ij = ij + 1 |
---|
2822 | DO ji = 1, jpi |
---|
2823 | znorthloc_e(ji,ij)=pt2d(ji,jj) |
---|
2824 | END DO |
---|
2825 | END DO |
---|
2826 | ! |
---|
2827 | itaille = jpi * ( ijpj + 2 * jpr2dj ) |
---|
2828 | CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & |
---|
2829 | & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) |
---|
2830 | ! |
---|
2831 | DO jr = 1, ndim_rank_north ! recover the global north array |
---|
2832 | iproc = nrank_north(jr) + 1 |
---|
2833 | ildi = nldit (iproc) |
---|
2834 | ilei = nleit (iproc) |
---|
2835 | iilb = nimppt(iproc) |
---|
2836 | DO jj = 1, ijpj+2*jpr2dj |
---|
2837 | DO ji = ildi, ilei |
---|
2838 | ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) |
---|
2839 | END DO |
---|
2840 | END DO |
---|
2841 | END DO |
---|
2842 | |
---|
2843 | |
---|
2844 | ! 2. North-Fold boundary conditions |
---|
2845 | ! ---------------------------------- |
---|
2846 | CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = jpr2dj ) |
---|
2847 | |
---|
2848 | ij = jpr2dj |
---|
2849 | !! Scatter back to pt2d |
---|
2850 | DO jj = nlcj - ijpj + 1 , nlcj +jpr2dj |
---|
2851 | ij = ij +1 |
---|
2852 | DO ji= 1, nlci |
---|
2853 | pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) |
---|
2854 | END DO |
---|
2855 | END DO |
---|
2856 | ! |
---|
2857 | DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e ) |
---|
2858 | ! |
---|
2859 | END SUBROUTINE mpp_lbc_north_e |
---|
2860 | |
---|
2861 | SUBROUTINE mpp_lnk_bdy_3d( ptab, cd_type, psgn, ib_bdy ) |
---|
2862 | !!---------------------------------------------------------------------- |
---|
2863 | !! *** routine mpp_lnk_bdy_3d *** |
---|
2864 | !! |
---|
2865 | !! ** Purpose : Message passing management |
---|
2866 | !! |
---|
2867 | !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries |
---|
2868 | !! between processors following neighboring subdomains. |
---|
2869 | !! domain parameters |
---|
2870 | !! nlci : first dimension of the local subdomain |
---|
2871 | !! nlcj : second dimension of the local subdomain |
---|
2872 | !! nbondi_bdy : mark for "east-west local boundary" |
---|
2873 | !! nbondj_bdy : mark for "north-south local boundary" |
---|
2874 | !! noea : number for local neighboring processors |
---|
2875 | !! nowe : number for local neighboring processors |
---|
2876 | !! noso : number for local neighboring processors |
---|
2877 | !! nono : number for local neighboring processors |
---|
2878 | !! |
---|
2879 | !! ** Action : ptab with update value at its periphery |
---|
2880 | !! |
---|
2881 | !!---------------------------------------------------------------------- |
---|
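      ! Illustrative call (sketch only; zfld3d, nb_bdy and jb_bdy are assumed names):
      !     DO jb_bdy = 1, nb_bdy
      !        CALL mpp_lnk_bdy_3d( zfld3d, 'T', 1._wp, jb_bdy )
      !     END DO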
2882 | REAL(wp), DIMENSION(jpi,jpj,jpk), INTENT(inout) :: ptab ! 3D array on which the boundary condition is applied |
---|
2883 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points |
---|
2884 | ! ! = T , U , V , F , W points |
---|
2885 | REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary |
---|
2886 | ! ! = 1. , the sign is kept |
---|
2887 | INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set |
---|
2888 | ! |
---|
2889 | INTEGER :: ji, jj, jk, jl ! dummy loop indices |
---|
2890 | INTEGER :: imigr, iihom, ijhom ! local integers |
---|
2891 | INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend |
---|
2892 | REAL(wp) :: zland ! local scalar |
---|
2893 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend |
---|
2894 | ! |
---|
2895 | REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ns, zt3sn ! 3d for north-south & south-north |
---|
2896 | REAL(wp), DIMENSION(:,:,:,:), ALLOCATABLE :: zt3ew, zt3we ! 3d for east-west & west-east |
---|
2897 | !!---------------------------------------------------------------------- |
---|
2898 | |
---|
2899 | ALLOCATE( zt3ns(jpi,jprecj,jpk,2), zt3sn(jpi,jprecj,jpk,2), & |
---|
2900 | & zt3ew(jpj,jpreci,jpk,2), zt3we(jpj,jpreci,jpk,2) ) |
---|
2901 | |
---|
2902 | zland = 0.e0 |
---|
2903 | |
---|
2904 | ! 1. standard boundary treatment |
---|
2905 | ! ------------------------------ |
---|
2906 | |
---|
2907 | ! ! East-West boundaries |
---|
2908 | ! !* Cyclic east-west |
---|
2909 | |
---|
2910 | IF( nbondi == 2) THEN |
---|
2911 | IF (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) THEN |
---|
2912 | ptab( 1 ,:,:) = ptab(jpim1,:,:) |
---|
2913 | ptab(jpi,:,:) = ptab( 2 ,:,:) |
---|
2914 | ELSE |
---|
2915 | IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point |
---|
2916 | ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north |
---|
2917 | ENDIF |
---|
2918 | ELSEIF(nbondi == -1) THEN |
---|
2919 | IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point |
---|
2920 | ELSEIF(nbondi == 1) THEN |
---|
2921 | ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north |
---|
2922 | ENDIF !* closed |
---|
2923 | |
---|
2924 | IF (nbondj == 2 .OR. nbondj == -1) THEN |
---|
2925 | IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point |
---|
2926 | ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN |
---|
2927 | ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north |
---|
2928 | ENDIF |
---|
2929 | |
---|
2930 | ! |
---|
2931 | |
---|
2932 | ! 2. East and west directions exchange |
---|
2933 | ! ------------------------------------ |
---|
2934 | ! we play with the neighbours AND the row number because of the periodicity |
---|
2935 | ! |
---|
2936 | SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions |
---|
2937 | CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case) |
---|
2938 | iihom = nlci-nreci |
---|
2939 | DO jl = 1, jpreci |
---|
2940 | zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) |
---|
2941 | zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) |
---|
2942 | END DO |
---|
2943 | END SELECT |
---|
2944 | ! |
---|
2945 | ! ! Migrations |
---|
2946 | imigr = jpreci * jpj * jpk |
---|
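      ! Note: imigr is the east-west message length in words (halo width jpreci x jpj x jpk),
      ! which matches the size of one zt3ew/zt3we buffer slice.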
2947 | ! |
---|
2948 | SELECT CASE ( nbondi_bdy(ib_bdy) ) |
---|
2949 | CASE ( -1 ) |
---|
2950 | CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req1 ) |
---|
2951 | CASE ( 0 ) |
---|
2952 | CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) |
---|
2953 | CALL mppsend( 2, zt3we(1,1,1,1), imigr, noea, ml_req2 ) |
---|
2954 | CASE ( 1 ) |
---|
2955 | CALL mppsend( 1, zt3ew(1,1,1,1), imigr, nowe, ml_req1 ) |
---|
2956 | END SELECT |
---|
2957 | ! |
---|
2958 | SELECT CASE ( nbondi_bdy_b(ib_bdy) ) |
---|
2959 | CASE ( -1 ) |
---|
2960 | CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) |
---|
2961 | CASE ( 0 ) |
---|
2962 | CALL mpprecv( 1, zt3ew(1,1,1,2), imigr, noea ) |
---|
2963 | CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) |
---|
2964 | CASE ( 1 ) |
---|
2965 | CALL mpprecv( 2, zt3we(1,1,1,2), imigr, nowe ) |
---|
2966 | END SELECT |
---|
2967 | ! |
---|
2968 | SELECT CASE ( nbondi_bdy(ib_bdy) ) |
---|
2969 | CASE ( -1 ) |
---|
2970 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
2971 | CASE ( 0 ) |
---|
2972 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
2973 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
2974 | CASE ( 1 ) |
---|
2975 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
2976 | END SELECT |
---|
2977 | ! |
---|
2978 | ! ! Write Dirichlet lateral conditions |
---|
2979 | iihom = nlci-jpreci |
---|
2980 | ! |
---|
2981 | SELECT CASE ( nbondi_bdy_b(ib_bdy) ) |
---|
2982 | CASE ( -1 ) |
---|
2983 | DO jl = 1, jpreci |
---|
2984 | ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) |
---|
2985 | END DO |
---|
2986 | CASE ( 0 ) |
---|
2987 | DO jl = 1, jpreci |
---|
2988 | ptab(jl ,:,:) = zt3we(:,jl,:,2) |
---|
2989 | ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) |
---|
2990 | END DO |
---|
2991 | CASE ( 1 ) |
---|
2992 | DO jl = 1, jpreci |
---|
2993 | ptab(jl ,:,:) = zt3we(:,jl,:,2) |
---|
2994 | END DO |
---|
2995 | END SELECT |
---|
2996 | |
---|
2997 | |
---|
2998 | ! 3. North and south directions |
---|
2999 | ! ----------------------------- |
---|
3000 | ! always closed : we play only with the neighbours |
---|
3001 | ! |
---|
3002 | IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions |
---|
3003 | ijhom = nlcj-nrecj |
---|
3004 | DO jl = 1, jprecj |
---|
3005 | zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) |
---|
3006 | zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) |
---|
3007 | END DO |
---|
3008 | ENDIF |
---|
3009 | ! |
---|
3010 | ! ! Migrations |
---|
3011 | imigr = jprecj * jpi * jpk |
---|
3012 | ! |
---|
3013 | SELECT CASE ( nbondj_bdy(ib_bdy) ) |
---|
3014 | CASE ( -1 ) |
---|
3015 | CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req1 ) |
---|
3016 | CASE ( 0 ) |
---|
3017 | CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) |
---|
3018 | CALL mppsend( 4, zt3sn(1,1,1,1), imigr, nono, ml_req2 ) |
---|
3019 | CASE ( 1 ) |
---|
3020 | CALL mppsend( 3, zt3ns(1,1,1,1), imigr, noso, ml_req1 ) |
---|
3021 | END SELECT |
---|
3022 | ! |
---|
3023 | SELECT CASE ( nbondj_bdy_b(ib_bdy) ) |
---|
3024 | CASE ( -1 ) |
---|
3025 | CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) |
---|
3026 | CASE ( 0 ) |
---|
3027 | CALL mpprecv( 3, zt3ns(1,1,1,2), imigr, nono ) |
---|
3028 | CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) |
---|
3029 | CASE ( 1 ) |
---|
3030 | CALL mpprecv( 4, zt3sn(1,1,1,2), imigr, noso ) |
---|
3031 | END SELECT |
---|
3032 | ! |
---|
3033 | SELECT CASE ( nbondj_bdy(ib_bdy) ) |
---|
3034 | CASE ( -1 ) |
---|
3035 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3036 | CASE ( 0 ) |
---|
3037 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3038 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
3039 | CASE ( 1 ) |
---|
3040 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3041 | END SELECT |
---|
3042 | ! |
---|
3043 | ! ! Write Dirichlet lateral conditions |
---|
3044 | ijhom = nlcj-jprecj |
---|
3045 | ! |
---|
3046 | SELECT CASE ( nbondj_bdy_b(ib_bdy) ) |
---|
3047 | CASE ( -1 ) |
---|
3048 | DO jl = 1, jprecj |
---|
3049 | ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) |
---|
3050 | END DO |
---|
3051 | CASE ( 0 ) |
---|
3052 | DO jl = 1, jprecj |
---|
3053 | ptab(:,jl ,:) = zt3sn(:,jl,:,2) |
---|
3054 | ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) |
---|
3055 | END DO |
---|
3056 | CASE ( 1 ) |
---|
3057 | DO jl = 1, jprecj |
---|
3058 | ptab(:,jl,:) = zt3sn(:,jl,:,2) |
---|
3059 | END DO |
---|
3060 | END SELECT |
---|
3061 | |
---|
3062 | |
---|
3063 | ! 4. north fold treatment |
---|
3064 | ! ----------------------- |
---|
3065 | ! |
---|
3066 | IF( npolj /= 0) THEN |
---|
3067 | ! |
---|
3068 | SELECT CASE ( jpni ) |
---|
3069 | CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp |
---|
3070 | CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. |
---|
3071 | END SELECT |
---|
3072 | ! |
---|
3073 | ENDIF |
---|
3074 | ! |
---|
3075 | DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we ) |
---|
3076 | ! |
---|
3077 | END SUBROUTINE mpp_lnk_bdy_3d |
---|
3078 | |
---|
3079 | |
---|
3080 | SUBROUTINE mpp_lnk_bdy_2d( ptab, cd_type, psgn, ib_bdy ) |
---|
3081 | !!---------------------------------------------------------------------- |
---|
3082 | !! *** routine mpp_lnk_bdy_2d *** |
---|
3083 | !! |
---|
3084 | !! ** Purpose : Message passing management |
---|
3085 | !! |
---|
3086 | !! ** Method : Use mppsend and mpprecv function for passing BDY boundaries |
---|
3087 | !! between processors following neighboring subdomains. |
---|
3088 | !! domain parameters |
---|
3089 | !! nlci : first dimension of the local subdomain |
---|
3090 | !! nlcj : second dimension of the local subdomain |
---|
3091 | !! nbondi_bdy : mark for "east-west local boundary" |
---|
3092 | !! nbondj_bdy : mark for "north-south local boundary" |
---|
3093 | !! noea : number for local neighboring processors |
---|
3094 | !! nowe : number for local neighboring processors |
---|
3095 | !! noso : number for local neighboring processors |
---|
3096 | !! nono : number for local neighboring processors |
---|
3097 | !! |
---|
3098 | !! ** Action : ptab with update value at its periphery |
---|
3099 | !! |
---|
3100 | !!---------------------------------------------------------------------- |
---|
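      ! Illustrative call (sketch only; zfld2d and jb_bdy are assumed names):
      !     CALL mpp_lnk_bdy_2d( zfld2d, 'T', 1._wp, jb_bdy )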
3101 | REAL(wp), DIMENSION(jpi,jpj), INTENT(inout) :: ptab ! 2D array on which the boundary condition is applied |
---|
3102 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! define the nature of ptab array grid-points |
---|
3103 | ! ! = T , U , V , F , W points |
---|
3104 | REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the north fold boundary |
---|
3105 | ! ! = 1. , the sign is kept |
---|
3106 | INTEGER , INTENT(in ) :: ib_bdy ! BDY boundary set |
---|
3107 | ! |
---|
3108 | INTEGER :: ji, jj, jl ! dummy loop indices |
---|
3109 | INTEGER :: imigr, iihom, ijhom ! local integers |
---|
3110 | INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend |
---|
3111 | REAL(wp) :: zland |
---|
3112 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend |
---|
3113 | ! |
---|
3114 | REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ns, zt2sn ! 2d for north-south & south-north |
---|
3115 | REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: zt2ew, zt2we ! 2d for east-west & west-east |
---|
3116 | !!---------------------------------------------------------------------- |
---|
3117 | |
---|
3118 | ALLOCATE( zt2ns(jpi,jprecj,2), zt2sn(jpi,jprecj,2), & |
---|
3119 | & zt2ew(jpj,jpreci,2), zt2we(jpj,jpreci,2) ) |
---|
3120 | |
---|
3121 | zland = 0._wp |
---|
3122 | |
---|
3123 | ! 1. standard boundary treatment |
---|
3124 | ! ------------------------------ |
---|
3125 | |
---|
3126 | ! ! East-West boundaries |
---|
3127 | ! !* Cyclic east-west |
---|
3128 | |
---|
3129 | IF( nbondi == 2) THEN |
---|
3130 | IF (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) THEN |
---|
3131 | ptab( 1 ,:) = ptab(jpim1,:) |
---|
3132 | ptab(jpi,:) = ptab( 2 ,:) |
---|
3133 | ELSE |
---|
3134 | IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point |
---|
3135 | ptab(nlci-jpreci+1:jpi ,:) = zland ! north |
---|
3136 | ENDIF |
---|
3137 | ELSEIF(nbondi == -1) THEN |
---|
3138 | IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:) = zland ! south except F-point |
---|
3139 | ELSEIF(nbondi == 1) THEN |
---|
3140 | ptab(nlci-jpreci+1:jpi ,:) = zland ! north |
---|
3141 | ENDIF !* closed |
---|
3142 | |
---|
3143 | IF (nbondj == 2 .OR. nbondj == -1) THEN |
---|
3144 | IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj) = zland ! south except F-point |
---|
3145 | ELSEIF (nbondj == 2 .OR. nbondj == 1) THEN |
---|
3146 | ptab(:,nlcj-jprecj+1:jpj) = zland ! north |
---|
3147 | ENDIF |
---|
3148 | |
---|
3149 | ! |
---|
3150 | |
---|
3151 | ! 2. East and west directions exchange |
---|
3152 | ! ------------------------------------ |
---|
3153 | ! we play with the neighbours AND the row number because of the periodicity |
---|
3154 | ! |
---|
3155 | SELECT CASE ( nbondi_bdy(ib_bdy) ) ! Read Dirichlet lateral conditions |
---|
3156 | CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case) |
---|
3157 | iihom = nlci-nreci |
---|
3158 | DO jl = 1, jpreci |
---|
3159 | zt2ew(:,jl,1) = ptab(jpreci+jl,:) |
---|
3160 | zt2we(:,jl,1) = ptab(iihom +jl,:) |
---|
3161 | END DO |
---|
3162 | END SELECT |
---|
3163 | ! |
---|
3164 | ! ! Migrations |
---|
3165 | imigr = jpreci * jpj |
---|
3166 | ! |
---|
3167 | SELECT CASE ( nbondi_bdy(ib_bdy) ) |
---|
3168 | CASE ( -1 ) |
---|
3169 | CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req1 ) |
---|
3170 | CASE ( 0 ) |
---|
3171 | CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) |
---|
3172 | CALL mppsend( 2, zt2we(1,1,1), imigr, noea, ml_req2 ) |
---|
3173 | CASE ( 1 ) |
---|
3174 | CALL mppsend( 1, zt2ew(1,1,1), imigr, nowe, ml_req1 ) |
---|
3175 | END SELECT |
---|
3176 | ! |
---|
3177 | SELECT CASE ( nbondi_bdy_b(ib_bdy) ) |
---|
3178 | CASE ( -1 ) |
---|
3179 | CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) |
---|
3180 | CASE ( 0 ) |
---|
3181 | CALL mpprecv( 1, zt2ew(1,1,2), imigr, noea ) |
---|
3182 | CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) |
---|
3183 | CASE ( 1 ) |
---|
3184 | CALL mpprecv( 2, zt2we(1,1,2), imigr, nowe ) |
---|
3185 | END SELECT |
---|
3186 | ! |
---|
3187 | SELECT CASE ( nbondi_bdy(ib_bdy) ) |
---|
3188 | CASE ( -1 ) |
---|
3189 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3190 | CASE ( 0 ) |
---|
3191 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3192 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
3193 | CASE ( 1 ) |
---|
3194 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3195 | END SELECT |
---|
3196 | ! |
---|
3197 | ! ! Write Dirichlet lateral conditions |
---|
3198 | iihom = nlci-jpreci |
---|
3199 | ! |
---|
3200 | SELECT CASE ( nbondi_bdy_b(ib_bdy) ) |
---|
3201 | CASE ( -1 ) |
---|
3202 | DO jl = 1, jpreci |
---|
3203 | ptab(iihom+jl,:) = zt2ew(:,jl,2) |
---|
3204 | END DO |
---|
3205 | CASE ( 0 ) |
---|
3206 | DO jl = 1, jpreci |
---|
3207 | ptab(jl ,:) = zt2we(:,jl,2) |
---|
3208 | ptab(iihom+jl,:) = zt2ew(:,jl,2) |
---|
3209 | END DO |
---|
3210 | CASE ( 1 ) |
---|
3211 | DO jl = 1, jpreci |
---|
3212 | ptab(jl ,:) = zt2we(:,jl,2) |
---|
3213 | END DO |
---|
3214 | END SELECT |
---|
3215 | |
---|
3216 | |
---|
3217 | ! 3. North and south directions |
---|
3218 | ! ----------------------------- |
---|
3219 | ! always closed : we play only with the neighbours |
---|
3220 | ! |
---|
3221 | IF( nbondj_bdy(ib_bdy) /= 2 ) THEN ! Read Dirichlet lateral conditions |
---|
3222 | ijhom = nlcj-nrecj |
---|
3223 | DO jl = 1, jprecj |
---|
3224 | zt2sn(:,jl,1) = ptab(:,ijhom +jl) |
---|
3225 | zt2ns(:,jl,1) = ptab(:,jprecj+jl) |
---|
3226 | END DO |
---|
3227 | ENDIF |
---|
3228 | ! |
---|
3229 | ! ! Migrations |
---|
3230 | imigr = jprecj * jpi |
---|
3231 | ! |
---|
3232 | SELECT CASE ( nbondj_bdy(ib_bdy) ) |
---|
3233 | CASE ( -1 ) |
---|
3234 | CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req1 ) |
---|
3235 | CASE ( 0 ) |
---|
3236 | CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) |
---|
3237 | CALL mppsend( 4, zt2sn(1,1,1), imigr, nono, ml_req2 ) |
---|
3238 | CASE ( 1 ) |
---|
3239 | CALL mppsend( 3, zt2ns(1,1,1), imigr, noso, ml_req1 ) |
---|
3240 | END SELECT |
---|
3241 | ! |
---|
3242 | SELECT CASE ( nbondj_bdy_b(ib_bdy) ) |
---|
3243 | CASE ( -1 ) |
---|
3244 | CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) |
---|
3245 | CASE ( 0 ) |
---|
3246 | CALL mpprecv( 3, zt2ns(1,1,2), imigr, nono ) |
---|
3247 | CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) |
---|
3248 | CASE ( 1 ) |
---|
3249 | CALL mpprecv( 4, zt2sn(1,1,2), imigr, noso ) |
---|
3250 | END SELECT |
---|
3251 | ! |
---|
3252 | SELECT CASE ( nbondj_bdy(ib_bdy) ) |
---|
3253 | CASE ( -1 ) |
---|
3254 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3255 | CASE ( 0 ) |
---|
3256 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3257 | IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) |
---|
3258 | CASE ( 1 ) |
---|
3259 | IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) |
---|
3260 | END SELECT |
---|
3261 | ! |
---|
3262 | ! ! Write Dirichlet lateral conditions |
---|
3263 | ijhom = nlcj-jprecj |
---|
3264 | ! |
---|
3265 | SELECT CASE ( nbondj_bdy_b(ib_bdy) ) |
---|
3266 | CASE ( -1 ) |
---|
3267 | DO jl = 1, jprecj |
---|
3268 | ptab(:,ijhom+jl) = zt2ns(:,jl,2) |
---|
3269 | END DO |
---|
3270 | CASE ( 0 ) |
---|
3271 | DO jl = 1, jprecj |
---|
3272 | ptab(:,jl ) = zt2sn(:,jl,2) |
---|
3273 | ptab(:,ijhom+jl) = zt2ns(:,jl,2) |
---|
3274 | END DO |
---|
3275 | CASE ( 1 ) |
---|
3276 | DO jl = 1, jprecj |
---|
3277 | ptab(:,jl) = zt2sn(:,jl,2) |
---|
3278 | END DO |
---|
3279 | END SELECT |
---|
3280 | |
---|
3281 | |
---|
3282 | ! 4. north fold treatment |
---|
3283 | ! ----------------------- |
---|
3284 | ! |
---|
3285 | IF( npolj /= 0) THEN |
---|
3286 | ! |
---|
3287 | SELECT CASE ( jpni ) |
---|
3288 | CASE ( 1 ) ; CALL lbc_nfd ( ptab, cd_type, psgn ) ! only 1 northern proc, no mpp |
---|
3289 | CASE DEFAULT ; CALL mpp_lbc_north( ptab, cd_type, psgn ) ! for all northern procs. |
---|
3290 | END SELECT |
---|
3291 | ! |
---|
3292 | ENDIF |
---|
3293 | ! |
---|
3294 | DEALLOCATE( zt2ns, zt2sn, zt2ew, zt2we ) |
---|
3295 | ! |
---|
3296 | END SUBROUTINE mpp_lnk_bdy_2d |
---|
3297 | |
---|
3298 | SUBROUTINE mpi_init_opa( ldtxt, ksft, code ) |
---|
3299 | !!--------------------------------------------------------------------- |
---|
3300 | !! *** routine mpi_init_opa *** |
---|
3301 | !! |
---|
3302 | !! ** Purpose :: export and attach an MPI buffer for bsend |
---|
3303 | !! |
---|
3304 | !! ** Method :: buffer size defined in the namelist; if 0, no buffer attachment, |
---|
3305 | !! just the classical mpi_init |
---|
3306 | !! |
---|
3307 | !! History :: 01/11 :: IDRIS initial version for IBM only |
---|
3308 | !! 08/04 :: R. Benshila, generalisation |
---|
3309 | !!--------------------------------------------------------------------- |
---|
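      ! Note: when the namelist value nn_buffer is > 0, a buffer 'tampon' of nn_buffer elements is
      ! allocated and attached with mpi_buffer_attach so that buffered sends (mpi_bsend) can be used;
      ! with nn_buffer = 0 only the classical mpi_init path is taken.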
3310 | CHARACTER(len=*),DIMENSION(:), INTENT( out) :: ldtxt |
---|
3311 | INTEGER , INTENT(inout) :: ksft |
---|
3312 | INTEGER , INTENT( out) :: code |
---|
3313 | INTEGER :: ierr, ji |
---|
3314 | LOGICAL :: mpi_was_called |
---|
3315 | !!--------------------------------------------------------------------- |
---|
3316 | ! |
---|
3317 | CALL mpi_initialized( mpi_was_called, code ) ! MPI initialization |
---|
3318 | IF ( code /= MPI_SUCCESS ) THEN |
---|
3319 | DO ji = 1, SIZE(ldtxt) |
---|
3320 | IF( TRIM(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode |
---|
3321 | END DO |
---|
3322 | WRITE(*, cform_err) |
---|
3323 | WRITE(*, *) ' lib_mpp: Error in routine mpi_initialized' |
---|
3324 | CALL mpi_abort( mpi_comm_world, code, ierr ) |
---|
3325 | ENDIF |
---|
3326 | ! |
---|
3327 | IF( .NOT. mpi_was_called ) THEN |
---|
3328 | CALL mpi_init( code ) |
---|
3329 | CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code ) |
---|
3330 | IF ( code /= MPI_SUCCESS ) THEN |
---|
3331 | DO ji = 1, SIZE(ldtxt) |
---|
3332 | IF( TRIM(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode |
---|
3333 | END DO |
---|
3334 | WRITE(*, cform_err) |
---|
3335 | WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup' |
---|
3336 | CALL mpi_abort( mpi_comm_world, code, ierr ) |
---|
3337 | ENDIF |
---|
3338 | ENDIF |
---|
3339 | ! |
---|
3340 | IF( nn_buffer > 0 ) THEN |
---|
3341 | WRITE(ldtxt(ksft),*) 'mpi_bsend, buffer allocation of : ', nn_buffer ; ksft = ksft + 1 |
---|
3342 | ! Buffer allocation and attachment |
---|
3343 | ALLOCATE( tampon(nn_buffer), stat = ierr ) |
---|
3344 | IF( ierr /= 0 ) THEN |
---|
3345 | DO ji = 1, SIZE(ldtxt) |
---|
3346 | IF( TRIM(ldtxt(ji)) /= '' ) WRITE(*,*) ldtxt(ji) ! control print of mynode |
---|
3347 | END DO |
---|
3348 | WRITE(*, cform_err) |
---|
3349 | WRITE(*, *) ' lib_mpp: Error in ALLOCATE', ierr |
---|
3350 | CALL mpi_abort( mpi_comm_world, code, ierr ) |
---|
3351 | END IF |
---|
3352 | CALL mpi_buffer_attach( tampon, nn_buffer, code ) |
---|
3353 | ENDIF |
---|
3354 | ! |
---|
3355 | END SUBROUTINE mpi_init_opa |
---|
3356 | |
---|
3357 | SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype) |
---|
3358 | !!--------------------------------------------------------------------- |
---|
3359 | !! Routine DDPDD_MPI: used by reduction operator MPI_SUMDD |
---|
3360 | !! |
---|
3361 | !! Modification of original codes written by David H. Bailey |
---|
3362 | !! This subroutine computes yddb(i) = ydda(i)+yddb(i) |
---|
3363 | !!--------------------------------------------------------------------- |
---|
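      ! Note: each COMPLEX(wp) value carries a double-double number, the real part holding the
      ! high-order word of the running sum and the imaginary part the accumulated low-order error;
      ! this is what makes the MPI_SUMDD reduction more accurate than a plain sum of REAL(wp) values.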
3364 | INTEGER, INTENT(in) :: ilen, itype |
---|
3365 | COMPLEX(wp), DIMENSION(ilen), INTENT(in) :: ydda |
---|
3366 | COMPLEX(wp), DIMENSION(ilen), INTENT(inout) :: yddb |
---|
3367 | ! |
---|
3368 | REAL(wp) :: zerr, zt1, zt2 ! local work variables |
---|
3369 | INTEGER :: ji, ztmp ! local scalar |
---|
3370 | |
---|
3371 | ztmp = itype ! avoid compilation warning |
---|
3372 | |
---|
3373 | DO ji=1,ilen |
---|
3374 | ! Compute ydda + yddb using Knuth's trick. |
---|
3375 | zt1 = real(ydda(ji)) + real(yddb(ji)) |
---|
3376 | zerr = zt1 - real(ydda(ji)) |
---|
3377 | zt2 = ((real(yddb(ji)) - zerr) + (real(ydda(ji)) - (zt1 - zerr))) & |
---|
3378 | + aimag(ydda(ji)) + aimag(yddb(ji)) |
---|
3379 | |
---|
3380 | ! The result is zt1 + zt2, after normalization. |
---|
3381 | yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp ) |
---|
3382 | END DO |
---|
3383 | |
---|
3384 | END SUBROUTINE DDPDD_MPI |
---|
3385 | |
---|
3386 | SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, pr2dj) |
---|
3387 | !!--------------------------------------------------------------------- |
---|
3388 | !! *** routine mpp_lbc_north_icb *** |
---|
3389 | !! |
---|
3390 | !! ** Purpose : Ensure proper north fold horizontal boundary condition |
---|
3391 | !! in mpp configuration in case of jpni > 1 and for 2d |
---|
3392 | !! array with outer extra halo |
---|
3393 | !! |
---|
3394 | !! ** Method : North fold condition and mpp with more than one proc |
---|
3395 | !! in i-direction require a specific treatment. We gather |
---|
3396 | !! the 4+2*jpr2dj northern lines of the global domain on 1 |
---|
3397 | !! processor and apply lbc north-fold on this sub array. |
---|
3398 | !! Then we scatter the north fold array back to the processors. |
---|
3399 | !! This version accounts for an extra halo with icebergs. |
---|
3400 | !! |
---|
3401 | !!---------------------------------------------------------------------- |
---|
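      ! Illustrative call (sketch only; zberg and jexh are hypothetical names):
      !     CALL mpp_lbc_north_icb( zberg, 'T', 1._wp, pr2dj=jexh )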
3402 | REAL(wp), DIMENSION(:,:), INTENT(inout) :: pt2d ! 2D array with extra halo |
---|
3403 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of pt2d grid-points |
---|
3404 | ! ! = T , U , V , F or W -points |
---|
3405 | REAL(wp) , INTENT(in ) :: psgn ! = -1. the sign change across the |
---|
3406 | !! ! north fold, = 1. otherwise |
---|
3407 | INTEGER, OPTIONAL , INTENT(in ) :: pr2dj ! number of extra outer halo rows in j |
---|
3408 | INTEGER :: ji, jj, jr |
---|
3409 | INTEGER :: ierr, itaille, ildi, ilei, iilb |
---|
3410 | INTEGER :: ijpj, ij, iproc, ipr2dj |
---|
3411 | ! |
---|
3412 | REAL(wp), DIMENSION(:,:) , ALLOCATABLE :: ztab_e, znorthloc_e |
---|
3413 | REAL(wp), DIMENSION(:,:,:), ALLOCATABLE :: znorthgloio_e |
---|
3414 | |
---|
3415 | !!---------------------------------------------------------------------- |
---|
3416 | ! |
---|
3417 | ijpj=4 |
---|
3418 | IF( PRESENT(pr2dj) ) THEN ! use of additional halos |
---|
3419 | ipr2dj = pr2dj |
---|
3420 | ELSE |
---|
3421 | ipr2dj = 0 |
---|
3422 | ENDIF |
---|
3423 | ALLOCATE( ztab_e(jpiglo,4+2*ipr2dj), znorthloc_e(jpi,4+2*ipr2dj), znorthgloio_e(jpi,4+2*ipr2dj,jpni) ) |
---|
3424 | |
---|
3425 | ! |
---|
3426 | ztab_e(:,:) = 0.e0 |
---|
3427 | |
---|
3428 | ij=0 |
---|
3429 | ! put in znorthloc_e the last 4 (+ 2*ipr2dj extra-halo) jlines of pt2d |
---|
3430 | DO jj = nlcj - ijpj + 1 - ipr2dj, nlcj +ipr2dj |
---|
3431 | ij = ij + 1 |
---|
3432 | DO ji = 1, jpi |
---|
3433 | znorthloc_e(ji,ij)=pt2d(ji,jj) |
---|
3434 | END DO |
---|
3435 | END DO |
---|
3436 | ! |
---|
3437 | itaille = jpi * ( ijpj + 2 * ipr2dj ) |
---|
3438 | CALL MPI_ALLGATHER( znorthloc_e(1,1) , itaille, MPI_DOUBLE_PRECISION, & |
---|
3439 | & znorthgloio_e(1,1,1), itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) |
---|
3440 | ! |
---|
3441 | DO jr = 1, ndim_rank_north ! recover the global north array |
---|
3442 | iproc = nrank_north(jr) + 1 |
---|
3443 | ildi = nldit (iproc) |
---|
3444 | ilei = nleit (iproc) |
---|
3445 | iilb = nimppt(iproc) |
---|
3446 | DO jj = 1, ijpj+2*ipr2dj |
---|
3447 | DO ji = ildi, ilei |
---|
3448 | ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr) |
---|
3449 | END DO |
---|
3450 | END DO |
---|
3451 | END DO |
---|
3452 | |
---|
3453 | |
---|
3454 | ! 2. North-Fold boundary conditions |
---|
3455 | ! ---------------------------------- |
---|
3456 | CALL lbc_nfd( ztab_e(:,:), cd_type, psgn, pr2dj = ipr2dj ) |
---|
3457 | |
---|
3458 | ij = ipr2dj |
---|
3459 | !! Scatter back to pt2d |
---|
3460 | DO jj = nlcj - ijpj + 1 , nlcj +ipr2dj |
---|
3461 | ij = ij +1 |
---|
3462 | DO ji= 1, nlci |
---|
3463 | pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij) |
---|
3464 | END DO |
---|
3465 | END DO |
---|
3466 | ! |
---|
3467 | DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e ) |
---|
3468 | ! |
---|
3469 | END SUBROUTINE mpp_lbc_north_icb |
---|
3470 | |
---|
3471 | SUBROUTINE mpp_lnk_2d_icb( pt2d, cd_type, psgn, jpri, jprj ) |
---|
3472 | !!---------------------------------------------------------------------- |
---|
3473 | !! *** routine mpp_lnk_2d_icb *** |
---|
3474 | !! |
---|
3475 | !! ** Purpose : Message passing management for 2d array (with extra halo and icebergs) |
---|
3476 | !! |
---|
3477 | !! ** Method : Use mppsend and mpprecv function for passing mask |
---|
3478 | !! between processors following neighboring subdomains. |
---|
3479 | !! domain parameters |
---|
3480 | !! nlci : first dimension of the local subdomain |
---|
3481 | !! nlcj : second dimension of the local subdomain |
---|
3482 | !! jpri : number of rows for extra outer halo |
---|
3483 | !! jprj : number of columns for extra outer halo |
---|
3484 | !! nbondi : mark for "east-west local boundary" |
---|
3485 | !! nbondj : mark for "north-south local boundary" |
---|
3486 | !! noea : number for local neighboring processors |
---|
3487 | !! nowe : number for local neighboring processors |
---|
3488 | !! noso : number for local neighboring processors |
---|
3489 | !! nono : number for local neighboring processors |
---|
3490 | !! |
---|
3491 | !!---------------------------------------------------------------------- |
---|
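      ! Illustrative call (sketch only; zberg, kexti and kextj are hypothetical names; zberg must be
      ! declared with the matching extra halo, DIMENSION(1-kexti:jpi+kexti,1-kextj:jpj+kextj)):
      !     CALL mpp_lnk_2d_icb( zberg, 'T', 1._wp, kexti, kextj )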
3492 | INTEGER , INTENT(in ) :: jpri |
---|
3493 | INTEGER , INTENT(in ) :: jprj |
---|
3494 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,1-jprj:jpj+jprj), INTENT(inout) :: pt2d ! 2D array with extra halo |
---|
3495 | CHARACTER(len=1) , INTENT(in ) :: cd_type ! nature of ptab array grid-points |
---|
3496 | ! ! = T , U , V , F , W and I points |
---|
3497 | REAL(wp) , INTENT(in ) :: psgn ! =-1 the sign change across the |
---|
3498 | !! ! north boundary, = 1. otherwise |
---|
3499 | INTEGER :: jl ! dummy loop indices |
---|
3500 | INTEGER :: imigr, iihom, ijhom ! temporary integers |
---|
3501 | INTEGER :: ipreci, iprecj ! temporary integers |
---|
3502 | INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend |
---|
3503 | INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend |
---|
3504 | !! |
---|
3505 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dns |
---|
3506 | REAL(wp), DIMENSION(1-jpri:jpi+jpri,jprecj+jprj,2) :: r2dsn |
---|
3507 | REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dwe |
---|
3508 | REAL(wp), DIMENSION(1-jprj:jpj+jprj,jpreci+jpri,2) :: r2dew |
---|
3509 | !!---------------------------------------------------------------------- |
---|
3510 | |
---|
3511 | ipreci = jpreci + jpri ! take into account outer extra 2D overlap area |
---|
3512 | iprecj = jprecj + jprj |
---|
3513 | |
---|
3514 | |
---|
3515 | ! 1. standard boundary treatment |
---|
3516 | ! ------------------------------ |
---|
3517 | ! Order matters here ! |
---|
3518 | ! |
---|
3519 | ! ! East-West boundaries |
---|
3520 | ! !* Cyclic east-west |
---|
3521 | IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN |
---|
3522 | pt2d(1-jpri: 1 ,:) = pt2d(jpim1-jpri: jpim1 ,:) ! east |
---|
3523 | pt2d( jpi :jpi+jpri,:) = pt2d( 2 :2+jpri,:) ! west |
---|
3524 | ! |
---|
3525 | ELSE !* closed |
---|
3526 | IF( .NOT. cd_type == 'F' ) pt2d( 1-jpri :jpreci ,:) = 0.e0 ! south except at F-point |
---|
3527 | pt2d(nlci-jpreci+1:jpi+jpri,:) = 0.e0 ! north |
---|
3528 | ENDIF |
---|
3529 | ! |
---|
3530 | |
---|
3531 | ! north fold treatment |
---|
3532 | ! ----------------------- |
---|
3533 | IF( npolj /= 0 ) THEN |
---|
3534 | ! |
---|
3535 | SELECT CASE ( jpni ) |
---|
3536 | CASE ( 1 ) ; CALL lbc_nfd ( pt2d(1:jpi,1:jpj+jprj), cd_type, psgn, pr2dj=jprj ) |
---|
3537 | CASE DEFAULT ; CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+jprj) , cd_type, psgn , pr2dj=jprj ) |
---|
3538 | END SELECT |
---|
3539 | ! |
---|
3540 | ENDIF |
---|
3541 | |
---|
3542 | ! 2. East and west directions exchange |
---|
3543 | ! ------------------------------------ |
---|
3544 | ! we play with the neighbours AND the row number because of the periodicity |
---|
3545 | ! |
---|
3546 | SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions |
---|
3547 | CASE ( -1, 0, 1 ) ! all except 2 (i.e. closed case) |
---|
3548 | iihom = nlci-nreci-jpri |
---|
3549 | DO jl = 1, ipreci |
---|
3550 | r2dew(:,jl,1) = pt2d(jpreci+jl,:) |
---|
3551 | r2dwe(:,jl,1) = pt2d(iihom +jl,:) |
---|
3552 | END DO |
---|
3553 | END SELECT |
---|
3554 | ! |
---|
3555 | ! ! Migrations |
---|
3556 | imigr = ipreci * ( jpj + 2*jprj) |
---|
3557 | ! |
---|
3558 | SELECT CASE ( nbondi ) |
---|
3559 | CASE ( -1 ) |
---|
3560 | CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req1 ) |
---|
3561 | CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) |
---|
3562 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3563 | CASE ( 0 ) |
---|
3564 | CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) |
---|
3565 | CALL mppsend( 2, r2dwe(1-jprj,1,1), imigr, noea, ml_req2 ) |
---|
3566 | CALL mpprecv( 1, r2dew(1-jprj,1,2), imigr, noea ) |
---|
3567 | CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) |
---|
3568 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3569 | IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) |
---|
3570 | CASE ( 1 ) |
---|
3571 | CALL mppsend( 1, r2dew(1-jprj,1,1), imigr, nowe, ml_req1 ) |
---|
3572 | CALL mpprecv( 2, r2dwe(1-jprj,1,2), imigr, nowe ) |
---|
3573 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3574 | END SELECT |
---|
3575 | ! |
---|
3576 | ! ! Write Dirichlet lateral conditions |
---|
3577 | iihom = nlci - jpreci |
---|
3578 | ! |
---|
3579 | SELECT CASE ( nbondi ) |
---|
3580 | CASE ( -1 ) |
---|
3581 | DO jl = 1, ipreci |
---|
3582 | pt2d(iihom+jl,:) = r2dew(:,jl,2) |
---|
3583 | END DO |
---|
3584 | CASE ( 0 ) |
---|
3585 | DO jl = 1, ipreci |
---|
3586 | pt2d(jl-jpri,:) = r2dwe(:,jl,2) |
---|
3587 | pt2d( iihom+jl,:) = r2dew(:,jl,2) |
---|
3588 | END DO |
---|
3589 | CASE ( 1 ) |
---|
3590 | DO jl = 1, ipreci |
---|
3591 | pt2d(jl-jpri,:) = r2dwe(:,jl,2) |
---|
3592 | END DO |
---|
3593 | END SELECT |
---|
3594 | |
---|
3595 | |
---|
3596 | ! 3. North and south directions |
---|
3597 | ! ----------------------------- |
---|
3598 | ! always closed : we play only with the neighbours |
---|
3599 | ! |
---|
3600 | IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions |
---|
3601 | ijhom = nlcj-nrecj-jprj |
---|
3602 | DO jl = 1, iprecj |
---|
3603 | r2dsn(:,jl,1) = pt2d(:,ijhom +jl) |
---|
3604 | r2dns(:,jl,1) = pt2d(:,jprecj+jl) |
---|
3605 | END DO |
---|
3606 | ENDIF |
---|
3607 | ! |
---|
3608 | ! ! Migrations |
---|
3609 | imigr = iprecj * ( jpi + 2*jpri ) |
---|
3610 | ! |
---|
3611 | SELECT CASE ( nbondj ) |
---|
3612 | CASE ( -1 ) |
---|
3613 | CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req1 ) |
---|
3614 | CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) |
---|
3615 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3616 | CASE ( 0 ) |
---|
3617 | CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) |
---|
3618 | CALL mppsend( 4, r2dsn(1-jpri,1,1), imigr, nono, ml_req2 ) |
---|
3619 | CALL mpprecv( 3, r2dns(1-jpri,1,2), imigr, nono ) |
---|
3620 | CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) |
---|
3621 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3622 | IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) |
---|
3623 | CASE ( 1 ) |
---|
3624 | CALL mppsend( 3, r2dns(1-jpri,1,1), imigr, noso, ml_req1 ) |
---|
3625 | CALL mpprecv( 4, r2dsn(1-jpri,1,2), imigr, noso ) |
---|
3626 | IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) |
---|
3627 | END SELECT |
---|
3628 | ! |
---|
3629 | ! ! Write Dirichlet lateral conditions |
---|
3630 | ijhom = nlcj - jprecj |
---|
3631 | ! |
---|
3632 | SELECT CASE ( nbondj ) |
---|
3633 | CASE ( -1 ) |
---|
3634 | DO jl = 1, iprecj |
---|
3635 | pt2d(:,ijhom+jl) = r2dns(:,jl,2) |
---|
3636 | END DO |
---|
3637 | CASE ( 0 ) |
---|
3638 | DO jl = 1, iprecj |
---|
3639 | pt2d(:,jl-jprj) = r2dsn(:,jl,2) |
---|
3640 | pt2d(:,ijhom+jl ) = r2dns(:,jl,2) |
---|
3641 | END DO |
---|
3642 | CASE ( 1 ) |
---|
3643 | DO jl = 1, iprecj |
---|
3644 | pt2d(:,jl-jprj) = r2dsn(:,jl,2) |
---|
3645 | END DO |
---|
3646 | END SELECT |
---|
3647 | |
---|
3648 | END SUBROUTINE mpp_lnk_2d_icb |
---|
3649 | #else |
---|
3650 | !!---------------------------------------------------------------------- |
---|
3651 | !! Default case: Dummy module for shared memory computing |
---|
3652 | !!---------------------------------------------------------------------- |
---|
3653 | USE in_out_manager |
---|
3654 | |
---|
3655 | INTERFACE mpp_sum |
---|
3656 | MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i, mppsum_realdd, mppsum_a_realdd |
---|
3657 | END INTERFACE |
---|
3658 | INTERFACE mpp_max |
---|
3659 | MODULE PROCEDURE mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real |
---|
3660 | END INTERFACE |
---|
3661 | INTERFACE mpp_min |
---|
3662 | MODULE PROCEDURE mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real |
---|
3663 | END INTERFACE |
---|
3664 | INTERFACE mpp_minloc |
---|
3665 | MODULE PROCEDURE mpp_minloc2d ,mpp_minloc3d |
---|
3666 | END INTERFACE |
---|
3667 | INTERFACE mpp_maxloc |
---|
3668 | MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d |
---|
3669 | END INTERFACE |
---|
3670 | |
---|
3671 | LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .FALSE. !: mpp flag |
---|
3672 | LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used) |
---|
3673 | INTEGER :: ncomm_ice |
---|
3674 | INTEGER, PUBLIC :: mpi_comm_opa ! opa local communicator |
---|
3675 | !!---------------------------------------------------------------------- |
---|
3676 | CONTAINS |
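
   ! The routines below are inert stand-ins for the MPI versions: they let a
   ! mono-processor build (without key_mpp_mpi) link against the same generic
   ! interfaces, and each prints a "You should not have seen this print"
   ! message if it is ever actually reached at run time.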

   INTEGER FUNCTION lib_mpp_alloc(kumout)         ! Dummy function
      INTEGER, INTENT(in) ::   kumout
      lib_mpp_alloc = 0
   END FUNCTION lib_mpp_alloc

   FUNCTION mynode( ldtxt, ldname, kumnam_ref, knumnam_cfg, kumond, kstop, localComm ) RESULT (function_value)
      INTEGER, OPTIONAL, INTENT(in) ::   localComm
      CHARACTER(len=*), DIMENSION(:) ::   ldtxt
      CHARACTER(len=*) ::   ldname
      INTEGER ::   kumnam_ref, knumnam_cfg, kumond, kstop
      INTEGER ::   function_value        ! explicit result type (always 0 in the mono-processor case)
      IF( PRESENT( localComm ) )   mpi_comm_opa = localComm
      function_value = 0
      IF( .FALSE. )   ldtxt(:) = 'never done'
      CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., 1 )
   END FUNCTION mynode

   SUBROUTINE mppsync                             ! Dummy routine
   END SUBROUTINE mppsync

   SUBROUTINE mpp_sum_as( parr, kdim, kcom )      ! Dummy routine
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_as: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mpp_sum_as

   SUBROUTINE mpp_sum_a2s( parr, kdim, kcom )     ! Dummy routine
      REAL   , DIMENSION(:,:) ::   parr
      INTEGER                 ::   kdim
      INTEGER, OPTIONAL       ::   kcom
      WRITE(*,*) 'mpp_sum_a2s: You should not have seen this print! error?', kdim, parr(1,1), kcom
   END SUBROUTINE mpp_sum_a2s

   SUBROUTINE mpp_sum_ai( karr, kdim, kcom )      ! Dummy routine
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mpp_sum_ai: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mpp_sum_ai

   SUBROUTINE mpp_sum_s( psca, kcom )             ! Dummy routine
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mpp_sum_s

   SUBROUTINE mpp_sum_i( kint, kcom )             ! Dummy routine
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mpp_sum_i
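
   ! Example of how the mpp_sum generic interface above resolves in this dummy
   ! build (a sketch: zarea and ierr are placeholder variable names):
   !    CALL mpp_sum( zarea )       ! resolves to mpp_sum_s (scalar REAL)
   !    CALL mpp_sum( ierr  )       ! resolves to mpp_sum_i (scalar INTEGER)
   ! Such calls are normally guarded by the lk_mpp flag declared above, so
   ! reaching one of these stubs indicates a missing guard (hence the print).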

   SUBROUTINE mppsum_realdd( ytab, kcom )
      COMPLEX(wp), INTENT(inout)       ::   ytab    ! input scalar
      INTEGER    , INTENT(in), OPTIONAL ::   kcom
      WRITE(*,*) 'mppsum_realdd: You should not have seen this print! error?', ytab
   END SUBROUTINE mppsum_realdd

   SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
      INTEGER                     , INTENT(in)    ::   kdim    ! size of ytab
      COMPLEX(wp), DIMENSION(kdim), INTENT(inout) ::   ytab    ! input array
      INTEGER                     , INTENT(in), OPTIONAL ::   kcom
      WRITE(*,*) 'mppsum_a_realdd: You should not have seen this print! error?', kdim, ytab(1), kcom
   END SUBROUTINE mppsum_a_realdd

   SUBROUTINE mppmax_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmax_a_real

   SUBROUTINE mppmax_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmax_real

   SUBROUTINE mppmin_a_real( parr, kdim, kcom )
      REAL   , DIMENSION(:) ::   parr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_real: You should not have seen this print! error?', kdim, parr(1), kcom
   END SUBROUTINE mppmin_a_real

   SUBROUTINE mppmin_real( psca, kcom )
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_real: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mppmin_real

   SUBROUTINE mppmax_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmax_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmax_a_int

   SUBROUTINE mppmax_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmax_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmax_int

   SUBROUTINE mppmin_a_int( karr, kdim, kcom )
      INTEGER, DIMENSION(:) ::   karr
      INTEGER               ::   kdim
      INTEGER, OPTIONAL     ::   kcom
      WRITE(*,*) 'mppmin_a_int: You should not have seen this print! error?', kdim, karr(1), kcom
   END SUBROUTINE mppmin_a_int

   SUBROUTINE mppmin_int( kint, kcom )
      INTEGER           ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mppmin_int: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mppmin_int

   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj )
      REAL                    ::   pmin
      REAL , DIMENSION (:,:)  ::   ptab, pmask
      INTEGER                 ::   ki, kj
      WRITE(*,*) 'mpp_minloc2d: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_minloc2d

   SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk )
      REAL                     ::   pmin
      REAL , DIMENSION (:,:,:) ::   ptab, pmask
      INTEGER                  ::   ki, kj, kk
      WRITE(*,*) 'mpp_minloc3d: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_minloc3d

   SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj )
      REAL                    ::   pmax
      REAL , DIMENSION (:,:)  ::   ptab, pmask
      INTEGER                 ::   ki, kj
      WRITE(*,*) 'mpp_maxloc2d: You should not have seen this print! error?', pmax, ki, kj, ptab(1,1), pmask(1,1)
   END SUBROUTINE mpp_maxloc2d

   SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk )
      REAL                     ::   pmax
      REAL , DIMENSION (:,:,:) ::   ptab, pmask
      INTEGER                  ::   ki, kj, kk
      WRITE(*,*) 'mpp_maxloc3d: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1)
   END SUBROUTINE mpp_maxloc3d

   SUBROUTINE mppstop
      STOP      ! non MPP case, just stop the run
   END SUBROUTINE mppstop

   SUBROUTINE mpp_ini_ice( kcom, knum )
      INTEGER ::   kcom, knum
      WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom, knum
   END SUBROUTINE mpp_ini_ice

   SUBROUTINE mpp_ini_znl( knum )
      INTEGER ::   knum
      WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum
   END SUBROUTINE mpp_ini_znl

   SUBROUTINE mpp_comm_free( kcom )
      INTEGER ::   kcom
      WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom
   END SUBROUTINE mpp_comm_free
#endif

   !!----------------------------------------------------------------------
   !!   All cases:   ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam routines
   !!----------------------------------------------------------------------

   SUBROUTINE ctl_stop( cd1, cd2, cd3, cd4, cd5 ,   &
      &                 cd6, cd7, cd8, cd9, cd10 )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_stop  ***
      !!
      !! ** Purpose :   print an error message in the ocean.output file and
      !!                increment the error number (nstop) by one.
      !!----------------------------------------------------------------------
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd1, cd2, cd3, cd4, cd5
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd6, cd7, cd8, cd9, cd10
      !!----------------------------------------------------------------------
      !
      nstop = nstop + 1
      IF(lwp) THEN
         WRITE(numout,cform_err)
         IF( PRESENT(cd1 ) )   WRITE(numout,*) cd1
         IF( PRESENT(cd2 ) )   WRITE(numout,*) cd2
         IF( PRESENT(cd3 ) )   WRITE(numout,*) cd3
         IF( PRESENT(cd4 ) )   WRITE(numout,*) cd4
         IF( PRESENT(cd5 ) )   WRITE(numout,*) cd5
         IF( PRESENT(cd6 ) )   WRITE(numout,*) cd6
         IF( PRESENT(cd7 ) )   WRITE(numout,*) cd7
         IF( PRESENT(cd8 ) )   WRITE(numout,*) cd8
         IF( PRESENT(cd9 ) )   WRITE(numout,*) cd9
         IF( PRESENT(cd10) )   WRITE(numout,*) cd10
      ENDIF
      CALL FLUSH(numout    )
      IF( numstp     /= -1 )   CALL FLUSH(numstp    )
      IF( numsol     /= -1 )   CALL FLUSH(numsol    )
      IF( numevo_ice /= -1 )   CALL FLUSH(numevo_ice)
      !
      IF( PRESENT(cd1) ) THEN             ! cd1 is OPTIONAL: check its presence before testing it
         IF( cd1 == 'STOP' ) THEN
            IF(lwp) WRITE(numout,*)  'huge E-R-R-O-R : immediate stop'
            CALL mppstop()
         ENDIF
      ENDIF
      !
   END SUBROUTINE ctl_stop
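
   ! Typical use (a sketch; the message strings are placeholders): up to ten
   ! character arguments are printed on separate lines of ocean.output, e.g.
   !    CALL ctl_stop( 'module_init : unknown option', 'check the namelist' )
   ! and passing 'STOP' as the first argument forces an immediate stop via mppstop.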

   SUBROUTINE ctl_warn( cd1, cd2, cd3, cd4, cd5,   &
      &                 cd6, cd7, cd8, cd9, cd10 )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_warn  ***
      !!
      !! ** Purpose :   print a warning message in the ocean.output file and
      !!                increment the warning number (nwarn) by one.
      !!----------------------------------------------------------------------
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd1, cd2, cd3, cd4, cd5
      CHARACTER(len=*), INTENT(in), OPTIONAL ::   cd6, cd7, cd8, cd9, cd10
      !!----------------------------------------------------------------------
      !
      nwarn = nwarn + 1
      IF(lwp) THEN
         WRITE(numout,cform_war)
         IF( PRESENT(cd1 ) )   WRITE(numout,*) cd1
         IF( PRESENT(cd2 ) )   WRITE(numout,*) cd2
         IF( PRESENT(cd3 ) )   WRITE(numout,*) cd3
         IF( PRESENT(cd4 ) )   WRITE(numout,*) cd4
         IF( PRESENT(cd5 ) )   WRITE(numout,*) cd5
         IF( PRESENT(cd6 ) )   WRITE(numout,*) cd6
         IF( PRESENT(cd7 ) )   WRITE(numout,*) cd7
         IF( PRESENT(cd8 ) )   WRITE(numout,*) cd8
         IF( PRESENT(cd9 ) )   WRITE(numout,*) cd9
         IF( PRESENT(cd10) )   WRITE(numout,*) cd10
      ENDIF
      CALL FLUSH(numout)
      !
   END SUBROUTINE ctl_warn
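
   ! Typical use (a sketch; the message text is a placeholder):
   !    CALL ctl_warn( 'module_init : option ignored with this configuration' )
   ! records the warning in ocean.output and increments nwarn without stopping the run.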

   SUBROUTINE ctl_opn( knum, cdfile, cdstat, cdform, cdacce, klengh, kout, ldwp, karea )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_opn  ***
      !!
      !! ** Purpose :   Open a file and check that the required file is available.
      !!
      !! ** Method  :   Fortran open
      !!----------------------------------------------------------------------
      INTEGER          , INTENT(  out) ::   knum     ! logical unit to open
      CHARACTER(len=*) , INTENT(in   ) ::   cdfile   ! file name to open
      CHARACTER(len=*) , INTENT(in   ) ::   cdstat   ! disposition specifier
      CHARACTER(len=*) , INTENT(in   ) ::   cdform   ! formatting specifier
      CHARACTER(len=*) , INTENT(in   ) ::   cdacce   ! access specifier
      INTEGER          , INTENT(in   ) ::   klengh   ! record length
      INTEGER          , INTENT(in   ) ::   kout     ! logical unit for write diagnostics
      LOGICAL          , INTENT(in   ) ::   ldwp     ! boolean term for print
      INTEGER, OPTIONAL, INTENT(in   ) ::   karea    ! proc number
      !!
      CHARACTER(len=80) ::   clfile
      INTEGER           ::   iost
      !!----------------------------------------------------------------------
      !
      ! adapt filename
      ! --------------
      clfile = TRIM(cdfile)
      IF( PRESENT( karea ) ) THEN
         IF( karea > 1 )   WRITE(clfile, "(a,'_',i4.4)") TRIM(clfile), karea-1
      ENDIF
#if defined key_agrif
      IF( .NOT. Agrif_Root() )   clfile = TRIM(Agrif_CFixed())//'_'//TRIM(clfile)
      knum = Agrif_Get_Unit()
#else
      knum = get_unit()
#endif
      !
      iost = 0
      IF( cdacce(1:6) == 'DIRECT' ) THEN
         OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh, ERR=100, IOSTAT=iost )
      ELSE
         OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat             , ERR=100, IOSTAT=iost )
      ENDIF
      IF( iost == 0 ) THEN
         IF(ldwp) THEN
            WRITE(kout,*) '     file   : ', clfile,' open ok'
            WRITE(kout,*) '     unit   = ', knum
            WRITE(kout,*) '     status = ', cdstat
            WRITE(kout,*) '     form   = ', cdform
            WRITE(kout,*) '     access = ', cdacce
            WRITE(kout,*)
         ENDIF
      ENDIF
100   CONTINUE
      IF( iost /= 0 ) THEN
         IF(ldwp) THEN
            WRITE(kout,*)
            WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile
            WRITE(kout,*) ' =======   ===  '
            WRITE(kout,*) '           unit   = ', knum
            WRITE(kout,*) '           status = ', cdstat
            WRITE(kout,*) '           form   = ', cdform
            WRITE(kout,*) '           access = ', cdacce
            WRITE(kout,*) '           iostat = ', iost
            WRITE(kout,*) '           we stop. verify the file '
            WRITE(kout,*)
         ENDIF
         STOP 'ctl_opn bad opening'
      ENDIF
      !
   END SUBROUTINE ctl_opn
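
   ! Example (taken from the dummy mynode above): open a sequential formatted
   ! file whose unit number is returned in kumond, with diagnostics sent to
   ! unit 6 and no per-processor suffix added to the name (karea = 1):
   !    CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE., 1 )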

   SUBROUTINE ctl_nam ( kios, cdnam, ldwp )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE ctl_nam  ***
      !!
      !! ** Purpose :   print information when an error occurs while reading a namelist
      !!
      !! ** Method  :   examine the IOSTAT value returned by the namelist READ and
      !!                issue a warning (end of record/file) or stop the run (read error)
      !!----------------------------------------------------------------------
      INTEGER          , INTENT(inout) ::   kios    ! IO status after reading the namelist
      CHARACTER(len=*) , INTENT(in   ) ::   cdnam   ! group name of the namelist for which the error occurs
      LOGICAL          , INTENT(in   ) ::   ldwp    ! boolean term for print
      CHARACTER(len=5)                 ::   clios   ! string used to print iostat as characters
      !!----------------------------------------------------------------------
      !
      WRITE (clios, '(I5.0)') kios
      IF( kios < 0 ) THEN
         CALL ctl_warn( 'W A R N I N G:  end of record or file while reading namelist '   &
            &           // TRIM(cdnam) // ' iostat = ' // TRIM(clios) )
      ENDIF
      !
      IF( kios > 0 ) THEN
         CALL ctl_stop( 'E R R O R :   misspelled variable in namelist '   &
            &           // TRIM(cdnam) // ' iostat = ' // TRIM(clios) )
      ENDIF
      kios = 0
      RETURN
      !
   END SUBROUTINE ctl_nam
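
   ! Sketch of the usual calling pattern (namrun, numnam_ref and ios are
   ! placeholders for a namelist group, its unit and a local IO status,
   ! following the convention used elsewhere in NEMO):
   !       READ  ( numnam_ref, namrun, IOSTAT = ios, ERR = 901 )
   !   901 IF( ios /= 0 )   CALL ctl_nam( ios, 'namrun in reference namelist', lwp )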

   INTEGER FUNCTION get_unit()
      !!----------------------------------------------------------------------
      !!                  ***  FUNCTION get_unit  ***
      !!
      !! ** Purpose :   return the index of an unused logical unit
      !!----------------------------------------------------------------------
      LOGICAL ::   llopn
      !!----------------------------------------------------------------------
      !
      get_unit = 15      ! start the search above the low unit numbers already used by NEMO
      llopn = .TRUE.
      DO WHILE( (get_unit < 999) .AND. llopn )
         get_unit = get_unit + 1
         INQUIRE( unit = get_unit, opened = llopn )
      END DO
      IF( (get_unit == 999) .AND. llopn ) THEN
         CALL ctl_stop( 'get_unit: All logical units until 999 are used...' )
         get_unit = -1
      ENDIF
      !
   END FUNCTION get_unit

   !!----------------------------------------------------------------------
END MODULE lib_mpp