MODULE mppini
   !!======================================================================
   !!                       ***  MODULE mppini   ***
   !! Ocean initialization : distributed memory computing initialization
   !!======================================================================
   !! History :  6.0  !  1994-11  (M. Guyon)  Original code
   !!  OPA       7.0  !  1995-04  (J. Escobar, M. Imbard)
   !!            8.0  !  1998-05  (M. Imbard, J. Escobar, L. Colombet )  SHMEM and MPI versions
   !!  NEMO      1.0  !  2004-01  (G. Madec, J.M Molines)  F90 : free form , north fold jpni > 1
   !!            3.4  !  2011-10  (A. C. Coward, NOCS & J. Donners, PRACE)  add init_nfdcom
   !!            3.   !  2013-06  (I. Epicoco, S. Mocavero, CMCC)  init_nfdcom: setup avoiding MPI communication
   !!            4.0  !  2016-06  (G. Madec)  use domain configuration file instead of bathymetry file
   !!            4.0  !  2017-06  (J.M. Molines, T. Lovato)  merge of mppini and mppini_2
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!  mpp_init      : Lay out the global domain over processors with/without land processor elimination
   !!  init_ioipsl   : IOIPSL initialization in mpp
   !!  init_nfdcom   : Setup for north fold exchanges with explicit point-to-point messaging
   !!  init_doloop   : set the starting/ending indices of DO-loop used in do_loop_substitute
   !!----------------------------------------------------------------------
   USE dom_oce        ! ocean space and time domain
   USE bdy_oce        ! open BounDarY
   !
   USE lbcnfd , ONLY : isendto, nsndto   ! Setup of north fold exchanges
   USE lib_mpp        ! distributed memory computing library
   USE iom            ! nemo I/O library
   USE ioipsl         ! I/O IPSL library
   USE in_out_manager ! I/O Manager

   IMPLICIT NONE
   PRIVATE

   PUBLIC   mpp_init       ! called by nemogcm.F90
   PUBLIC   mpp_getnum     ! called by prtctl
   PUBLIC   mpp_basesplit  ! called by prtctl
   PUBLIC   mpp_is_ocean   ! called by prtctl

   INTEGER ::   numbot = -1   ! 'bottom_level' local logical unit
   INTEGER ::   numbdy = -1   ! 'bdy_msk'      local logical unit

   !!----------------------------------------------------------------------
   !! NEMO/OCE 4.0 , NEMO Consortium (2018)
   !! $Id$
   !! Software governed by the CeCILL license (see ./LICENSE)
   !!----------------------------------------------------------------------
CONTAINS

#if defined key_mpi_off
   !!----------------------------------------------------------------------
   !!   Default option :                            shared memory computing
   !!----------------------------------------------------------------------

   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!
      !! ** Method  :   Shared memory computing, set the local processor
      !!              variables to the value of the global domain
      !!----------------------------------------------------------------------
      !
      nn_hls = 1
      jpiglo = Ni0glo + 2 * nn_hls
      jpjglo = Nj0glo + 2 * nn_hls
      jpimax = jpiglo
      jpjmax = jpjglo
      jpi    = jpiglo
      jpj    = jpjglo
      jpk    = MAX( 2, jpkglo )
      jpij   = jpi*jpj
      jpni   = 1
      jpnj   = 1
      jpnij  = jpni*jpnj
      nimpp  = 1
      njmpp  = 1
      nidom  = FLIO_DOM_NONE
      !
      CALL init_doloop   ! set start/end indices of do-loop, depending on the halo width value (nn_hls)
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'mpp_init : NO massively parallel processing'
         WRITE(numout,*) '~~~~~~~~ '
         WRITE(numout,*) '   l_Iperio = ', l_Iperio, '    l_Jperio = ', l_Jperio
         WRITE(numout,*) '     njmpp  = ', njmpp
      ENDIF
      !
#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN
         CALL agrif_nemo_init()
      ENDIF
#endif
   END SUBROUTINE mpp_init

#else
   !!----------------------------------------------------------------------
   !!                   MPI massively parallel processing
   !!----------------------------------------------------------------------


   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!      If land processors are to be eliminated, this routine requires the
      !!      presence of the domain configuration file. Land processor elimination
      !!      is performed if jpni x jpnj /= jpnij. In this case, the MPP_PREP
      !!      preprocessing tool can help to define the best decomposition.
      !!
      !! ** Method  :   The global domain is distributed in smaller local domains.
      !!      The periodic condition is a function of the local domain position
      !!      (global boundary or neighbouring domain) and of the global periodicity.
      !!
      !! ** Action : - set domain parameters
      !!                    nimpp     : longitudinal index
      !!                    njmpp     : latitudinal  index
      !!                    narea     : number for local area
      !!                    mpinei    : mpi rank of the neighbouring domains (starting at 0, -1 if no neighbour)
      !!----------------------------------------------------------------------
      INTEGER ::   ji, jj, jn, jp
      INTEGER ::   ii, ij, ii2, ij2
      INTEGER ::   inijmin   ! number of oce subdomains
      INTEGER ::   inum, inum0
      INTEGER ::   ifreq, il1, imil, il2, ijm1
      INTEGER ::   ierr, ios
      INTEGER ::   inbi, inbj, iimax, ijmax, icnt1, icnt2
      INTEGER, ALLOCATABLE, DIMENSION(:    ) ::   iin, ijn
      INTEGER, ALLOCATABLE, DIMENSION(:,:  ) ::   iimppt, ijpi, ipproc
      INTEGER, ALLOCATABLE, DIMENSION(:,:  ) ::   ijmppt, ijpj
      INTEGER, ALLOCATABLE, DIMENSION(:,:  ) ::   impi
      INTEGER, ALLOCATABLE, DIMENSION(:,:,:) ::   inei
      LOGICAL ::   llbest, llauto
      LOGICAL ::   llwrtlay
      LOGICAL ::   llmpi_Iperio, llmpi_Jperio, llmpiNFold
      LOGICAL ::   ln_listonly
      LOGICAL, ALLOCATABLE, DIMENSION(:,:  ) ::   llisOce   ! does the subdomain contain some ocean points?
      LOGICAL, ALLOCATABLE, DIMENSION(:,:,:) ::   llnei     ! do the neighbours exist?
      NAMELIST/nambdy/ ln_bdy, nb_bdy, ln_coords_file, cn_coords_file,           &
         &             ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta,       &
         &             cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta,               &
         &             ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out,   &
         &             cn_ice, nn_ice_dta,                                       &
         &             ln_vol, nn_volctl, nn_rimwidth
      NAMELIST/nammpp/ jpni, jpnj, nn_hls, ln_nnogather, ln_listonly, nn_comm
      !!----------------------------------------------------------------------
      !
      llwrtlay = lwm .OR. sn_cfctl%l_layout
      !
      !  0. read namelists parameters
      ! -----------------------------------
      !
      READ  ( numnam_ref, nammpp, IOSTAT = ios, ERR = 901 )
901   IF( ios /= 0 )   CALL ctl_nam ( ios , 'nammpp in reference namelist' )
      READ  ( numnam_cfg, nammpp, IOSTAT = ios, ERR = 902 )
902   IF( ios >  0 )   CALL ctl_nam ( ios , 'nammpp in configuration namelist' )
      !
      nn_hls = MAX(1, nn_hls)   ! nn_hls must be > 0
      IF(lwp) THEN
         WRITE(numout,*) '   Namelist nammpp'
         IF( jpni < 1 .OR. jpnj < 1 ) THEN
            WRITE(numout,*) '      jpni and jpnj will be calculated automatically'
         ELSE
            WRITE(numout,*) '      processor grid extent in i                            jpni = ', jpni
            WRITE(numout,*) '      processor grid extent in j                            jpnj = ', jpnj
         ENDIF
         WRITE(numout,*) '      avoid use of mpi_allgather at the north fold    ln_nnogather = ', ln_nnogather
         WRITE(numout,*) '      halo width (applies to both rows and columns)         nn_hls = ', nn_hls
      ENDIF
      !
      IF(lwm)   WRITE( numond, nammpp )
      !
      jpiglo = Ni0glo + 2 * nn_hls
      jpjglo = Nj0glo + 2 * nn_hls
      !
      ! do we need to take into account bdy_msk?
      READ  ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903)
903   IF( ios /= 0 )   CALL ctl_nam ( ios , 'nambdy in reference namelist (mppini)' )
      READ  ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 )
904   IF( ios >  0 )   CALL ctl_nam ( ios , 'nambdy in configuration namelist (mppini)' )
      !
      IF( ln_read_cfg )                 CALL iom_open( cn_domcfg   , numbot )
      IF( ln_bdy .AND. ln_mask_file )   CALL iom_open( cn_mask_file, numbdy )
      !
      IF( ln_listonly )   CALL bestpartition( MAX(mppsize,jpni*jpnj), ldlist = .TRUE. )   ! must be done by all cores
      !
      !  1. Dimension arrays for subdomains
      ! -----------------------------------
      !
      ! If the dimensions of the MPI processes grid weren't specified in the namelist file
      ! then we calculate them here now that we have our communicator size
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'mpp_init:'
         WRITE(numout,*) '~~~~~~~~ '
      ENDIF
      IF( jpni < 1 .OR. jpnj < 1 ) THEN
         CALL bestpartition( mppsize, jpni, jpnj )          ! best mpi decomposition for mppsize mpi processes
         llauto = .TRUE.
         llbest = .TRUE.
      ELSE
         llauto = .FALSE.
         CALL bestpartition( mppsize, inbi, inbj, icnt2 )   ! best mpi decomposition for mppsize mpi processes
         ! largest subdomain size for the mpi decomposition jpni*jpnj given in the namelist
         CALL mpp_basesplit( jpiglo, jpjglo, nn_hls, jpni, jpnj, jpimax, jpjmax )
         ! largest subdomain size for the mpi decomposition inbi*inbj given by bestpartition
         CALL mpp_basesplit( jpiglo, jpjglo, nn_hls, inbi, inbj, iimax, ijmax )
         icnt1 = jpni*jpnj - mppsize   ! number of land subdomains that should be removed to use mppsize mpi processes
         IF(lwp) THEN
            WRITE(numout,9000) '   The chosen domain decomposition ', jpni, ' x ', jpnj, ' with ', icnt1, ' land subdomains'
            WRITE(numout,9002) '      - uses a total of ',  mppsize,' mpi processes'
            WRITE(numout,9000) '      - has mpi subdomains with a maximum size of (jpi = ', jpimax, ', jpj = ', jpjmax,   &
               &                                                                ', jpi*jpj = ', jpimax*jpjmax, ')'
            WRITE(numout,9000) '   The best domain decomposition ', inbi, ' x ', inbj, ' with ', icnt2, ' land subdomains'
            WRITE(numout,9002) '      - uses a total of ',  inbi*inbj-icnt2,' mpi processes'
            WRITE(numout,9000) '      - has mpi subdomains with a maximum size of (jpi = ', iimax, ', jpj = ', ijmax,   &
               &                                                                ', jpi*jpj = ', iimax* ijmax, ')'
         ENDIF
         IF( iimax*ijmax < jpimax*jpjmax ) THEN   ! chosen subdomain size is larger than the best subdomain size
            llbest = .FALSE.
            IF ( inbi*inbj-icnt2 < mppsize ) THEN
               WRITE(ctmp1,*) '   ==> You could therefore have smaller mpi subdomains with fewer mpi processes'
            ELSE
               WRITE(ctmp1,*) '   ==> You could therefore have smaller mpi subdomains with the same number of mpi processes'
            ENDIF
            CALL ctl_warn( ' ', ctmp1, ' ', '    ---   YOU ARE WASTING CPU...   ---', ' ' )
         ELSE IF ( iimax*ijmax == jpimax*jpjmax .AND. (inbi*inbj-icnt2) <  mppsize) THEN
            llbest = .FALSE.
            WRITE(ctmp1,*) '   ==> You could therefore have the same mpi subdomain size with fewer mpi processes'
            CALL ctl_warn( ' ', ctmp1, ' ', '    ---   YOU ARE WASTING CPU...   ---', ' ' )
         ELSE
            llbest = .TRUE.
         ENDIF
      ENDIF

      ! look for land mpi subdomains...
      ALLOCATE( llisOce(jpni,jpnj) )
      CALL mpp_is_ocean( llisOce )
      inijmin = COUNT( llisOce )   ! number of oce subdomains

      IF( mppsize < inijmin ) THEN   ! too many oce subdomains: can happen only if jpni and jpnj are prescribed...
         WRITE(ctmp1,9001) '   With this specified domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj
         WRITE(ctmp2,9002) '   we can eliminate only ', jpni*jpnj - inijmin, ' land mpi subdomains therefore '
         WRITE(ctmp3,9001) '   the number of ocean mpi subdomains (', inijmin,') exceeds the number of MPI processes:', mppsize
         WRITE(ctmp4,*) '   ==>>> Here is the list of the best domain decompositions you should use: '
         CALL ctl_stop( ctmp1, ctmp2, ctmp3, ' ', ctmp4, ' ' )
         CALL bestpartition( mppsize, ldlist = .TRUE. )   ! must be done by all cores
      ENDIF

      IF( mppsize > jpni*jpnj ) THEN   ! not enough mpi subdomains for the total number of mpi processes
         IF(lwp) THEN
            WRITE(numout,9003) '   The number of mpi processes: ', mppsize
            WRITE(numout,9003) '   exceeds the maximum number of subdomains (ocean+land) = ', jpni*jpnj
            WRITE(numout,9001) '   defined by the following domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj
            WRITE(numout,   *) '   You should: '
            IF( llauto ) THEN
               WRITE(numout,*) '     - either prescribe your domain decomposition with the namelist variables'
               WRITE(numout,*) '       jpni and jpnj to match the number of mpi processes you want to use, '
               WRITE(numout,*) '       even if it is not the best choice...'
               WRITE(numout,*) '     - or keep the automatic and optimal domain decomposition by picking up one'
               WRITE(numout,*) '       of the numbers of mpi processes proposed in the list below'
            ELSE
               WRITE(numout,*) '     - either properly prescribe your domain decomposition with jpni and jpnj'
               WRITE(numout,*) '       in order to be consistent with the number of mpi processes you want to use'
               WRITE(numout,*) '       even if it is not the best choice...'
               WRITE(numout,*) '     - or use the automatic and optimal domain decomposition and pick up one of'
               WRITE(numout,*) '       the domain decompositions proposed in the list below'
            ENDIF
            WRITE(numout,*)
         ENDIF
         CALL bestpartition( mppsize, ldlist = .TRUE. )   ! must be done by all cores
      ENDIF

      jpnij = mppsize   ! force jpnij definition <-- remove as many land subdomains as needed to reach this condition
      IF( mppsize > inijmin ) THEN
         WRITE(ctmp1,9003) '   The number of mpi processes: ', mppsize
         WRITE(ctmp2,9003) '   exceeds the maximum number of ocean subdomains = ', inijmin
         WRITE(ctmp3,9002) '   we suppressed ', jpni*jpnj - mppsize, ' land subdomains '
         WRITE(ctmp4,9002) '   BUT we had to keep ', mppsize - inijmin, ' land subdomains that are useless...'
         CALL ctl_warn( ctmp1, ctmp2, ctmp3, ctmp4, ' ', '    ---   YOU ARE WASTING CPU...   ---', ' ' )
      ELSE   ! mppsize = inijmin
         IF(lwp) THEN
            IF(llbest) WRITE(numout,*) '   ==> you use the best mpi decomposition'
            WRITE(numout,*)
            WRITE(numout,9003) '   Number of mpi processes: ', mppsize
            WRITE(numout,9003) '   Number of ocean subdomains = ', inijmin
            WRITE(numout,9003) '   Number of suppressed land subdomains = ', jpni*jpnj - inijmin
            WRITE(numout,*)
         ENDIF
      ENDIF
9000  FORMAT (a, i4, a, i4, a, i7, a)
9001  FORMAT (a, i4, a, i4)
9002  FORMAT (a, i4, a)
9003  FORMAT (a, i5)

      ALLOCATE( nfimpp(jpni), nfproc(jpni), nfjpi(jpni),   &
         &      iin(jpnij), ijn(jpnij),   &
         &      iimppt(jpni,jpnj), ijmppt(jpni,jpnj), ijpi(jpni,jpnj), ijpj(jpni,jpnj), ipproc(jpni,jpnj),   &
         &      inei(8,jpni,jpnj), llnei(8,jpni,jpnj),   &
         &      impi(8,jpnij),   &
         &      STAT=ierr )
      CALL mpp_sum( 'mppini', ierr )
      IF( ierr /= 0 )   CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate standard ocean arrays' )

#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN   ! AGRIF children: specific setting (cf. agrif_user.F90)
         CALL agrif_nemo_init()
      ENDIF
#endif
      !
      !  2. Index arrays for subdomains
      ! -----------------------------------
      !
      CALL mpp_basesplit( jpiglo, jpjglo, nn_hls, jpni, jpnj, jpimax, jpjmax, iimppt, ijmppt, ijpi, ijpj )
      CALL mpp_getnum( llisOce, ipproc, iin, ijn )
      !
      ii = iin(narea)
      ij = ijn(narea)
      jpi   = ijpi(ii,ij)
      jpj   = ijpj(ii,ij)
      jpk   = MAX( 2, jpkglo )
      jpij  = jpi*jpj
      nimpp = iimppt(ii,ij)
      njmpp = ijmppt(ii,ij)
      !
      CALL init_doloop   ! set start/end indices of do-loop, depending on the halo width value (nn_hls)
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'MPI Message Passing - domain layout over processors'
         WRITE(numout,*)
         WRITE(numout,*) '   defines mpp subdomains'
         WRITE(numout,*) '      jpni = ', jpni
         WRITE(numout,*) '      jpnj = ', jpnj
         WRITE(numout,*) '     jpnij = ', jpnij
         WRITE(numout,*) '     nimpp = ', nimpp
         WRITE(numout,*) '     njmpp = ', njmpp
         WRITE(numout,*)
         WRITE(numout,*) '      sum ijpi(i,1) = ', SUM(ijpi(:,1)), ' jpiglo = ', jpiglo
         WRITE(numout,*) '      sum ijpj(1,j) = ', SUM(ijpj(1,:)), ' jpjglo = ', jpjglo

         ! Subdomain grid print
         ifreq = 4
         il1 = 1
         DO jn = 1, (jpni-1)/ifreq+1
            il2 = MIN(jpni,il1+ifreq-1)
            WRITE(numout,*)
            WRITE(numout,9400) ('***',ji=il1,il2-1)
            DO jj = jpnj, 1, -1
               WRITE(numout,9403) ('   ',ji=il1,il2-1)
               WRITE(numout,9402) jj, (ijpi(ji,jj),ijpj(ji,jj),ji=il1,il2)
               WRITE(numout,9404) (ipproc(ji,jj),ji=il1,il2)
               WRITE(numout,9403) ('   ',ji=il1,il2-1)
               WRITE(numout,9400) ('***',ji=il1,il2-1)
            END DO
            WRITE(numout,9401) (ji,ji=il1,il2)
            il1 = il1+ifreq
         END DO
9400     FORMAT('           ***'   ,20('*************',a3)    )
9403     FORMAT('           *     ',20('         *   ',a3)    )
9401     FORMAT('              '   ,20('   ',i3,'          ') )
9402     FORMAT('               ',i3,' *  ',20(i3,'  x',i3,'   *   ') )
9404     FORMAT('           *  '   ,20('      ',i4,'   *   ') )
      ENDIF
      !
      ! Store information for the north pole folding communications
      nfproc(:) = ipproc(:,jpnj)
      nfimpp(:) = iimppt(:,jpnj)
      nfjpi (:) =   ijpi(:,jpnj)
      !
      ! 3. Define Western, Eastern, Southern and Northern neighbours + corners in the subdomain grid reference
      ! --------------------------------------------------------------------------------------------------------
      !
      ! note that the North fold has a specific treatment for its MPI communications.
      ! This must not be treated as a "usual" communication with a northern neighbour.
      !    -> North fold processes have no Northern neighbour in the definition done below
      !
      llmpi_Iperio = jpni > 1 .AND. l_Iperio                         ! do i-periodicity with an MPI communication?
      llmpi_Jperio = jpnj > 1 .AND. l_Jperio                         ! do j-periodicity with an MPI communication?
      !
      l_SelfPerio(1:2) = l_Iperio .AND. jpni == 1                    !  west,  east periodicity by itself
      l_SelfPerio(3:4) = l_Jperio .AND. jpnj == 1                    ! south, north periodicity by itself
      l_SelfPerio(5:8) = l_SelfPerio(jpwe) .AND. l_SelfPerio(jpso)   ! corners bi-periodicity by itself
      !
      ! define neighbours mapping (1/2): default definition: ignore whether neighbours are land-only subdomains or not
      DO jj = 1, jpnj
         DO ji = 1, jpni
            !
            IF ( llisOce(ji,jj) ) THEN   ! this subdomain has some ocean: it has neighbours
               !
               inum0 = ji - 1 + ( jj - 1 ) * jpni   ! index in the subdomain grid, starting at 0
               !
               ! Is there a neighbour?
               llnei(jpwe,ji,jj) = ji >   1  .OR. llmpi_Iperio                 ! West  nei exists if not the first column or llmpi_Iperio
               llnei(jpea,ji,jj) = ji < jpni .OR. llmpi_Iperio                 ! East  nei exists if not the last  column or llmpi_Iperio
               llnei(jpso,ji,jj) = jj >   1  .OR. llmpi_Jperio                 ! South nei exists if not the first line   or llmpi_Jperio
               llnei(jpno,ji,jj) = jj < jpnj .OR. llmpi_Jperio                 ! North nei exists if not the last  line   or llmpi_Jperio
               llnei(jpsw,ji,jj) = llnei(jpwe,ji,jj) .AND. llnei(jpso,ji,jj)   ! So-We nei exists if both South and West nei exist
               llnei(jpse,ji,jj) = llnei(jpea,ji,jj) .AND. llnei(jpso,ji,jj)   ! So-Ea nei exists if both South and East nei exist
               llnei(jpnw,ji,jj) = llnei(jpwe,ji,jj) .AND. llnei(jpno,ji,jj)   ! No-We nei exists if both North and West nei exist
               llnei(jpne,ji,jj) = llnei(jpea,ji,jj) .AND. llnei(jpno,ji,jj)   ! No-Ea nei exists if both North and East nei exist
               !
               ! Which index (starting at 0) do the neighbours have in the subdomain grid?
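               ! note: COUNT( (/ <logical> /) ) evaluates to 1 when the condition holds
               ! and 0 otherwise, so the expressions below apply the periodic wrap-around
               ! without any IF branch; e.g. for ji == 1 the West neighbour index becomes
               ! inum0 - 1 + jpni, i.e. the last subdomain of the same row.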
               IF( llnei(jpwe,ji,jj) )   inei(jpwe,ji,jj) =        inum0 -    1 + jpni        * COUNT( (/ ji ==    1 /) )
               IF( llnei(jpea,ji,jj) )   inei(jpea,ji,jj) =        inum0 +    1 - jpni        * COUNT( (/ ji == jpni /) )
               IF( llnei(jpso,ji,jj) )   inei(jpso,ji,jj) =        inum0 - jpni + jpni * jpnj * COUNT( (/ jj ==    1 /) )
               IF( llnei(jpno,ji,jj) )   inei(jpno,ji,jj) =        inum0 + jpni - jpni * jpnj * COUNT( (/ jj == jpnj /) )
               IF( llnei(jpsw,ji,jj) )   inei(jpsw,ji,jj) = inei(jpso,ji,jj) - 1 + jpni        * COUNT( (/ ji ==    1 /) )
               IF( llnei(jpse,ji,jj) )   inei(jpse,ji,jj) = inei(jpso,ji,jj) + 1 - jpni        * COUNT( (/ ji == jpni /) )
               IF( llnei(jpnw,ji,jj) )   inei(jpnw,ji,jj) = inei(jpno,ji,jj) - 1 + jpni        * COUNT( (/ ji ==    1 /) )
               IF( llnei(jpne,ji,jj) )   inei(jpne,ji,jj) = inei(jpno,ji,jj) + 1 - jpni        * COUNT( (/ ji == jpni /) )
               !
            ELSE   ! a land-only domain has no neighbour
               llnei(:,ji,jj) = .FALSE.
            ENDIF
            !
         END DO
      END DO
      !
      ! define neighbours mapping (2/2): check that neighbours are not land-only subdomains
      DO jj = 1, jpnj
         DO ji = 1, jpni
            DO jn = 1, 8
               IF( llnei(jn,ji,jj) ) THEN   ! if a neighbour exists -> it must not be a land-only domain
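                  ! recover the (i,j) position in the subdomain grid from the flat index
                  ! (inverse of inum0 = ji-1 + (jj-1)*jpni); illustrative example, values
                  ! assumed: with jpni = 4, flat index 5 gives ii = 1 + MOD(5,4) = 2 and
                  ! ij = 1 + 5/4 = 2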
                  ii = 1 + MOD( inei(jn,ji,jj) , jpni )
                  ij = 1 +      inei(jn,ji,jj) / jpni
                  llnei(jn,ji,jj) = llisOce( ii, ij )
               ENDIF
            END DO
         END DO
      END DO
      !
      ! update the index of the neighbours in the subdomain grid
      WHERE( .NOT. llnei )   inei = -1
      !
      ! Save processor layout in ascii file
      IF (llwrtlay) THEN
         CALL ctl_opn( inum, 'layout.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )
         WRITE(inum,'(a)') '  jpnij jpimax jpjmax    jpk jpiglo jpjglo ( local:   narea    jpi    jpj )'
         WRITE(inum,'(6i7,a,3i7,a)') jpnij,jpimax,jpjmax,jpk,jpiglo,jpjglo,' ( local: ',narea,jpi,jpj,' )'
         WRITE(inum,'(a)') ' narea    ii    ij   jpi   jpj nimpp njmpp mpiwe mpiea mpiso mpino mpisw mpise mpinw mpine'
         DO jp = 1, jpnij
            ii = iin(jp)
            ij = ijn(jp)
            WRITE(inum,'(15i6)')  jp, ii, ij, ijpi(ii,ij), ijpj(ii,ij), iimppt(ii,ij), ijmppt(ii,ij), inei(:,ii,ij)
         END DO
      END IF

      !
      ! 4. Define Western, Eastern, Southern and Northern neighbours + corners for each mpi process
      ! --------------------------------------------------------------------------------------------
      !
      ! rewrite the information from the "subdomain grid" to the mpi process list:
      ! beware that, for example, the position of the northern neighbour in the
      ! "subdomain grid" differs from its position in the "mpi process list"

      ! default definition: no neighbours
      impi(:,:) = -1   ! (starting at 0, -1 if no neighbour)

      DO jp = 1, jpnij
         ii = iin(jp)
         ij = ijn(jp)
         DO jn = 1, 8
            IF( llnei(jn,ii,ij) ) THEN   ! must be tested as some land domains can be kept to fit mppsize
               ii2 = 1 + MOD( inei(jn,ii,ij) , jpni )
               ij2 = 1 +      inei(jn,ii,ij) / jpni
               impi(jn,jp) = ipproc( ii2, ij2 )
            ENDIF
         END DO
      END DO

      !
      ! 4. keep information for the local process
      ! -----------------------------------------
      !
      ! set default neighbours
      mpinei(:) = impi(:,narea)
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) '   resulting internal parameters : '
         WRITE(numout,*) '      narea = ', narea
         WRITE(numout,*) '      mpi nei  west = ', mpinei(jpwe)  , '   mpi nei  east = ', mpinei(jpea)
         WRITE(numout,*) '      mpi nei south = ', mpinei(jpso)  , '   mpi nei north = ', mpinei(jpno)
         WRITE(numout,*) '      mpi nei so-we = ', mpinei(jpsw)  , '   mpi nei so-ea = ', mpinei(jpse)
         WRITE(numout,*) '      mpi nei no-we = ', mpinei(jpnw)  , '   mpi nei no-ea = ', mpinei(jpne)
      ENDIF
      !
      !                          ! Prepare mpp north fold
      !
      llmpiNFold =          jpni  > 1 .AND. l_NFold   ! is the North fold done with an MPI communication?
      l_IdoNFold = ijn(narea) == jpnj .AND. l_NFold   ! is this process doing the North fold?
      !
      IF( llmpiNFold ) THEN
         CALL mpp_ini_north
         IF (lwp) THEN
            WRITE(numout,*)
            WRITE(numout,*) '   ==>>>   North fold boundary prepared for jpni > 1'
         ENDIF
         IF (llwrtlay) THEN   ! additional prints in layout.dat
            WRITE(inum,*)
            WRITE(inum,*)
            WRITE(inum,*) 'number of subdomains located along the north fold : ', ndim_rank_north
            WRITE(inum,*) 'Ranks of the subdomains located along the north fold : '
            DO jp = 1, ndim_rank_north, 5
               WRITE(inum,*) nrank_north( jp:MINVAL( (/jp+4,ndim_rank_north/) ) )
            END DO
         ENDIF
         IF ( l_IdoNFold .AND. ln_nnogather ) THEN
            CALL init_nfdcom     ! northfold neighbour lists
            IF (llwrtlay) THEN
               WRITE(inum,*)
               WRITE(inum,*) 'north fold exchanges with explicit point-to-point messaging :'
               WRITE(inum,*) '   nsndto  : ', nsndto
               WRITE(inum,*) '   isendto : ', isendto(1:nsndto)
            ENDIF
         ENDIF
      ENDIF
      !
      CALL mpp_ini_nc    ! Initialize communicator for neighbourhood collective communications
      !
      CALL init_ioipsl   ! Prepare NetCDF output file (if necessary)
      !
      IF (llwrtlay) CLOSE(inum)
      !
      DEALLOCATE(iin, ijn, iimppt, ijmppt, ijpi, ijpj, ipproc, inei, llnei, impi, llisOce)
      !
   END SUBROUTINE mpp_init

#endif

   SUBROUTINE mpp_basesplit( kiglo, kjglo, khls, knbi, knbj, kimax, kjmax, kimppt, kjmppt, klci, klcj)
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_basesplit  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!
      !! ** Method  :   The global domain is distributed in smaller local domains.
      !!
      !! ** Action : - set for all knbi*knbj domains:
      !!                    kimppt     : longitudinal index
      !!                    kjmppt     : latitudinal  index
      !!                    klci       : first  dimension
      !!                    klcj       : second dimension
      !!----------------------------------------------------------------------
      INTEGER,                                 INTENT(in   ) ::   kiglo, kjglo
      INTEGER,                                 INTENT(in   ) ::   khls
      INTEGER,                                 INTENT(in   ) ::   knbi, knbj
      INTEGER,                                 INTENT(  out) ::   kimax, kjmax
      INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT(  out) ::   kimppt, kjmppt
      INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT(  out) ::   klci, klcj
      !
      INTEGER ::   ji, jj
      INTEGER ::   i2hls
      INTEGER ::   iresti, irestj, irm, ijpjmin
      !!----------------------------------------------------------------------
      i2hls = 2*khls
      !
#if defined key_nemocice_decomp
      kimax = ( nx_global+2-i2hls + (knbi-1) ) / knbi + i2hls   ! first  dim.
      kjmax = ( ny_global+2-i2hls + (knbj-1) ) / knbj + i2hls   ! second dim.
#else
      kimax = ( kiglo - i2hls + (knbi-1) ) / knbi + i2hls   ! first  dim.
      kjmax = ( kjglo - i2hls + (knbj-1) ) / knbj + i2hls   ! second dim.
#endif
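      ! Illustrative check (values assumed for the example): the expression is a
      ! ceiling division of the interior size over knbi, plus the 2*khls halo points;
      ! e.g. kiglo = 362 and khls = 1 (so i2hls = 2) with knbi = 4 give
      !    kimax = ( 360 + 3 ) / 4 + 2 = 90 + 2 = 92
      ! i.e. each subdomain holds at most 90 interior points plus its halo points.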
      IF( .NOT. PRESENT(kimppt) )   RETURN
      !
      !  1. Dimension arrays for subdomains
      ! -----------------------------------
      !  Computation of local domain sizes klci() klcj()
      !  These dimensions depend on global sizes knbi,knbj and kiglo,kjglo
      !  The subdomains are squares lesser than or equal to the global
      !  dimensions divided by the number of processors minus the overlap array.
      !
      iresti = 1 + MOD( kiglo - i2hls - 1 , knbi )
      irestj = 1 + MOD( kjglo - i2hls - 1 , knbj )
      !
      !  Need to use kimax and kjmax here since jpi and jpj not yet defined
#if defined key_nemocice_decomp
      ! Change padding to be consistent with CICE
      klci(1:knbi-1,:       ) = kimax
      klci(  knbi  ,:       ) = kiglo - (knbi - 1) * (kimax - i2hls)
      klcj(:       ,1:knbj-1) = kjmax
      klcj(:       ,  knbj  ) = kjglo - (knbj - 1) * (kjmax - i2hls)
#else
      klci(1:iresti      ,:) = kimax
      klci(iresti+1:knbi ,:) = kimax-1
      IF( MINVAL(klci) < 2*i2hls ) THEN
         WRITE(ctmp1,*) '   mpp_basesplit: minimum value of jpi must be >= ', 2*i2hls
         WRITE(ctmp2,*) '   We have ', MINVAL(klci)
         CALL ctl_stop( 'STOP', ctmp1, ctmp2 )
      ENDIF
      IF( l_NFold ) THEN
         ! minimize the size of the last row to compensate for the north pole folding coast
         IF( c_NFtype == 'T' )   ijpjmin = 2+3*khls   ! V and F folding must be outside of southern halos
         IF( c_NFtype == 'F' )   ijpjmin = 1+3*khls   ! V and F folding must be outside of southern halos
         irm = knbj - irestj                          ! total number of lines to be removed
         klcj(:,knbj) = MAX( ijpjmin, kjmax-irm )     ! we must have jpj >= ijpjmin in the last row
         irm = irm - ( kjmax - klcj(1,knbj) )         ! remaining number of lines to remove
         irestj = knbj - 1 - irm
         klcj(:, irestj+1:knbj-1) = kjmax-1
      ELSE
         klcj(:, irestj+1:knbj  ) = kjmax-1
      ENDIF
      klcj(:,1:irestj) = kjmax
      IF( MINVAL(klcj) < 2*i2hls ) THEN
         WRITE(ctmp1,*) '   mpp_basesplit: minimum value of jpj must be >= ', 2*i2hls
         WRITE(ctmp2,*) '   We have ', MINVAL(klcj)
         CALL ctl_stop( 'STOP', ctmp1, ctmp2 )
      ENDIF
#endif

      !  2. Index arrays for subdomains
      ! -------------------------------
      kimppt(:,:) = 1
      kjmppt(:,:) = 1
      !
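      ! Note (illustrative, values assumed): consecutive subdomains overlap by i2hls
      ! points, so each one starts i2hls points before the previous one ends;
      ! e.g. with klci = 92 and i2hls = 2, the i-starting positions are 1, 91, 181, ...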
      IF( knbi > 1 ) THEN
         DO jj = 1, knbj
            DO ji = 2, knbi
               kimppt(ji,jj) = kimppt(ji-1,jj) + klci(ji-1,jj) - i2hls
            END DO
         END DO
      ENDIF
      !
      IF( knbj > 1 )THEN
         DO jj = 2, knbj
            DO ji = 1, knbi
               kjmppt(ji,jj) = kjmppt(ji,jj-1) + klcj(ji,jj-1) - i2hls
            END DO
         END DO
      ENDIF

   END SUBROUTINE mpp_basesplit


   SUBROUTINE bestpartition( knbij, knbi, knbj, knbcnt, ldlist )
      !!----------------------------------------------------------------------
      !!                 ***  ROUTINE bestpartition  ***
      !!
      !! ** Purpose :   find the domain decomposition that uses at most knbij
      !!              subdomains while minimizing the subdomain size
      !!              (land-only subdomains being excluded)
      !!
      !! ** Method  :   test the relevant decompositions, using strips of the
      !!              land-sea mask to count the land-only subdomains
      !!----------------------------------------------------------------------
      INTEGER,           INTENT(in   ) ::   knbij         ! total number of subdomains (knbi*knbj)
      INTEGER, OPTIONAL, INTENT(  out) ::   knbi, knbj    ! number of subdomains along i and j (knbi and knbj)
      INTEGER, OPTIONAL, INTENT(  out) ::   knbcnt        ! number of land subdomains
      LOGICAL, OPTIONAL, INTENT(in   ) ::   ldlist        ! .true.: print the list of the best domain decompositions (with land)
      !
      INTEGER :: ji, jj, ii, iitarget
      INTEGER :: iszitst, iszjtst
      INTEGER :: isziref, iszjref
      INTEGER :: iszimin, iszjmin
      INTEGER :: inbij, iszij
      INTEGER :: inbimax, inbjmax, inbijmax, inbijold
      INTEGER :: isz0, isz1
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   indexok
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   inbi0, inbj0, inbij0   ! number of subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   iszi0, iszj0, iszij0   ! max size of the subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   inbi1, inbj1, inbij1   ! number of subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   iszi1, iszj1, iszij1   ! max size of the subdomains along i,j
      LOGICAL :: llist
      LOGICAL, DIMENSION(:,:), ALLOCATABLE ::   llmsk2d                ! mask of the (knbi,knbj) combinations to keep
      LOGICAL, DIMENSION(:,:), ALLOCATABLE ::   llisOce                ! is the subdomain (at least partly) ocean?
      REAL(wp)::   zpropland
      !!----------------------------------------------------------------------
      !
      llist = .FALSE.
      IF( PRESENT(ldlist) ) llist = ldlist

      CALL mpp_init_landprop( zpropland )                     ! get the proportion of land points over the global domain
      inbij = NINT( REAL(knbij, wp) / ( 1.0 - zpropland ) )   ! define the largest possible value for jpni*jpnj
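      ! Illustrative example (numbers assumed): with knbij = 100 requested subdomains
      ! and zpropland = 0.3 (30% land), inbij = NINT(100/0.7) = 143, i.e. up to 143
      ! subdomains may be tested since roughly 43 of them are expected to be land-only.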
      !
      IF( llist ) THEN ;   inbijmax = inbij*2
      ELSE             ;   inbijmax = inbij
      ENDIF
      !
      ALLOCATE(inbi0(inbijmax),inbj0(inbijmax),iszi0(inbijmax),iszj0(inbijmax))
      !
      inbimax = 0
      inbjmax = 0
      isziref = jpiglo*jpjglo+1   ! define a value that is larger than the largest possible
      iszjref = jpiglo*jpjglo+1
      !
      iszimin = 4*nn_hls          ! minimum size of the MPI subdomain so halos are always addressing the neighbour's inner domain
      iszjmin = 4*nn_hls
      IF( c_NFtype == 'T' )   iszjmin = MAX(iszjmin, 2+3*nn_hls)   ! V and F folding must be outside of southern halos
      IF( c_NFtype == 'F' )   iszjmin = MAX(iszjmin, 1+3*nn_hls)   ! V and F folding must be outside of southern halos
      !
      ! get the list of knbi that gives a smaller jpimax than knbi-1
      ! get the list of knbj that gives a smaller jpjmax than knbj-1
      DO ji = 1, inbijmax
#if defined key_nemocice_decomp
         iszitst = ( nx_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls   ! first  dim.
#else
         iszitst = ( Ni0glo + (ji-1) ) / ji + 2*nn_hls   ! max subdomain i-size
#endif
         IF( iszitst < isziref .AND. iszitst >= iszimin ) THEN
            isziref = iszitst
            inbimax = inbimax + 1
            inbi0(inbimax) = ji
            iszi0(inbimax) = isziref
         ENDIF
#if defined key_nemocice_decomp
         iszjtst = ( ny_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls   ! second dim.
#else
         iszjtst = ( Nj0glo + (ji-1) ) / ji + 2*nn_hls   ! max subdomain j-size
#endif
         IF( iszjtst < iszjref .AND. iszjtst >= iszjmin ) THEN
            iszjref = iszjtst
            inbjmax = inbjmax + 1
            inbj0(inbjmax) = ji
            iszj0(inbjmax) = iszjref
         ENDIF
      END DO

      ! combine these 2 lists to get all possible knbi*knbj <= inbijmax
      ALLOCATE( llmsk2d(inbimax,inbjmax) )
      DO jj = 1, inbjmax
         DO ji = 1, inbimax
            IF ( inbi0(ji) * inbj0(jj) <= inbijmax ) THEN   ;   llmsk2d(ji,jj) = .TRUE.
            ELSE                                            ;   llmsk2d(ji,jj) = .FALSE.
            ENDIF
         END DO
      END DO
      isz1 = COUNT(llmsk2d)
      ALLOCATE( inbi1(isz1), inbj1(isz1), iszi1(isz1), iszj1(isz1) )
      ii = 0
      DO jj = 1, inbjmax
         DO ji = 1, inbimax
            IF( llmsk2d(ji,jj) ) THEN
               ii = ii + 1
               inbi1(ii) = inbi0(ji)
               inbj1(ii) = inbj0(jj)
               iszi1(ii) = iszi0(ji)
               iszj1(ii) = iszj0(jj)
            END IF
         END DO
      END DO
      DEALLOCATE( inbi0, inbj0, iszi0, iszj0 )
      DEALLOCATE( llmsk2d )

      ALLOCATE( inbij1(isz1), iszij1(isz1) )
      inbij1(:) = inbi1(:) * inbj1(:)
      iszij1(:) = iszi1(:) * iszj1(:)

      ! if there is no land and no print
      IF( .NOT. llist .AND. numbot == -1 .AND. numbdy == -1 ) THEN
         ! get the smallest partition which gives the smallest subdomain size
         ii = MINLOC(inbij1, mask = iszij1 == MINVAL(iszij1), dim = 1)
         knbi = inbi1(ii)
         knbj = inbj1(ii)
         IF(PRESENT(knbcnt))   knbcnt = 0
         DEALLOCATE( inbi1, inbj1, inbij1, iszi1, iszj1, iszij1 )
         RETURN
      ENDIF

      ! extract only the partitions which reduce the subdomain size in comparison with smaller partitions
      ALLOCATE( indexok(isz1) )   ! to store indices of the best partitions
      isz0 = 0                    ! number of best partitions
      inbij = 1                   ! start with the min value of inbij1 => 1
      iszij = jpiglo*jpjglo+1     ! default: larger than the global domain
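      ! The loop below keeps, in effect, the Pareto front of the pairs (number of
      ! subdomains, max subdomain size): a partition is retained only if its max
      ! subdomain size is strictly smaller than that of every partition with fewer
      ! subdomains. Illustrative example (values assumed): if 4 subdomains already
      ! give a max size of 10000 points, a 6-subdomain partition is kept only when
      ! its max size is < 10000.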
      DO WHILE( inbij <= inbijmax )   ! if we did not reach the max of inbij1
         ii = MINLOC(iszij1, mask = inbij1 == inbij, dim = 1)   ! warning: returns the first occurrence if multiple results
         IF ( iszij1(ii) < iszij ) THEN
            ii = MINLOC( iszi1+iszj1, mask = iszij1 == iszij1(ii) .AND. inbij1 == inbij, dim = 1)   ! select the smaller perimeter if multiple min
            isz0 = isz0 + 1
            indexok(isz0) = ii
            iszij = iszij1(ii)
         ENDIF
         inbij = MINVAL(inbij1, mask = inbij1 > inbij)   ! warning: returns the largest integer value if mask = .false. everywhere
      END DO
      DEALLOCATE( inbij1, iszij1 )

      ! keep only the best partitions (sorted by increasing number of subdomains and decreasing subdomain size)
      ALLOCATE( inbi0(isz0), inbj0(isz0), iszi0(isz0), iszj0(isz0) )
      DO ji = 1, isz0
         ii = indexok(ji)
         inbi0(ji) = inbi1(ii)
         inbj0(ji) = inbj1(ii)
         iszi0(ji) = iszi1(ii)
         iszj0(ji) = iszj1(ii)
      END DO
      DEALLOCATE( indexok, inbi1, inbj1, iszi1, iszj1 )

      IF( llist ) THEN
         IF(lwp) THEN
            WRITE(numout,*)
            WRITE(numout,*) '                  For your information:'
            WRITE(numout,*) '  list of the best partitions including land suppression'
            WRITE(numout,*) '  -----------------------------------------------------'
            WRITE(numout,*)
         END IF
         ji = isz0   ! initialization with the largest value
         ALLOCATE( llisOce(inbi0(ji), inbj0(ji)) )
         CALL mpp_is_ocean( llisOce )   ! Warning: must be called by all cores (it calls mpp_sum)
         inbijold = COUNT(llisOce)
         DEALLOCATE( llisOce )
         DO ji = isz0-1, 1, -1
            ALLOCATE( llisOce(inbi0(ji), inbj0(ji)) )
            CALL mpp_is_ocean( llisOce )   ! Warning: must be called by all cores (it calls mpp_sum)
            inbij = COUNT(llisOce)
            DEALLOCATE( llisOce )
            IF(lwp .AND. inbij < inbijold) THEN
               WRITE(numout,'(a, i6, a, i6, a, f4.1, a, i9, a, i6, a, i6, a)')                             &
                  &   'nb_cores oce: ', inbij, ', land domains excluded: ', inbi0(ji)*inbj0(ji) - inbij,   &
                  &   ' (', REAL(inbi0(ji)*inbj0(ji) - inbij,wp) / REAL(inbi0(ji)*inbj0(ji),wp) *100.,     &
                  &   '%), largest oce domain: ', iszi0(ji)*iszj0(ji), ' ( ', iszi0(ji),' x ', iszj0(ji), ' )'
               inbijold = inbij
            END IF
         END DO
         DEALLOCATE( inbi0, inbj0, iszi0, iszj0 )
         IF(lwp) THEN
            WRITE(numout,*)
            WRITE(numout,*)  '  -----------------------------------------------------------'
         ENDIF
         CALL mppsync
         CALL mppstop( ld_abort = .TRUE. )
      ENDIF

      DEALLOCATE( iszi0, iszj0 )
      inbij = inbijmax + 1        ! default: larger than possible
      ii = isz0+1                 ! start from the end of the list (smaller subdomains)
      DO WHILE( inbij > knbij )   ! while the number of ocean subdomains exceeds the number of procs
         ii = ii - 1
         ALLOCATE( llisOce(inbi0(ii), inbj0(ii)) )
         CALL mpp_is_ocean( llisOce )   ! must be done by all cores
         inbij = COUNT(llisOce)
         DEALLOCATE( llisOce )
      END DO
      knbi = inbi0(ii)
      knbj = inbj0(ii)
      IF(PRESENT(knbcnt))   knbcnt = knbi * knbj - inbij
      DEALLOCATE( inbi0, inbj0 )
      !
   END SUBROUTINE bestpartition


   SUBROUTINE mpp_init_landprop( propland )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_landprop  ***
      !!
      !! ** Purpose :   get the proportion of land points in the surface land-sea mask
      !!
      !! ** Method  :   read iproc strips (of length Ni0glo) of the land-sea mask
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(  out) ::   propland   ! proportion of land points in the global domain (between 0 and 1)
      !
      INTEGER, DIMENSION(jpni*jpnj) ::   kusedom_1d
      INTEGER :: inboce, iarea
      INTEGER :: iproc, idiv, ijsz
      INTEGER :: ijstr
      LOGICAL, ALLOCATABLE, DIMENSION(:,:) ::   lloce
      !!----------------------------------------------------------------------
      ! do nothing if there is no land-sea mask
      IF( numbot == -1 .AND. numbdy == -1 ) THEN
         propland = 0.
         RETURN
      ENDIF

      ! number of processes reading the bathymetry file
      iproc = MINVAL( (/mppsize, Nj0glo/2, 100/) )   ! read at least 2 lines, no more than 100 processes reading at the same time

      ! we want to read iproc strips of the land-sea mask -> pick up iproc processes every idiv processes starting at 1
      IF( iproc == 1 ) THEN   ;   idiv = mppsize
      ELSE                    ;   idiv = ( mppsize - 1 ) / ( iproc - 1 )
      ENDIF
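      ! Illustrative example (numbers assumed): with mppsize = 1000 and iproc = 100,
      ! idiv = 999/99 = 10, so ranks narea-1 = 0, 10, 20, ..., 990 (the multiples of
      ! idiv) each read one strip of the mask.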

      iarea = (narea-1)/idiv   ! involved process number (starting counting at 0)
      IF( MOD( narea-1, idiv ) == 0 .AND. iarea < iproc ) THEN   ! beware: idiv can be equal to 1
         !
         ijsz = Nj0glo / iproc                                              ! width of the strip to read
         IF( iarea < MOD(Nj0glo,iproc) )   ijsz = ijsz + 1
         ijstr = iarea*(Nj0glo/iproc) + MIN(iarea, MOD(Nj0glo,iproc)) + 1   ! starting j position of the reading
         !
         ALLOCATE( lloce(Ni0glo, ijsz) )   ! allocate the strip
         CALL readbot_strip( ijstr, ijsz, lloce )
         inboce = COUNT(lloce)             ! number of ocean points in the strip
         DEALLOCATE(lloce)
         !
      ELSE
         inboce = 0
      ENDIF
      CALL mpp_sum( 'mppini', inboce )   ! total number of ocean points over the global domain
      !
      propland = REAL( Ni0glo*Nj0glo - inboce, wp ) / REAL( Ni0glo*Nj0glo, wp )
      !
   END SUBROUTINE mpp_init_landprop


   SUBROUTINE mpp_is_ocean( ldIsOce )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_is_ocean  ***
      !!
      !! ** Purpose :   Check, for the mpi domain decomposition inbi x inbj, which
      !!              subdomains, including 1 halo (even if nn_hls>1), contain
      !!              at least 1 ocean point.
      !!              We must indeed ensure that each subdomain that is a neighbour
      !!              of a land subdomain has only land points on its boundary
      !!              (inside the inner subdomain) with the land subdomain.
      !!              This is needed to get the proper boundary conditions on
      !!              a subdomain with a closed boundary.
      !!
      !! ** Method  :   read inbj strips (of length Ni0glo) of the land-sea mask
      !!----------------------------------------------------------------------
      LOGICAL, DIMENSION(:,:), INTENT(  out) ::   ldIsOce   ! .true. if a subdomain contains at least 1 ocean point
      !
      INTEGER :: idiv, iimax, ijmax, iarea
      INTEGER :: inbi, inbj, inx, iny, inry, isty
      INTEGER :: ji, jn
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   inboce      ! number of ocean points in each mpi subdomain
      INTEGER, ALLOCATABLE, DIMENSION(:  ) ::   inboce_1d
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   iimppt, ijpi
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ijmppt, ijpj
      LOGICAL, ALLOCATABLE, DIMENSION(:,:) ::   lloce       ! lloce(i,j) = .true. if the point (i,j) is ocean
      !!----------------------------------------------------------------------
      ! do nothing if there is no land-sea mask
      IF( numbot == -1 .AND. numbdy == -1 ) THEN
         ldIsOce(:,:) = .TRUE.
         RETURN
      ENDIF
      !
      inbi = SIZE( ldIsOce, dim = 1 )
      inbj = SIZE( ldIsOce, dim = 2 )
      !
      ! we want to read inbj strips of the land-sea mask -> pick up inbj processes every idiv processes starting at 1
      IF           ( inbj == 1 ) THEN   ;   idiv = mppsize
      ELSE IF ( mppsize < inbj ) THEN   ;   idiv = 1
      ELSE                              ;   idiv = ( mppsize - 1 ) / ( inbj - 1 )
      ENDIF
      !
      ALLOCATE( inboce(inbi,inbj), inboce_1d(inbi*inbj) )
      inboce(:,:) = 0   ! default: no ocean point found
      !
      DO jn = 0, (inbj-1)/mppsize   ! if mppsize < inbj : more strips than mpi processes (because of potential land domains)
         !
         iarea = (narea-1)/idiv + jn * mppsize + 1   ! involved process number (starting counting at 1)
         IF( MOD( narea-1, idiv ) == 0 .AND. iarea <= inbj ) THEN   ! beware: idiv can be equal to 1
            !
            ALLOCATE( iimppt(inbi,inbj), ijmppt(inbi,inbj), ijpi(inbi,inbj), ijpj(inbi,inbj) )
            CALL mpp_basesplit( Ni0glo, Nj0glo, 0, inbi, inbj, iimax, ijmax, iimppt, ijmppt, ijpi, ijpj )
            !
            inx = Ni0glo + 2   ;   iny = ijpj(1,iarea) + 2             ! strip size + 1 halo in each direction (even if nn_hls>1)
            ALLOCATE( lloce(inx, iny) )                                ! allocate the strip
            inry = iny - COUNT( (/ iarea == 1, iarea == inbj /) )      ! number of points to read in the y-direction
            isty = 1 + COUNT( (/ iarea == 1 /) )                       ! read from the first or the second line?
            CALL readbot_strip( ijmppt(1,iarea) - 2 + isty, inry, lloce(2:inx-1, isty:inry+isty-1) )   ! read the strip
            !
            IF( iarea == 1    ) THEN                                   ! the first line was not read
               IF( l_Jperio ) THEN                                     ! north-south periodicity
                  CALL readbot_strip( Nj0glo, 1, lloce(2:inx-1, 1) )   ! read the last line -> first line of lloce
               ELSE
                  lloce(2:inx-1, 1) = .FALSE.                          ! closed boundary
               ENDIF
            ENDIF
            IF( iarea == inbj ) THEN                                   ! the last line was not read
               IF( l_Jperio ) THEN                                     ! north-south periodicity
                  CALL readbot_strip( 1, 1, lloce(2:inx-1,iny) )       ! read the first line -> last line of lloce
               ELSEIF( c_NFtype == 'T' ) THEN                          ! north-pole folding T-pivot, T-point
                  lloce(2,iny) = lloce(2,iny-2)                        ! here we have 1 halo (even if nn_hls>1)
                  DO ji = 3,inx-1
                     lloce(ji,iny  ) = lloce(inx-ji+2,iny-2)           ! ok, we have at least 3 lines
                  END DO
                  DO ji = inx/2+2,inx-1
                     lloce(ji,iny-1) = lloce(inx-ji+2,iny-1)
                  END DO
               ELSEIF( c_NFtype == 'F' ) THEN                          ! north-pole folding F-pivot, T-point, 1 halo
                  lloce(inx/2+1,iny-1) = lloce(inx/2,iny-1)            ! here we have 1 halo (even if nn_hls>1)
                  lloce(inx  -1,iny-1) = lloce(2    ,iny-1)
                  DO ji = 2,inx-1
                     lloce(ji,iny) = lloce(inx-ji+1,iny-1)
                  END DO
               ELSE                                                    ! closed boundary
                  lloce(2:inx-1,iny) = .FALSE.
               ENDIF
            ENDIF
            !                                                          ! first and last column were not read
            IF( l_Iperio ) THEN
               lloce(1,:) = lloce(inx-1,:)   ;   lloce(inx,:) = lloce(2,:)   ! east-west periodicity
            ELSE
               lloce(1,:) = .FALSE.          ;   lloce(inx,:) = .FALSE.      ! closed boundary
            ENDIF
            !
            DO ji = 1, inbi
               inboce(ji,iarea) = COUNT( lloce(iimppt(ji,1):iimppt(ji,1)+ijpi(ji,1)+1,:) )   ! lloce has 2 more points than Ni0glo
            END DO
            !
            DEALLOCATE(lloce)
            DEALLOCATE(iimppt, ijmppt, ijpi, ijpj)
            !
         ENDIF
      END DO

      inboce_1d = RESHAPE(inboce, (/ inbi*inbj /))
      CALL mpp_sum( 'mppini', inboce_1d )
      inboce = RESHAPE(inboce_1d, (/inbi, inbj/))
      ldIsOce(:,:) = inboce(:,:) /= 0
      DEALLOCATE(inboce, inboce_1d)
      !
   END SUBROUTINE mpp_is_ocean


   SUBROUTINE readbot_strip( kjstr, kjcnt, ldoce )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE readbot_strip  ***
      !!
      !! ** Purpose :   Read relevant bathymetric information in order to
      !!              provide a land/sea mask used for the elimination
      !!              of land domains, in an mpp computation.
      !!
      !! ** Method  :   read a strip of size (Ni0glo,kjcnt)
      !!----------------------------------------------------------------------
      INTEGER                         , INTENT(in   ) ::   kjstr   ! starting j position of the reading
      INTEGER                         , INTENT(in   ) ::   kjcnt   ! number of lines to read
      LOGICAL, DIMENSION(Ni0glo,kjcnt), INTENT(  out) ::   ldoce   ! ldoce(i,j) = .true. if the point (i,j) is ocean
      !
      INTEGER                           ::   inumsave   ! local logical unit
      REAL(wp), DIMENSION(Ni0glo,kjcnt) ::   zbot, zbdy
      !!----------------------------------------------------------------------
      !
      inumsave = numout   ;   numout = numnul   ! redirect all print to /dev/null
      !
      IF( numbot /= -1 ) THEN
         CALL iom_get( numbot, jpdom_unknown, 'bottom_level', zbot, kstart = (/1,kjstr/), kcount = (/Ni0glo, kjcnt/) )
      ELSE
         zbot(:,:) = 1._wp   ! put a non-null value
      ENDIF
      !
      IF( numbdy /= -1 ) THEN   ! Adjust with bdy_msk if it exists
         CALL iom_get ( numbdy, jpdom_unknown, 'bdy_msk', zbdy, kstart = (/1,kjstr/), kcount = (/Ni0glo, kjcnt/) )
         zbot(:,:) = zbot(:,:) * zbdy(:,:)
      ENDIF
      !
      ldoce(:,:) = zbot(:,:) > 0._wp
      numout = inumsave
      !
   END SUBROUTINE readbot_strip


   SUBROUTINE mpp_getnum( ldIsOce, kproc, kipos, kjpos )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_getnum  ***
      !!
      !! ** Purpose :   give a number to each MPI subdomain (starting at 0)
      !!
      !! ** Method  :   start from the bottom left. First skip land subdomains, then use them if needed
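      !!
      !!      Illustrative example (assumed, not from the original code): on a 2x2
      !!      grid where only (1,1) is land and inij = 4, the first pass numbers the
      !!      ocean subdomains (2,1)->0, (1,2)->1, (2,2)->2; the second pass then
      !!      re-uses the land subdomain (1,1) as number 3 to reach inij processes.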
      !!----------------------------------------------------------------------
      LOGICAL, DIMENSION(:,:), INTENT(in   ) ::   ldIsOce   ! F if land process
      INTEGER, DIMENSION(:,:), INTENT(  out) ::   kproc     ! subdomain number (-1 if not existing, starting at 0)
      INTEGER, DIMENSION(  :), INTENT(  out) ::   kipos     ! i-position of the subdomain (from 1 to jpni)
      INTEGER, DIMENSION(  :), INTENT(  out) ::   kjpos     ! j-position of the subdomain (from 1 to jpnj)
      !
      INTEGER :: ii, ij, jarea, iarea0
      INTEGER :: icont, i2add , ini, inj, inij
      !!----------------------------------------------------------------------
      !
      ini = SIZE(ldIsOce, dim = 1)
      inj = SIZE(ldIsOce, dim = 2)
      inij = SIZE(kipos)
      !
      ! specify which subdomains are oce subdomains; the others are land subdomains
      kproc(:,:) = -1
      icont = -1
      DO jarea = 1, ini*inj
         iarea0 = jarea - 1
         ii = 1 + MOD(iarea0,ini)
         ij = 1 +     iarea0/ini
         IF( ldIsOce(ii,ij) ) THEN
            icont = icont + 1
            kproc(ii,ij) = icont
            kipos(icont+1) = ii
            kjpos(icont+1) = ij
         ENDIF
      END DO
      ! if needed add some land subdomains to reach inij active subdomains
      i2add = inij - COUNT( ldIsOce )
      DO jarea = 1, ini*inj
         iarea0 = jarea - 1
         ii = 1 + MOD(iarea0,ini)
         ij = 1 +     iarea0/ini
         IF( .NOT. ldIsOce(ii,ij) .AND. i2add > 0 ) THEN
            icont = icont + 1
            kproc(ii,ij) = icont
            kipos(icont+1) = ii
            kjpos(icont+1) = ij
            i2add = i2add - 1
         ENDIF
      END DO
      !
   END SUBROUTINE mpp_getnum


   SUBROUTINE init_ioipsl
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE init_ioipsl  ***
      !!
      !! ** Purpose :   IOIPSL initialization in mpp
      !!
      !! ** Method  :   set the IOIPSL domain decomposition (flio_dom_set)
      !!
      !! History :
      !!   9.0  !  04-03  (G. Madec )  MPP-IOIPSL
      !!   " "  !  08-12  (A. Coward)  addition in case of jpni*jpnj < jpnij
      !!----------------------------------------------------------------------
      INTEGER, DIMENSION(2) ::   iglo, iloc, iabsf, iabsl, ihals, ihale, idid
      !!----------------------------------------------------------------------

      ! The domain is split only horizontally along the i- or/and j- direction,
      ! so we need at most only 1D arrays with 2 elements.
      ! Set idompar values equivalent to the jpdom_local_noextra definition
      ! used in IOM. This works even if jpnij .ne. jpni*jpnj.
      iglo( :) = (/ Ni0glo, Nj0glo /)
      iloc( :) = (/ Ni_0  , Nj_0   /)
      iabsf(:) = (/ Nis0  , Njs0   /) + (/ nimpp, njmpp /) - 1 - nn_hls   ! corresponds to mig0(Nis0) but mig0 is not yet defined!
      iabsl(:) = iabsf(:) + iloc(:) - 1
      ihals(:) = (/ 0     , 0      /)
      ihale(:) = (/ 0     , 0      /)
      idid( :) = (/ 1     , 2      /)

      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'mpp init_ioipsl :   iloc  = ', iloc
         WRITE(numout,*) '~~~~~~~~~~~~~~~     iabsf = ', iabsf
         WRITE(numout,*) '                    ihals = ', ihals
         WRITE(numout,*) '                    ihale = ', ihale
      ENDIF
      !
      CALL flio_dom_set ( jpnij, narea-1, idid, iglo, iloc, iabsf, iabsl, ihals, ihale, 'BOX', nidom)
      !
   END SUBROUTINE init_ioipsl


   SUBROUTINE init_nfdcom
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE  init_nfdcom  ***
      !! ** Purpose :   Setup for north fold exchanges with explicit
      !!                point-to-point messaging
      !!
      !! ** Method  :   Initialization of the northern neighbours lists.
      !!----------------------------------------------------------------------
      !!    1.0  !  2011-10  (A. C. Coward, NOCS & J. Donners, PRACE)
      !!    2.0  !  2013-06  Setup avoiding MPI communication (I. Epicoco, S. Mocavero, CMCC)
      !!----------------------------------------------------------------------
      INTEGER ::   sxM, dxM, sxT, dxT, jn
      !!----------------------------------------------------------------------
      !
      ! sxM is the first point (in the global domain) needed to compute the north-fold for the current process
      sxM = jpiglo - nimpp - jpi + 1
      ! dxM is the last  point (in the global domain) needed to compute the north-fold for the current process
      dxM = jpiglo - nimpp + 2
      !
      ! loop over the other north-fold processes to find the processes
      ! managing the points belonging to the sxT-dxT range
      !
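      ! The three branches below cover the three ways the [sxT,dxT] range of a
      ! north-fold process jn can overlap the [sxM,dxM] range needed here:
      ! (1) sxM falls strictly inside [sxT,dxT], (2) [sxT,dxT] is fully included
      ! in [sxM,dxM], (3) dxM falls strictly inside [sxT,dxT]; in all three cases
      ! jn holds points we need, so it is added to the isendto list.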
      nsndto = 0
      DO jn = 1, jpni
         !
         sxT = nfimpp(jn)                   ! sxT = 1st  point (in the global domain) of the jn process
         dxT = nfimpp(jn) + nfjpi(jn) - 1   ! dxT = last point (in the global domain) of the jn process
         !
         IF    ( sxT < sxM  .AND.  sxM < dxT ) THEN
            nsndto          = nsndto + 1
            isendto(nsndto) = jn
         ELSEIF( sxM <= sxT  .AND.  dxM >= dxT ) THEN
            nsndto          = nsndto + 1
            isendto(nsndto) = jn
         ELSEIF( dxM <  dxT  .AND.  sxT <  dxM ) THEN
            nsndto          = nsndto + 1
            isendto(nsndto) = jn
         ENDIF
         !
      END DO
      !
   END SUBROUTINE init_nfdcom


   SUBROUTINE init_doloop
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE init_doloop  ***
      !!
      !! ** Purpose :   set the starting/ending indices of DO-loop
      !!              These indices are used in do_loop_substitute.h90
      !!----------------------------------------------------------------------
      !
      Nis0 =   1+nn_hls
      Njs0 =   1+nn_hls
      Nie0 = jpi-nn_hls
      Nje0 = jpj-nn_hls
      !
      Ni_0 = Nie0 - Nis0 + 1
      Nj_0 = Nje0 - Njs0 + 1
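      !
      ! Illustrative example: with nn_hls = 1, Nis0 = Njs0 = 2 and Nie0 = jpi-1,
      ! Nje0 = jpj-1, so the substituted DO loops sweep the inner domain only,
      ! leaving one halo row/column untouched on each side.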
      !
      ! old indices to be removed...
      jpim1 = jpi-1   ! inner domain indices
      jpjm1 = jpj-1   !   "           "
      jpkm1 = jpk-1   !   "           "
      !
   END SUBROUTINE init_doloop

   !!======================================================================
END MODULE mppini