MODULE mppini
   !!======================================================================
   !!                       ***  MODULE mppini  ***
   !! Ocean initialization : distributed memory computing initialization
   !!======================================================================
   !! History :  6.0  !  1994-11  (M. Guyon)  Original code
   !!  OPA       7.0  !  1995-04  (J. Escobar, M. Imbard)
   !!            8.0  !  1998-05  (M. Imbard, J. Escobar, L. Colombet )  SHMEM and MPI versions
   !!  NEMO      1.0  !  2004-01  (G. Madec, J.M Molines)  F90 : free form , north fold jpni > 1
   !!            3.4  !  2011-10  (A. C. Coward, NOCS & J. Donners, PRACE)  add mpp_init_nfdcom
   !!            3.   !  2013-06  (I. Epicoco, S. Mocavero, CMCC)  mpp_init_nfdcom: setup avoiding MPI communication
   !!            4.0  !  2016-06  (G. Madec)  use domain configuration file instead of bathymetry file
   !!            4.0  !  2017-06  (J.M. Molines, T. Lovato)  merge of mppini and mppini_2
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!   mpp_init          : Lay out the global domain over processors with/without land processor elimination
   !!   mpp_init_mask     : Read global bathymetric information to facilitate land suppression
   !!   mpp_init_partition: Calculate MPP domain decomposition
   !!   factorise         : Calculate the factors of the no. of MPI processes
   !!   mpp_init_nfdcom   : Setup for north fold exchanges with explicit point-to-point messaging
   !!----------------------------------------------------------------------
   USE dom_oce        ! ocean space and time domain
   !
   USE lbcnfd , ONLY : isendto, nsndto, nfsloop, nfeloop   ! Setup of north fold exchanges
   USE lib_mpp        ! distributed memory computing library
   USE iom            ! nemo I/O library
   USE ioipsl         ! I/O IPSL library
   USE in_out_manager ! I/O Manager

   IMPLICIT NONE
   PRIVATE

   PUBLIC   mpp_init   ! called by opa.F90

   INTEGER ::   numbot = -1   ! 'bottom_level' local logical unit

   !!----------------------------------------------------------------------
   !! NEMO/OCE 4.0 , NEMO Consortium (2018)
   !! $Id: mppini.F90 10570 2019-01-24 15:14:49Z acc $
   !! Software governed by the CeCILL license (see ./LICENSE)
   !!----------------------------------------------------------------------
CONTAINS

#if ! defined key_mpp_mpi
   !!----------------------------------------------------------------------
   !!   Default option :                           shared memory computing
   !!----------------------------------------------------------------------

   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!
      !! ** Method  :   Shared memory computing, set the local processor
      !!                variables to the value of the global domain
      !!----------------------------------------------------------------------
      !
      jpimax = jpiglo
      jpjmax = jpjglo
      jpi    = jpiglo
      jpj    = jpjglo
      jpk    = jpkglo
      jpim1  = jpi-1                         ! inner domain indices
      jpjm1  = jpj-1                         !   "           "
      jpkm1  = MAX( 1, jpk-1 )               !   "           "
      jpij   = jpi*jpj
      jpni   = 1
      jpnj   = 1
      jpnij  = jpni*jpnj
      nimpp  = 1
      njmpp  = 1
      nlci   = jpi
      nlcj   = jpj
      nldi   = 1
      nldj   = 1
      nlei   = jpi
      nlej   = jpj
      nbondi = 2
      nbondj = 2
      npolj  = jperio
      l_Iperio = jpni == 1 .AND. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7)
      l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7)
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'mpp_init : NO massively parallel processing'
         WRITE(numout,*) '~~~~~~~~ '
         WRITE(numout,*) ' l_Iperio = ', l_Iperio, ' l_Jperio = ', l_Jperio
         WRITE(numout,*) ' npolj = ', npolj , ' njmpp = ', njmpp
      ENDIF
      !
      IF( jpni /= 1 .OR. jpnj /= 1 .OR. jpnij /= 1 )                                     &
         CALL ctl_stop( 'mpp_init: equality jpni = jpnj = jpnij = 1 is not satisfied',   &
            &           'the domain is laid out for distributed memory computing!' )

#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN
         CALL agrif_nemo_init
      ENDIF

      IF( .NOT. Agrif_Root() ) THEN       ! AGRIF children: specific setting (cf. agrif_user.F90)
         print *, 'nbcellsx = ', nbcellsx, nbghostcells_x
         print *, 'nbcellsy = ', nbcellsy, nbghostcells_y_s, nbghostcells_y_n
         IF( jpiglo /= nbcellsx + 2 + 2*nbghostcells_x ) THEN
            IF(lwp) THEN
               WRITE(numout,*)
               WRITE(numout,*) 'jpiglo should be: ', nbcellsx + 2 + 2*nbghostcells_x
            ENDIF
            CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires jpiglo == nbcellsx + 2 + 2*nbghostcells_x' )
         ENDIF
         IF( jpjglo /= nbcellsy + 2 + nbghostcells_y_s + nbghostcells_y_n ) THEN
            IF(lwp) THEN
               WRITE(numout,*)
               WRITE(numout,*) 'jpjglo should be: ', nbcellsy + 2 + nbghostcells_y_s + nbghostcells_y_n
            ENDIF
            CALL ctl_stop( 'STOP', &
               'mpp_init: Agrif children requires jpjglo == nbcellsy + 2 + nbghostcells_y_s + nbghostcells_y_n' )
         ENDIF
         IF( ln_use_jattr )   CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires ln_use_jattr = .false. ' )
      ENDIF
#endif
      !
   END SUBROUTINE mpp_init

#else
   !!----------------------------------------------------------------------
   !!   'key_mpp_mpi'                    MPI massively parallel processing
   !!----------------------------------------------------------------------


   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!      If land processors are to be eliminated, this routine requires the
      !!      presence of the domain configuration file. Land processor elimination
      !!      is performed if jpni x jpnj /= jpnij. In this case the MPP_PREP
      !!      preprocessing tool can help to define the best decomposition.
      !!
      !! ** Method  :   Global domain is distributed in smaller local domains.
      !!      The periodic condition of each local domain is a function of its position
      !!      (global boundary or neighbouring domain) and of the global periodicity
      !!      type, jperio.
      !!
      !! ** Action : - set domain parameters
      !!                    nimpp     : longitudinal index
      !!                    njmpp     : latitudinal  index
      !!                    narea     : number for local area
      !!                    nlci      : first dimension
      !!                    nlcj      : second dimension
      !!                    nbondi    : mark for "east-west local boundary"
      !!                    nbondj    : mark for "north-south local boundary"
      !!                    nproc     : number for local processor
      !!                    noea      : number for local neighboring processor
      !!                    nowe      : number for local neighboring processor
      !!                    noso      : number for local neighboring processor
      !!                    nono      : number for local neighboring processor
      !!----------------------------------------------------------------------
      INTEGER ::   ji, jj, jn, jproc, jarea   ! dummy loop indices
      INTEGER ::   inijmin
      INTEGER ::   i2add
      INTEGER ::   inum                       ! local logical unit
      INTEGER ::   idir, ifreq, icont         ! local integers
      INTEGER ::   ii, il1, ili, imil         !   -       -
      INTEGER ::   ij, il2, ilj, ijm1         !   -       -
      INTEGER ::   iino, ijno, iiso, ijso     !   -       -
      INTEGER ::   iiea, ijea, iiwe, ijwe     !   -       -
      INTEGER ::   iarea0                     !   -       -
      INTEGER ::   ierr, ios                  !
      INTEGER ::   inbi, inbj, iimax, ijmax, icnt1, icnt2
      LOGICAL ::   llbest
      LOGICAL ::   llwrtlay
      INTEGER, ALLOCATABLE, DIMENSION(:)   ::   iin, ii_nono, ii_noea          ! 1D workspace
      INTEGER, ALLOCATABLE, DIMENSION(:)   ::   ijn, ii_noso, ii_nowe          !  -     -
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   iimppt, ilci, ibondi, ipproc   ! 2D workspace
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ijmppt, ilcj, ibondj, ipolj    !  -     -
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ilei, ildi, iono, ioea         !  -     -
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ilej, ildj, ioso, iowe         !  -     -
      LOGICAL, ALLOCATABLE, DIMENSION(:,:) ::   llisoce                        !  -     -
      !!----------------------------------------------------------------------

      llwrtlay = lwp
      !
      IF( ln_read_cfg )   CALL iom_open( cn_domcfg, numbot )
      !
      !  1. Dimension arrays for subdomains
      ! -----------------------------------
      !
      ! If the dimensions of the processor grid weren't specified in the namelist file
      ! then we calculate them here now that we have our communicator size
      IF( jpni < 1 .OR. jpnj < 1 ) THEN
         CALL mpp_init_bestpartition( mppsize, jpni, jpnj )
         llbest = .TRUE.
      ELSE
         CALL mpp_init_bestpartition( mppsize, inbi, inbj, icnt2 )
         CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax )
         CALL mpp_basic_decomposition( inbi, inbj, iimax, ijmax )
         IF( iimax*ijmax < jpimax*jpjmax ) THEN
            llbest = .FALSE.
            icnt1 = jpni*jpnj - mppsize
            WRITE(ctmp1,9000) ' The chosen domain decomposition ', jpni, ' x ', jpnj, ' with ', icnt1, ' land sub-domains'
            WRITE(ctmp2,9000) ' has larger MPI subdomains (jpi = ', jpimax, ', jpj = ', jpjmax, ', jpi*jpj = ', jpimax*jpjmax, ')'
            WRITE(ctmp3,9000) ' than the following domain decomposition ', inbi, ' x ', inbj, ' with ', icnt2, ' land sub-domains'
            WRITE(ctmp4,9000) ' whose MPI subdomain size is jpi = ', iimax, ', jpj = ', ijmax, ', jpi*jpj = ', iimax*ijmax, ' '
            CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' )
         ELSE
            llbest = .TRUE.
         ENDIF
      ENDIF

      ! look for land mpi subdomains...
      ALLOCATE( llisoce(jpni,jpnj) )
      CALL mpp_init_isoce( jpni, jpnj, llisoce )
      inijmin = COUNT( llisoce )   ! number of oce subdomains

      IF( mppsize < inijmin ) THEN
         WRITE(ctmp1,9001) ' With this specified domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj
         WRITE(ctmp2,9002) ' we can eliminate only ', jpni*jpnj - inijmin, ' land mpi subdomains, therefore '
         WRITE(ctmp3,9001) ' the number of ocean mpi subdomains (', inijmin,') exceeds the number of MPI processes:', mppsize
         WRITE(ctmp4,*) ' ==>>> Here is the list of best domain decompositions you should use: '
         CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 )
         CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. )   ! must be done by all cores
         CALL ctl_stop( 'STOP' )
      ENDIF

      IF( mppsize > jpni*jpnj ) THEN
         WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize
         WRITE(ctmp2,9003) ' exceeds the maximum number of subdomains (ocean+land) = ', jpni*jpnj
         WRITE(ctmp3,9001) ' defined by the following domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj
         WRITE(ctmp4,*) ' ==>>> Here is the list of best domain decompositions you should use: '
         CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 )
         CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. )   ! must be done by all cores
         CALL ctl_stop( 'STOP' )
      ENDIF

      jpnij = mppsize   ! force jpnij definition <-- remove as many land subdomains as needed to reach this condition
      IF( mppsize > inijmin ) THEN
         WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize
         WRITE(ctmp2,9003) ' exceeds the maximum number of ocean subdomains = ', inijmin
         WRITE(ctmp3,9002) ' we suppressed ', jpni*jpnj - mppsize, ' land subdomains '
         WRITE(ctmp4,9002) ' BUT we had to keep ', mppsize - inijmin, ' land subdomains that are useless...'
         CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' )
      ELSE   ! mppsize = inijmin
         IF(lwp) THEN
            IF(llbest) WRITE(numout,*) 'mpp_init: You use an optimal domain decomposition'
            WRITE(numout,*) '~~~~~~~~ '
            WRITE(numout,9003) ' Number of mpi processes: ', mppsize
            WRITE(numout,9003) ' Number of ocean subdomains = ', inijmin
            WRITE(numout,9003) ' Number of suppressed land subdomains = ', jpni*jpnj - inijmin
            WRITE(numout,*)
         ENDIF
      ENDIF
9000  FORMAT (a, i4, a, i4, a, i7, a)
9001  FORMAT (a, i4, a, i4)
9002  FORMAT (a, i4, a)
9003  FORMAT (a, i5)

      IF( numbot /= -1 )   CALL iom_close( numbot )

      ALLOCATE(  nfiimpp(jpni,jpnj), nfipproc(jpni,jpnj), nfilcit(jpni,jpnj) ,   &
         &       nimppt(jpnij) , ibonit(jpnij) , nlcit(jpnij) , nlcjt(jpnij) ,   &
         &       njmppt(jpnij) , ibonjt(jpnij) , nldit(jpnij) , nldjt(jpnij) ,   &
         &       nleit(jpnij)  , nlejt(jpnij)  ,                                 &
         &       iin(jpnij), ii_nono(jpnij), ii_noea(jpnij),                     &
         &       ijn(jpnij), ii_noso(jpnij), ii_nowe(jpnij),                     &
         &       iimppt(jpni,jpnj), ilci(jpni,jpnj), ibondi(jpni,jpnj), ipproc(jpni,jpnj),   &
         &       ijmppt(jpni,jpnj), ilcj(jpni,jpnj), ibondj(jpni,jpnj), ipolj(jpni,jpnj) ,   &
         &       ilei(jpni,jpnj), ildi(jpni,jpnj), iono(jpni,jpnj), ioea(jpni,jpnj),         &
         &       ilej(jpni,jpnj), ildj(jpni,jpnj), ioso(jpni,jpnj), iowe(jpni,jpnj),         &
         &       STAT=ierr )
      CALL mpp_sum( 'mppini', ierr )
      IF( ierr /= 0 )   CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate standard ocean arrays' )

#if defined key_agrif
      IF( .NOT. Agrif_Root() ) THEN       ! AGRIF children: specific setting (cf. agrif_user.F90)
         IF( jpiglo /= nbcellsx + 2 + 2*nbghostcells ) THEN
            IF(lwp) THEN
               WRITE(numout,*)
               WRITE(numout,*) 'jpiglo should be: ', nbcellsx + 2 + 2*nbghostcells
            ENDIF
            CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires jpiglo == nbcellsx + 2 + 2*nbghostcells' )
         ENDIF
         IF( jpjglo /= nbcellsy + 2 + 2*nbghostcells ) THEN
            IF(lwp) THEN
               WRITE(numout,*)
               WRITE(numout,*) 'jpjglo should be: ', nbcellsy + 2 + 2*nbghostcells
            ENDIF
            CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires jpjglo == nbcellsy + 2 + 2*nbghostcells' )
         ENDIF
         IF( ln_use_jattr )   CALL ctl_stop( 'STOP', 'mpp_init: Agrif children requires ln_use_jattr = .false. ' )
      ENDIF
#endif
      !
      !  2. Index arrays for subdomains
      ! -----------------------------------
      !
      nreci = 2 * nn_hls
      nrecj = 2 * nn_hls
      CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax, iimppt, ijmppt, ilci, ilcj )
      nfiimpp(:,:) = iimppt(:,:)
      nfilcit(:,:) = ilci(:,:)
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'MPI Message Passing MPI - domain lay out over processors'
         WRITE(numout,*)
         WRITE(numout,*) ' defines mpp subdomains'
         WRITE(numout,*) ' jpni = ', jpni
         WRITE(numout,*) ' jpnj = ', jpnj
         WRITE(numout,*)
         WRITE(numout,*) ' sum ilci(i,1) = ', SUM(ilci(:,1)), ' jpiglo = ', jpiglo
         WRITE(numout,*) ' sum ilcj(1,j) = ', SUM(ilcj(1,:)), ' jpjglo = ', jpjglo
      ENDIF

      !  3. Subdomain description in the Regular Case
      ! --------------------------------------------
      ! specific cases where there is no communication -> must do the periodicity by itself
      ! Warning: because of potential land-area suppression, do not use nbond[ij] == 2
      l_Iperio = jpni == 1 .AND. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7)
      l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7)
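      ! For reference, the jperio conventions implied by the tests above and by the
      ! north-fold code below (a summary, not an exhaustive namelist description):
      !    jperio = 1, 4, 6, 7 : east-west cyclic          (hence l_Iperio when jpni == 1)
      !    jperio = 2, 7       : north-south periodic      (hence l_Jperio when jpnj == 1)
      !    jperio = 3, 4       : north fold, T-point pivot
      !    jperio = 5, 6       : north fold, F-point pivot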

      DO jarea = 1, jpni*jpnj
         !
         iarea0 = jarea - 1
         ii = 1 + MOD(iarea0,jpni)
         ij = 1 +     iarea0/jpni
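         ! (area numbers run row by row over the jpni x jpnj grid; for illustration,
         !  with jpni = 3 and jpnj = 2, jarea = 5 gives iarea0 = 4, hence
         !  ii = 1 + MOD(4,3) = 2 and ij = 1 + 4/3 = 2, i.e. the 2nd column of the 2nd row)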
         ili = ilci(ii,ij)
         ilj = ilcj(ii,ij)
         ibondi(ii,ij) = 0                          ! default: has e-w neighbours
         IF( ii   ==    1 )   ibondi(ii,ij) = -1    ! first column, has only e neighbour
         IF( ii   == jpni )   ibondi(ii,ij) =  1    ! last  column, has only w neighbour
         IF( jpni ==    1 )   ibondi(ii,ij) =  2    ! has no e-w neighbour
         ibondj(ii,ij) = 0                          ! default: has n-s neighbours
         IF( ij   ==    1 )   ibondj(ii,ij) = -1    ! first row, has only n neighbour
         IF( ij   == jpnj )   ibondj(ii,ij) =  1    ! last  row, has only s neighbour
         IF( jpnj ==    1 )   ibondj(ii,ij) =  2    ! has no n-s neighbour
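         ! (summary of the ibondi/ibondj encoding used throughout this routine:
         !   0 = neighbours on both sides, -1 = neighbour on the + side only (first row/column),
         !   1 = neighbour on the - side only (last row/column), 2 = no neighbour in that direction)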

         ! Subdomain neighbors (get their zone number): default definition
         ioso(ii,ij) = iarea0 - jpni
         iowe(ii,ij) = iarea0 - 1
         ioea(ii,ij) = iarea0 + 1
         iono(ii,ij) = iarea0 + jpni
         ildi(ii,ij) = 1 + nn_hls
         ilei(ii,ij) = ili - nn_hls
         ildj(ii,ij) = 1 + nn_hls
         ilej(ii,ij) = ilj - nn_hls

         ! East-West periodicity: change ibondi, ioea, iowe
         IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7 ) THEN
            IF( jpni /= 1 )      ibondi(ii,ij) = 0                     ! redefine: all have e-w neighbours
            IF( ii == 1    )   iowe(ii,ij) = iarea0 + (jpni-1)         ! redefine: first column, address of w neighbour
            IF( ii == jpni )   ioea(ii,ij) = iarea0 - (jpni-1)         ! redefine: last  column, address of e neighbour
         ENDIF

         ! Simple North-South periodicity: change ibondj, ioso, iono
         IF( jperio == 2 .OR. jperio == 7 ) THEN
            IF( jpnj /= 1 )      ibondj(ii,ij) = 0                     ! redefine: all have n-s neighbours
            IF( ij == 1    )   ioso(ii,ij) = iarea0 + jpni * (jpnj-1)  ! redefine: first row, address of s neighbour
            IF( ij == jpnj )   iono(ii,ij) = iarea0 - jpni * (jpnj-1)  ! redefine: last  row, address of n neighbour
         ENDIF

         ! North fold: define ipolj, change iono. Warning: we do not change ibondj...
         ipolj(ii,ij) = 0
         IF( jperio == 3 .OR. jperio == 4 ) THEN
            ijm1 = jpni*(jpnj-1)
            imil = ijm1+(jpni+1)/2
            IF( jarea > ijm1 ) ipolj(ii,ij) = 3
            IF( MOD(jpni,2) == 1 .AND. jarea == imil ) ipolj(ii,ij) = 4
            IF( ipolj(ii,ij) == 3 ) iono(ii,ij) = jpni*jpnj-jarea+ijm1   ! MPI rank of northern neighbour
         ENDIF
         IF( jperio == 5 .OR. jperio == 6 ) THEN
            ijm1 = jpni*(jpnj-1)
            imil = ijm1+(jpni+1)/2
            IF( jarea > ijm1) ipolj(ii,ij) = 5
            IF( MOD(jpni,2) == 1 .AND. jarea == imil ) ipolj(ii,ij) = 6
            IF( ipolj(ii,ij) == 5) iono(ii,ij) = jpni*jpnj-jarea+ijm1    ! MPI rank of northern neighbour
         ENDIF
         !
      END DO

      ! 4. deal with land subdomains
      ! ----------------------------
      !
      ! specify which subdomains are oce subdomains; the others are land subdomains
      ipproc(:,:) = -1
      icont = -1
      DO jarea = 1, jpni*jpnj
         iarea0 = jarea - 1
         ii = 1 + MOD(iarea0,jpni)
         ij = 1 +     iarea0/jpni
         IF( llisoce(ii,ij) ) THEN
            icont = icont + 1
            ipproc(ii,ij) = icont
            iin(icont+1) = ii
            ijn(icont+1) = ij
         ENDIF
      END DO
      ! if needed add some land subdomains to reach jpnij active subdomains
      i2add = jpnij - inijmin
      DO jarea = 1, jpni*jpnj
         iarea0 = jarea - 1
         ii = 1 + MOD(iarea0,jpni)
         ij = 1 +     iarea0/jpni
         IF( .NOT. llisoce(ii,ij) .AND. i2add > 0 ) THEN
            icont = icont + 1
            ipproc(ii,ij) = icont
            iin(icont+1) = ii
            ijn(icont+1) = ij
            i2add = i2add - 1
         ENDIF
      END DO
      nfipproc(:,:) = ipproc(:,:)

      ! neighbour treatment: change ibondi, ibondj if next to a land zone
      DO jarea = 1, jpni*jpnj
         ii = 1 + MOD( jarea-1  , jpni )
         ij = 1 +    ( jarea-1 ) / jpni
         ! land-only area with an active n neighbour
         IF( ipproc(ii,ij) == -1 .AND. 0 <= iono(ii,ij) .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN
            iino = 1 + MOD( iono(ii,ij) , jpni )   ! ii index of this n neighbour
            ijno = 1 +      iono(ii,ij) / jpni     ! ij index of this n neighbour
            ! In case of north fold exchange: I am the n neighbour of my n neighbour!! (#1057)
            ! --> for northern neighbours of northern row processors (in case of north-fold)
            !     need to reverse the LOGICAL direction of communication
            idir = 1                                           ! we are indeed the s neighbour of this n neighbour
            IF( ij == jpnj .AND. ijno == jpnj )   idir = -1    ! both are on the last row, we are in fact the n neighbour
            IF( ibondj(iino,ijno) == idir )   ibondj(iino,ijno) =   2     ! this n neighbour had only a s/n neighbour -> no more
            IF( ibondj(iino,ijno) == 0    )   ibondj(iino,ijno) = -idir   ! this n neighbour had both n-s neighbours -> keep 1
         ENDIF
         ! land-only area with an active s neighbour
         IF( ipproc(ii,ij) == -1 .AND. 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN
            iiso = 1 + MOD( ioso(ii,ij) , jpni )   ! ii index of this s neighbour
            ijso = 1 +      ioso(ii,ij) / jpni     ! ij index of this s neighbour
            IF( ibondj(iiso,ijso) == -1 )   ibondj(iiso,ijso) = 2   ! this s neighbour had only a n neighbour -> no more neighbour
            IF( ibondj(iiso,ijso) ==  0 )   ibondj(iiso,ijso) = 1   ! this s neighbour had both n-s neighbours -> keep s neighbour
         ENDIF
         ! land-only area with an active e neighbour
         IF( ipproc(ii,ij) == -1 .AND. 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= jpni*jpnj-1 ) THEN
            iiea = 1 + MOD( ioea(ii,ij) , jpni )   ! ii index of this e neighbour
            ijea = 1 +      ioea(ii,ij) / jpni     ! ij index of this e neighbour
            IF( ibondi(iiea,ijea) == 1 )   ibondi(iiea,ijea) =  2   ! this e neighbour had only a w neighbour -> no more neighbour
            IF( ibondi(iiea,ijea) == 0 )   ibondi(iiea,ijea) = -1   ! this e neighbour had both e-w neighbours -> keep e neighbour
         ENDIF
         ! land-only area with an active w neighbour
         IF( ipproc(ii,ij) == -1 .AND. 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN
            iiwe = 1 + MOD( iowe(ii,ij) , jpni )   ! ii index of this w neighbour
            ijwe = 1 +      iowe(ii,ij) / jpni     ! ij index of this w neighbour
            IF( ibondi(iiwe,ijwe) == -1 )   ibondi(iiwe,ijwe) = 2   ! this w neighbour had only a e neighbour -> no more neighbour
            IF( ibondi(iiwe,ijwe) ==  0 )   ibondi(iiwe,ijwe) = 1   ! this w neighbour had both e-w neighbours -> keep w neighbour
         ENDIF
      END DO

      ! Update il[de][ij] according to modified ibond[ij]
      ! ----------------------
      DO jproc = 1, jpnij
         ii = iin(jproc)
         ij = ijn(jproc)
         IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 )   ildi(ii,ij) = 1
         IF( ibondi(ii,ij) ==  1 .OR. ibondi(ii,ij) == 2 )   ilei(ii,ij) = ilci(ii,ij)
         IF( ibondj(ii,ij) == -1 .OR. ibondj(ii,ij) == 2 )   ildj(ii,ij) = 1
         IF( ibondj(ii,ij) ==  1 .OR. ibondj(ii,ij) == 2 )   ilej(ii,ij) = ilcj(ii,ij)
      END DO

      ! 5. Subdomain print
      ! ------------------
      IF(lwp) THEN
         ifreq = 4
         il1 = 1
         DO jn = 1, (jpni-1)/ifreq+1
            il2 = MIN(jpni,il1+ifreq-1)
            WRITE(numout,*)
            WRITE(numout,9400) ('***',ji=il1,il2-1)
            DO jj = jpnj, 1, -1
               WRITE(numout,9403) (' ',ji=il1,il2-1)
               WRITE(numout,9402) jj, (ilci(ji,jj),ilcj(ji,jj),ji=il1,il2)
               WRITE(numout,9404) (ipproc(ji,jj),ji=il1,il2)
               WRITE(numout,9403) (' ',ji=il1,il2-1)
               WRITE(numout,9400) ('***',ji=il1,il2-1)
            END DO
            WRITE(numout,9401) (ji,ji=il1,il2)
            il1 = il1+ifreq
         END DO
 9400    FORMAT(' ***' ,20('*************',a3) )
 9403    FORMAT(' * ',20(' * ',a3) )
 9401    FORMAT(' ' ,20(' ',i3,' ') )
 9402    FORMAT(' ',i3,' * ',20(i3,' x',i3,' * ') )
 9404    FORMAT(' * ' ,20(' ',i3,' * ') )
      ENDIF

      ! just to save nono etc for all proc
      ! warning: ii*ij (zone) /= nproc (processors)!
      ! ioso = zone number, ii_noso = proc number
      ii_noso(:) = -1
      ii_nono(:) = -1
      ii_noea(:) = -1
      ii_nowe(:) = -1
      DO jproc = 1, jpnij
         ii = iin(jproc)
         ij = ijn(jproc)
         IF( 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiso = 1 + MOD( ioso(ii,ij) , jpni )
            ijso = 1 +      ioso(ii,ij) / jpni
            ii_noso(jproc) = ipproc(iiso,ijso)
         ENDIF
         IF( 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiwe = 1 + MOD( iowe(ii,ij) , jpni )
            ijwe = 1 +      iowe(ii,ij) / jpni
            ii_nowe(jproc) = ipproc(iiwe,ijwe)
         ENDIF
         IF( 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiea = 1 + MOD( ioea(ii,ij) , jpni )
            ijea = 1 +      ioea(ii,ij) / jpni
            ii_noea(jproc)= ipproc(iiea,ijea)
         ENDIF
         IF( 0 <= iono(ii,ij) .AND. iono(ii,ij) <= (jpni*jpnj-1) ) THEN
            iino = 1 + MOD( iono(ii,ij) , jpni )
            ijno = 1 +      iono(ii,ij) / jpni
            ii_nono(jproc)= ipproc(iino,ijno)
         ENDIF
      END DO

      ! 6. Change processor name
      ! ------------------------
      ii = iin(narea)
      ij = ijn(narea)
      !
      ! set default neighbours
      noso = ii_noso(narea)
      nowe = ii_nowe(narea)
      noea = ii_noea(narea)
      nono = ii_nono(narea)
      nlci = ilci(ii,ij)
      nldi = ildi(ii,ij)
      nlei = ilei(ii,ij)
      nlcj = ilcj(ii,ij)
      nldj = ildj(ii,ij)
      nlej = ilej(ii,ij)
      nbondi = ibondi(ii,ij)
      nbondj = ibondj(ii,ij)
      nimpp = iimppt(ii,ij)
      njmpp = ijmppt(ii,ij)
      jpi = nlci
      jpj = nlcj
      jpk = jpkglo                              ! third dim
#if defined key_agrif
      ! simple trick to use same vertical grid as parent but different number of levels:
      ! Save maximum number of levels in jpkglo, then define all vertical grids with this number.
      ! Suppress once vertical online interpolation is ok
!!$   IF( .NOT. Agrif_Root() )   jpkglo = Agrif_Parent( jpkglo )
#endif
      jpim1 = jpi-1                             ! inner domain indices
      jpjm1 = jpj-1                             !   "           "
      jpkm1 = MAX( 1, jpk-1 )                   !   "           "
      jpij  = jpi*jpj                           ! jpi x jpj
      DO jproc = 1, jpnij
         ii = iin(jproc)
         ij = ijn(jproc)
         nlcit(jproc) = ilci(ii,ij)
         nldit(jproc) = ildi(ii,ij)
         nleit(jproc) = ilei(ii,ij)
         nlcjt(jproc) = ilcj(ii,ij)
         nldjt(jproc) = ildj(ii,ij)
         nlejt(jproc) = ilej(ii,ij)
         ibonit(jproc) = ibondi(ii,ij)
         ibonjt(jproc) = ibondj(ii,ij)
         nimppt(jproc) = iimppt(ii,ij)
         njmppt(jproc) = ijmppt(ii,ij)
      END DO

      ! Save processor layout in ascii file
      IF(llwrtlay) THEN
         CALL ctl_opn( inum, 'layout.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )
         WRITE(inum,'(a)') ' jpnij jpimax jpjmax jpk jpiglo jpjglo'//&
            &              ' ( local: narea jpi jpj )'
         WRITE(inum,'(6i8,a,3i8,a)') jpnij,jpimax,jpjmax,jpk,jpiglo,jpjglo,&
            &              ' ( local: ',narea,jpi,jpj,' )'
         WRITE(inum,'(a)') 'nproc nlci nlcj nldi nldj nlei nlej nimp njmp nono noso nowe noea nbondi nbondj '

         DO jproc = 1, jpnij
            WRITE(inum,'(13i5,2i7)')   jproc-1, nlcit  (jproc), nlcjt  (jproc),   &
               &                                nldit  (jproc), nldjt  (jproc),   &
               &                                nleit  (jproc), nlejt  (jproc),   &
               &                                nimppt (jproc), njmppt (jproc),   &
               &                                ii_nono(jproc), ii_noso(jproc),   &
               &                                ii_nowe(jproc), ii_noea(jproc),   &
               &                                ibonit (jproc), ibonjt (jproc)
         END DO
      END IF

      !                          ! north fold parameter
      ! Define npolj: either 0, 3, 4, 5 or 6
      ! In this case the important thing is that npolj /= 0
      ! Because if we go through these lines it is because jpni > 1 and thus
      ! we must use lbcnorthmpp, which tests only npolj = 0 or npolj /= 0
      npolj = 0
      ij = ijn(narea)
      IF( jperio == 3 .OR. jperio == 4 ) THEN
         IF( ij == jpnj )   npolj = 3
      ENDIF
      IF( jperio == 5 .OR. jperio == 6 ) THEN
         IF( ij == jpnj )   npolj = 5
      ENDIF
      !
      nproc = narea-1
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) ' resulting internal parameters : '
         WRITE(numout,*) ' nproc = ', nproc
         WRITE(numout,*) ' nowe = ', nowe , ' noea = ', noea
         WRITE(numout,*) ' nono = ', nono , ' noso = ', noso
         WRITE(numout,*) ' nbondi = ', nbondi
         WRITE(numout,*) ' nbondj = ', nbondj
         WRITE(numout,*) ' npolj = ', npolj
         WRITE(numout,*) ' l_Iperio = ', l_Iperio
         WRITE(numout,*) ' l_Jperio = ', l_Jperio
         WRITE(numout,*) ' nlci = ', nlci
         WRITE(numout,*) ' nlcj = ', nlcj
         WRITE(numout,*) ' nimpp = ', nimpp
         WRITE(numout,*) ' njmpp = ', njmpp
         WRITE(numout,*) ' nreci = ', nreci
         WRITE(numout,*) ' nrecj = ', nrecj
         WRITE(numout,*) ' nn_hls = ', nn_hls
      ENDIF

      !                          ! Prepare mpp north fold
      IF( jperio >= 3 .AND. jperio <= 6 .AND. jpni > 1 ) THEN
         CALL mpp_ini_north
         IF(lwp) THEN
            WRITE(numout,*)
            WRITE(numout,*) ' ==>>> North fold boundary prepared for jpni >1'
            ! additional prints in layout.dat
         ENDIF
         IF(llwrtlay) THEN
            WRITE(inum,*)
            WRITE(inum,*)
            WRITE(inum,*) 'number of subdomains located along the north fold : ', ndim_rank_north
            WRITE(inum,*) 'Rank of the subdomains located along the north fold : ', ndim_rank_north
            DO jproc = 1, ndim_rank_north, 5
               WRITE(inum,*) nrank_north( jproc:MINVAL( (/jproc+4,ndim_rank_north/) ) )
            END DO
         ENDIF
      ENDIF
      !
      IF( ln_nnogather ) THEN
         CALL mpp_init_nfdcom     ! northfold neighbour lists
         IF(llwrtlay) THEN
            WRITE(inum,*)
            WRITE(inum,*)
            WRITE(inum,*) 'north fold exchanges with explicit point-to-point messaging :'
            WRITE(inum,*) 'nfsloop : ', nfsloop
            WRITE(inum,*) 'nfeloop : ', nfeloop
            WRITE(inum,*) 'nsndto : ', nsndto
            WRITE(inum,*) 'isendto : ', isendto
         ENDIF
      ENDIF
      !
      IF(llwrtlay) CLOSE(inum)
      !
      DEALLOCATE(iin, ijn, ii_nono, ii_noea, ii_noso, ii_nowe,      &
         &       iimppt, ijmppt, ibondi, ibondj, ipproc, ipolj,     &
         &       ilci, ilcj, ilei, ilej, ildi, ildj,                &
         &       iono, ioea, ioso, iowe, llisoce)
      !
   END SUBROUTINE mpp_init


   SUBROUTINE mpp_basic_decomposition( knbi, knbj, kimax, kjmax, kimppt, kjmppt, klci, klcj )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_basic_decomposition  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!
      !! ** Method  :   Global domain is distributed in smaller local domains.
      !!
      !! ** Action : - set for all knbi*knbj domains:
      !!                    kimppt     : longitudinal index
      !!                    kjmppt     : latitudinal  index
      !!                    klci       : first  dimension
      !!                    klcj       : second dimension
      !!----------------------------------------------------------------------
      INTEGER,                                 INTENT(in   ) ::   knbi, knbj
      INTEGER,                                 INTENT(  out) ::   kimax, kjmax
      INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT(  out) ::   kimppt, kjmppt
      INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT(  out) ::   klci, klcj
      !
      INTEGER ::   ji, jj
      INTEGER ::   iresti, irestj, irm, ijpjmin
      INTEGER ::   ireci, irecj
      !!----------------------------------------------------------------------
      !
#if defined key_nemocice_decomp
      kimax = ( nx_global+2-2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls    ! first  dim.
      kjmax = ( ny_global+2-2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls    ! second dim.
#else
      kimax = ( jpiglo - 2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls       ! first  dim.
      kjmax = ( jpjglo - 2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls       ! second dim.
#endif
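      ! (the formula is a ceiling division of the interior size by knbi/knbj, plus the
      !  2*nn_hls halo rows/columns; for illustration, with jpiglo = 182, nn_hls = 1 and
      !  knbi = 4 one gets kimax = (180 + 3)/4 + 2 = 45 + 2 = 47)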
      IF( .NOT. PRESENT(kimppt) )   RETURN
      !
      !  1. Dimension arrays for subdomains
      ! -----------------------------------
      !  Computation of local domain sizes klci() klcj()
      !  These dimensions depend on global sizes knbi,knbj and jpiglo,jpjglo
      !  Each local domain is no larger than the global dimension (minus the overlap)
      !  divided by the number of processors in that direction, plus the overlap rows/columns.
      !
      ireci = 2 * nn_hls
      irecj = 2 * nn_hls
      iresti = 1 + MOD( jpiglo - ireci -1 , knbi )
      irestj = 1 + MOD( jpjglo - irecj -1 , knbj )
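      ! (iresti / irestj give the number of columns / rows that keep the maximum size,
      !  so that the subdomain widths sum back to the global size; continuing the
      !  illustration above, jpiglo = 182, nn_hls = 1, knbi = 4 gives
      !  iresti = 1 + MOD(179,4) = 4, hence all four columns keep klci = 47 and the
      !  interior widths 4*(47-2) = 180 add up to jpiglo - 2*nn_hls)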
      !
      !  Need to use kimax and kjmax here since jpi and jpj not yet defined
#if defined key_nemocice_decomp
      ! Change padding to be consistent with CICE
      klci(1:knbi-1      ,:) = kimax
      klci(knbi          ,:) = jpiglo - (knbi - 1) * (kimax - nreci)
      klcj(:,      1:knbj-1) = kjmax
      klcj(:,          knbj) = jpjglo - (knbj - 1) * (kjmax - nrecj)
#else
      klci(1:iresti      ,:) = kimax
      klci(iresti+1:knbi ,:) = kimax-1
      IF( MINVAL(klci) < 3 ) THEN
         WRITE(ctmp1,*) ' mpp_basic_decomposition: minimum value of jpi must be >= 3'
         WRITE(ctmp2,*) ' We have ', MINVAL(klci)
         CALL ctl_stop( 'STOP', ctmp1, ctmp2 )
      ENDIF
      IF( jperio == 3 .OR. jperio == 4 .OR. jperio == 5 .OR. jperio == 6 ) THEN
         ! minimize the size of the last row to compensate for the north pole folding coast
         IF( jperio == 3 .OR. jperio == 4 )   ijpjmin = 5   ! V and F folding involves line jpj-3 that must not be south boundary
         IF( jperio == 5 .OR. jperio == 6 )   ijpjmin = 4   ! V and F folding involves line jpj-2 that must not be south boundary
         irm = knbj - irestj                                ! total number of lines to be removed
         klcj(:,knbj) = MAX( ijpjmin, kjmax-irm )           ! we must have jpj >= ijpjmin in the last row
         irm = irm - ( kjmax - klcj(1,knbj) )               ! remaining number of lines to remove
         irestj = knbj - 1 - irm
         klcj(:, 1:irestj) = kjmax
         klcj(:, irestj+1:knbj-1) = kjmax-1
      ELSE
         ijpjmin = 3
         klcj(:, 1:irestj) = kjmax
         klcj(:, irestj+1:knbj) = kjmax-1
      ENDIF
      IF( MINVAL(klcj) < ijpjmin ) THEN
         WRITE(ctmp1,*) ' mpp_basic_decomposition: minimum value of jpj must be >= ', ijpjmin
         WRITE(ctmp2,*) ' We have ', MINVAL(klcj)
         CALL ctl_stop( 'STOP', ctmp1, ctmp2 )
      ENDIF
#endif

      !  2. Index arrays for subdomains
      ! -------------------------------
      kimppt(:,:) = 1
      kjmppt(:,:) = 1
      !
      IF( knbi > 1 ) THEN
         DO jj = 1, knbj
            DO ji = 2, knbi
               kimppt(ji,jj) = kimppt(ji-1,jj) + klci(ji-1,jj) - ireci
            END DO
         END DO
      ENDIF
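      ! (each kimppt is the global i-index of the first point of the subdomain, so
      !  successive subdomains overlap by ireci = 2*nn_hls points; with the
      !  illustration above, klci = 47 and nn_hls = 1 give kimppt = 1, 46, 91, 136,
      !  and the last subdomain ends at 136 + 47 - 1 = 182 = jpiglo)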
      !
      IF( knbj > 1 ) THEN
         DO jj = 2, knbj
            DO ji = 1, knbi
               kjmppt(ji,jj) = kjmppt(ji,jj-1) + klcj(ji,jj-1) - irecj
            END DO
         END DO
      ENDIF

   END SUBROUTINE mpp_basic_decomposition


   SUBROUTINE mpp_init_bestpartition( knbij, knbi, knbj, knbcnt, ldlist )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_bestpartition  ***
      !!
      !! ** Purpose :
      !!
      !! ** Method  :
      !!----------------------------------------------------------------------
      INTEGER,           INTENT(in   ) ::   knbij         ! total number of subdomains (knbi*knbj)
      INTEGER, OPTIONAL, INTENT(  out) ::   knbi, knbj    ! number of subdomains along i and j (knbi and knbj)
      INTEGER, OPTIONAL, INTENT(  out) ::   knbcnt        ! number of land subdomains
      LOGICAL, OPTIONAL, INTENT(in   ) ::   ldlist        ! .true.: print the list of the best domain decompositions (with land)
      !
      INTEGER :: ji, jj, ii, iitarget
      INTEGER :: iszitst, iszjtst
      INTEGER :: isziref, iszjref
      INTEGER :: inbij, iszij
      INTEGER :: inbimax, inbjmax, inbijmax
      INTEGER :: isz0, isz1
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   indexok
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   inbi0, inbj0, inbij0   ! number of subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   iszi0, iszj0, iszij0   ! max size of the subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   inbi1, inbj1, inbij1   ! number of subdomains along i,j
      INTEGER, DIMENSION(  :), ALLOCATABLE ::   iszi1, iszj1, iszij1   ! max size of the subdomains along i,j
      LOGICAL :: llist
      LOGICAL, DIMENSION(:,:), ALLOCATABLE ::   llmsk2d                ! mask of the retained (knbi,knbj) combinations
      LOGICAL, DIMENSION(:,:), ALLOCATABLE ::   llisoce                !  -     -
      REAL(wp):: zpropland
      !!----------------------------------------------------------------------
      !
      llist = .FALSE.
      IF( PRESENT(ldlist) ) llist = ldlist

      CALL mpp_init_landprop( zpropland )                      ! get the proportion of land points over the global domain
      inbij = NINT( REAL(knbij, wp) / ( 1.0 - zpropland ) )    ! define the largest possible value for jpni*jpnj
      !
      IF( llist ) THEN   ;   inbijmax = inbij*2
      ELSE               ;   inbijmax = inbij
      ENDIF
      !
      ALLOCATE( inbi0(inbijmax), inbj0(inbijmax), iszi0(inbijmax), iszj0(inbijmax) )
      !
      inbimax = 0
      inbjmax = 0
      isziref = jpiglo*jpjglo+1
      iszjref = jpiglo*jpjglo+1
      !
      ! get the list of knbi that gives a smaller jpimax than knbi-1
      ! get the list of knbj that gives a smaller jpjmax than knbj-1
      DO ji = 1, inbijmax
#if defined key_nemocice_decomp
         iszitst = ( nx_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls    ! first  dim.
#else
         iszitst = ( jpiglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls
#endif
         IF( iszitst < isziref ) THEN
            isziref = iszitst
            inbimax = inbimax + 1
            inbi0(inbimax) = ji
            iszi0(inbimax) = isziref
         ENDIF
#if defined key_nemocice_decomp
         iszjtst = ( ny_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls    ! second dim.
#else
         iszjtst = ( jpjglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls
#endif
         IF( iszjtst < iszjref ) THEN
            iszjref = iszjtst
            inbjmax = inbjmax + 1
            inbj0(inbjmax) = ji
            iszj0(inbjmax) = iszjref
         ENDIF
      END DO

      ! combine these 2 lists to get all possible knbi*knbj <= inbijmax
      ALLOCATE( llmsk2d(inbimax,inbjmax) )
      DO jj = 1, inbjmax
         DO ji = 1, inbimax
            IF ( inbi0(ji) * inbj0(jj) <= inbijmax ) THEN   ;   llmsk2d(ji,jj) = .TRUE.
            ELSE                                            ;   llmsk2d(ji,jj) = .FALSE.
            ENDIF
         END DO
      END DO
      isz1 = COUNT(llmsk2d)
      ALLOCATE( inbi1(isz1), inbj1(isz1), iszi1(isz1), iszj1(isz1) )
      ii = 0
      DO jj = 1, inbjmax
         DO ji = 1, inbimax
            IF( llmsk2d(ji,jj) .EQV. .TRUE. ) THEN
               ii = ii + 1
               inbi1(ii) = inbi0(ji)
               inbj1(ii) = inbj0(jj)
               iszi1(ii) = iszi0(ji)
               iszj1(ii) = iszj0(jj)
            END IF
         END DO
      END DO
      DEALLOCATE( inbi0, inbj0, iszi0, iszj0 )
      DEALLOCATE( llmsk2d )

      ALLOCATE( inbij1(isz1), iszij1(isz1) )
      inbij1(:) = inbi1(:) * inbj1(:)
      iszij1(:) = iszi1(:) * iszj1(:)

      ! if there is no land and no print
      IF( .NOT. llist .AND. numbot == -1 ) THEN
         ! get the smallest partition which gives the smallest subdomain size
         ii = MINLOC(inbij1, mask = iszij1 == MINVAL(iszij1), dim = 1)
         knbi = inbi1(ii)
         knbj = inbj1(ii)
         IF(PRESENT(knbcnt))   knbcnt = 0
         DEALLOCATE( inbi1, inbj1, inbij1, iszi1, iszj1, iszij1 )
         RETURN
      ENDIF

      ! extract only the partitions which reduce the subdomain size in comparison with smaller partitions
      ALLOCATE( indexok(isz1) )                 ! to store indices of the best partitions
      isz0 = 0                                  ! number of best partitions
      inbij = 1                                 ! start with the min value of inbij1 => 1
      iszij = jpiglo*jpjglo+1                   ! default: larger than global domain
      DO WHILE( inbij <= inbijmax )             ! if we did not reach the max of inbij1
         ii = MINLOC(iszij1, mask = inbij1 == inbij, dim = 1)   ! warning: sends back the first occurrence if multiple results
         IF ( iszij1(ii) < iszij ) THEN
            isz0 = isz0 + 1
            indexok(isz0) = ii
            iszij = iszij1(ii)
         ENDIF
         inbij = MINVAL(inbij1, mask = inbij1 > inbij)   ! warning: returns the largest integer value if mask = .false. everywhere
      END DO
      DEALLOCATE( inbij1, iszij1 )

      ! keep only the best partitions (sorted by increasing order of subdomains number and decreasing subdomain size)
      ALLOCATE( inbi0(isz0), inbj0(isz0), iszi0(isz0), iszj0(isz0) )
      DO ji = 1, isz0
         ii = indexok(ji)
         inbi0(ji) = inbi1(ii)
         inbj0(ji) = inbj1(ii)
         iszi0(ji) = iszi1(ii)
         iszj0(ji) = iszj1(ii)
      END DO
      DEALLOCATE( indexok, inbi1, inbj1, iszi1, iszj1 )

      IF( llist ) THEN   ! we print about 21 best partitions
         IF(lwp) THEN
            WRITE(numout,*)
            WRITE(numout,*)          ' For your information:'
            WRITE(numout,'(a,i5,a)') ' list of the best partitions around ', knbij, ' mpi processes'
            WRITE(numout,*)          ' --------------------------------------', '-----', '--------------'
            WRITE(numout,*)
         END IF
         iitarget = MINLOC( inbi0(:)*inbj0(:), mask = inbi0(:)*inbj0(:) >= knbij, dim = 1 )
         DO ji = MAX(1,iitarget-10), MIN(isz0,iitarget+10)
            ALLOCATE( llisoce(inbi0(ji), inbj0(ji)) )
            CALL mpp_init_isoce( inbi0(ji), inbj0(ji), llisoce )   ! Warning: must be called by all cores (calls mpp_sum)
            inbij = COUNT(llisoce)
            DEALLOCATE( llisoce )
            IF(lwp) WRITE(numout,'(a, i5, a, i5, a, i4, a, i4, a, i9, a, i5, a, i5, a)')    &
               &     'nb_cores ' , inbij,' oce + ', inbi0(ji)*inbj0(ji) - inbij             &
               &                                  , ' land ( ', inbi0(ji),' x ', inbj0(ji), &
               & ' ), nb_points ', iszi0(ji)*iszj0(ji),' ( ', iszi0(ji),' x ', iszj0(ji),' )'
         END DO
         DEALLOCATE( inbi0, inbj0, iszi0, iszj0 )
         RETURN
      ENDIF

      DEALLOCATE( iszi0, iszj0 )
      inbij = inbijmax + 1        ! default: larger than possible
      ii = isz0+1                 ! start from the end of the list (smaller subdomains)
      DO WHILE( inbij > knbij )   ! while the number of ocean subdomains exceeds the number of procs
         ii = ii -1
         ALLOCATE( llisoce(inbi0(ii), inbj0(ii)) )
         CALL mpp_init_isoce( inbi0(ii), inbj0(ii), llisoce )   ! must be done by all cores
         inbij = COUNT(llisoce)
         DEALLOCATE( llisoce )
      END DO
      knbi = inbi0(ii)
      knbj = inbj0(ii)
      IF(PRESENT(knbcnt))   knbcnt = knbi * knbj - inbij
      DEALLOCATE( inbi0, inbj0 )
      !
   END SUBROUTINE mpp_init_bestpartition


   SUBROUTINE mpp_init_landprop( propland )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_landprop  ***
      !!
      !! ** Purpose : compute the proportion of land points in the surface land-sea mask
      !!
      !! ** Method  : read iproc strips (of length jpiglo) of the land-sea mask
      !!----------------------------------------------------------------------
      REAL(wp), INTENT(  out) ::   propland    ! proportion of land points in the global domain (between 0 and 1)
      !
      INTEGER, DIMENSION(jpni*jpnj) ::   kusedom_1d
      INTEGER :: inboce, iarea
      INTEGER :: iproc, idiv, ijsz
      INTEGER :: ijstr
      LOGICAL, ALLOCATABLE, DIMENSION(:,:) ::   lloce
      !!----------------------------------------------------------------------
      ! do nothing if there is no land-sea mask
      IF( numbot == -1 ) THEN
         propland = 0.
         RETURN
      ENDIF

      ! number of processes reading the bathymetry file
      iproc = MINVAL( (/mppsize, jpjglo/2, 100/) )    ! read at least 2 lines, no more than 100 processes reading at the same time

      ! we want to read iproc strips of the land-sea mask. -> pick up iproc processes every idiv processes starting at 1
      IF( iproc == 1 ) THEN   ;   idiv = mppsize
      ELSE                    ;   idiv = ( mppsize - 1 ) / ( iproc - 1 )
      ENDIF
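      ! (for illustration: with mppsize = 400 MPI processes and a large jpjglo,
      !  iproc = 100 and idiv = 399/99 = 4, so ranks 0, 4, 8, ..., 396 each read one strip)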

      iarea = (narea-1)/idiv   ! involved process number (starting counting at 0)
      IF( MOD( narea-1, idiv ) == 0 .AND. iarea < iproc ) THEN   ! beware idiv can be = to 1
         !
         ijsz = jpjglo / iproc                                               ! width of the stripe to read
         IF( iarea < MOD(jpjglo,iproc) )   ijsz = ijsz + 1
         ijstr = iarea*(jpjglo/iproc) + MIN(iarea, MOD(jpjglo,iproc)) + 1    ! starting j position of the reading
         !
         ALLOCATE( lloce(jpiglo, ijsz) )                                     ! allocate the strip
         CALL mpp_init_readbot_strip( ijstr, ijsz, lloce )
         inboce = COUNT(lloce)                                               ! number of ocean points in the stripe
         DEALLOCATE(lloce)
         !
      ELSE
         inboce = 0
      ENDIF
      CALL mpp_sum( 'mppini', inboce )   ! total number of ocean points over the global domain
      !
      propland = REAL( jpiglo*jpjglo - inboce, wp ) / REAL( jpiglo*jpjglo, wp )
      !
   END SUBROUTINE mpp_init_landprop


   SUBROUTINE mpp_init_isoce( knbi, knbj, ldisoce )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_isoce  ***
      !!
      !! ** Purpose : check which subdomains of a knbi x knbj mpi domain
      !!              decomposition contain at least 1 ocean point
      !!
      !! ** Method  : read knbj strips (of length jpiglo) of the land-sea mask
      !!----------------------------------------------------------------------
      INTEGER,                       INTENT(in   ) ::   knbi, knbj     ! domain decomposition
      LOGICAL, DIMENSION(knbi,knbj), INTENT(  out) ::   ldisoce        ! .true. if a subdomain contains at least 1 ocean point
      !
      INTEGER, DIMENSION(knbi,knbj) ::   inboce                        ! number of ocean points in each mpi subdomain
      INTEGER, DIMENSION(knbi*knbj) ::   inboce_1d
      INTEGER :: idiv, iimax, ijmax, iarea
      INTEGER :: ji, jn
      LOGICAL, ALLOCATABLE, DIMENSION(:,:) ::   lloce                  ! lloce(i,j) = .true. if the point (i,j) is ocean
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   iimppt, ilci
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ijmppt, ilcj
      !!----------------------------------------------------------------------
      ! do nothing if there is no land-sea mask
      IF( numbot == -1 ) THEN
         ldisoce(:,:) = .TRUE.
         RETURN
      ENDIF

      ! we want to read knbj strips of the land-sea mask. -> pick up knbj processes every idiv processes starting at 1
      IF     ( knbj == 1 ) THEN        ;   idiv = mppsize
      ELSE IF( mppsize < knbj ) THEN   ;   idiv = 1
      ELSE                             ;   idiv = ( mppsize - 1 ) / ( knbj - 1 )
      ENDIF
      inboce(:,:) = 0   ! default no ocean point found

      DO jn = 0, (knbj-1)/mppsize   ! if mppsize < knbj : more strips than mpi processes (because of potential land domains)
         !
         iarea = (narea-1)/idiv + jn * mppsize   ! involved process number (starting counting at 0)
         IF( MOD( narea-1, idiv ) == 0 .AND. iarea < knbj ) THEN   ! beware idiv can be = to 1
            !
            ALLOCATE( iimppt(knbi,knbj), ijmppt(knbi,knbj), ilci(knbi,knbj), ilcj(knbi,knbj) )
            CALL mpp_basic_decomposition( knbi, knbj, iimax, ijmax, iimppt, ijmppt, ilci, ilcj )
            !
            ALLOCATE( lloce(jpiglo, ilcj(1,iarea+1)) )                                  ! allocate the strip
            CALL mpp_init_readbot_strip( ijmppt(1,iarea+1), ilcj(1,iarea+1), lloce )    ! read the strip
            DO ji = 1, knbi
               inboce(ji,iarea+1) = COUNT( lloce(iimppt(ji,1):iimppt(ji,1)+ilci(ji,1)-1,:) )   ! number of ocean points in subdomain
            END DO
            !
            DEALLOCATE(lloce)
            DEALLOCATE(iimppt, ijmppt, ilci, ilcj)
            !
         ENDIF
      END DO

      inboce_1d = RESHAPE(inboce, (/ knbi*knbj /))
      CALL mpp_sum( 'mppini', inboce_1d )
      inboce = RESHAPE(inboce_1d, (/knbi, knbj/))
      ldisoce(:,:) = inboce(:,:) /= 0
      !
   END SUBROUTINE mpp_init_isoce


   SUBROUTINE mpp_init_readbot_strip( kjstr, kjcnt, ldoce )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_readbot_strip  ***
      !!
      !! ** Purpose : Read relevant bathymetric information in order to
      !!              provide a land/sea mask used for the elimination
      !!              of land domains, in an mpp computation.
      !!
      !! ** Method  : read a strip of size (jpiglo,...)
      !!----------------------------------------------------------------------
      INTEGER                         , INTENT(in   ) ::   kjstr    ! starting j position of the reading
      INTEGER                         , INTENT(in   ) ::   kjcnt    ! number of lines to read
      LOGICAL, DIMENSION(jpiglo,kjcnt), INTENT(  out) ::   ldoce    ! ldoce(i,j) = .true. if the point (i,j) is ocean
      !
      INTEGER                           ::   inumsave               ! local logical unit
      REAL(wp), DIMENSION(jpiglo,kjcnt) ::   zbot
      !!----------------------------------------------------------------------
      !
      inumsave = numout   ;   numout = numnul   ! redirect all print to /dev/null
      !
      IF( numbot /= -1 ) THEN
         CALL iom_get( numbot, jpdom_unknown, 'bottom_level', zbot, kstart = (/1,kjstr/), kcount = (/jpiglo, kjcnt/) )
      ELSE
         zbot(:,:) = 1.   ! put a non-null value
      ENDIF

      !
      ldoce(:,:) = zbot(:,:) > 0.
      numout = inumsave
      !
   END SUBROUTINE mpp_init_readbot_strip

   SUBROUTINE mpp_init_nfdcom
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_nfdcom  ***
      !! ** Purpose :   Setup for north fold exchanges with explicit
      !!                point-to-point messaging
      !!
      !! ** Method  :   Initialization of the northern neighbours lists.
      !!----------------------------------------------------------------------
      !!    1.0  !  2011-10  (A. C. Coward, NOCS & J. Donners, PRACE)
      !!    2.0  !  2013-06  Setup avoiding MPI communication (I. Epicoco, S. Mocavero, CMCC)
      !!----------------------------------------------------------------------
      INTEGER  ::   sxM, dxM, sxT, dxT, jn
      INTEGER  ::   njmppmax
      !!----------------------------------------------------------------------
      !
      njmppmax = MAXVAL( njmppt )
      !
      ! initializes the north-fold communication variables
      isendto(:) = 0
      nsndto     = 0
      !
      IF ( njmpp == njmppmax ) THEN      ! if I am a process in the north
         !
         ! sxM is the first point (in the global domain) needed to compute the north-fold for the current process
         sxM = jpiglo - nimppt(narea) - nlcit(narea) + 1
         ! dxM is the last point (in the global domain) needed to compute the north-fold for the current process
         dxM = jpiglo - nimppt(narea) + 2
         !
         ! loop over the other north-fold processes to find the processes
         ! managing the points belonging to the sxT-dxT range
         !
         DO jn = 1, jpni
            !
            sxT = nfiimpp(jn, jpnj)                            ! sxT = 1st  point (in the global domain) of the jn process
            dxT = nfiimpp(jn, jpnj) + nfilcit(jn, jpnj) - 1    ! dxT = last point (in the global domain) of the jn process
            !
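            ! (the three tests below cover the three possible overlaps between the
            !  [sxT,dxT] range owned by process jn and the [sxM,dxM] range needed here:
            !  jn straddles sxM, jn is fully contained in [sxM,dxM], or jn straddles dxM)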
            IF    ( sxT < sxM  .AND.  sxM < dxT ) THEN
               nsndto          = nsndto + 1
               isendto(nsndto) = jn
            ELSEIF( sxM <= sxT .AND.  dxM >= dxT ) THEN
               nsndto          = nsndto + 1
               isendto(nsndto) = jn
            ELSEIF( dxM < dxT  .AND.  sxT < dxM ) THEN
               nsndto          = nsndto + 1
               isendto(nsndto) = jn
            ENDIF
            !
         END DO
         nfsloop = 1
         nfeloop = nlci
         DO jn = 2,jpni-1
            IF( nfipproc(jn,jpnj) == (narea - 1) ) THEN
               IF( nfipproc(jn-1,jpnj) == -1 )   nfsloop = nldi
               IF( nfipproc(jn+1,jpnj) == -1 )   nfeloop = nlei
            ENDIF
         END DO
         !
      ENDIF
      l_north_nogather = .TRUE.
      !
   END SUBROUTINE mpp_init_nfdcom


#endif

   !!======================================================================
END MODULE mppini