MODULE obs_mpp
   !!======================================================================
   !!                       ***  MODULE obs_mpp  ***
   !! Observation diagnostics: Various MPP support routines
   !!======================================================================
   !! History :  2.0  !  2006-03  (K. Mogensen)  Original code
   !!             -   !  2006-05  (K. Mogensen)  Reformatted
   !!             -   !  2008-01  (K. Mogensen)  add mpp_global_max
   !!            3.6  !  2015-01  (J. Waters)    obs_mpp_find_obs_proc
   !!                                            rewritten to avoid global arrays
   !!----------------------------------------------------------------------
# define mpivar mpi_double_precision
   !!----------------------------------------------------------------------
   !! obs_mpp_bcast_integer : Broadcast an integer array from one processor to all processors
   !! obs_mpp_max_integer   : Find the maximum across processors of each value in an integer array
   !! obs_mpp_find_obs_proc : Find processors which should hold the observations, avoiding global arrays
   !! obs_mpp_sum_integers  : Sum an integer array over all processors
   !! obs_mpp_sum_integer   : Sum an integer over all processors
   !!----------------------------------------------------------------------
   USE dom_oce, ONLY :   nproc, mig, mjg   ! Ocean space and time domain variables
   USE mpp_map, ONLY :   mppmap
   USE in_out_manager
#if defined key_mpp_mpi
   USE lib_mpp, ONLY :   mpi_comm_opa      ! MPP library
#endif

   IMPLICIT NONE
   PRIVATE

   PUBLIC obs_mpp_bcast_integer, & !: Broadcast an integer array from a proc to all procs
      &   obs_mpp_max_integer,   & !: Find maximum across processors in an integer array
      &   obs_mpp_find_obs_proc, & !: Find processors which should hold the observations
      &   obs_mpp_sum_integers,  & !: Sum an integer array from all processors
      &   obs_mpp_sum_integer,   & !: Sum an integer from all processors
      &   mpp_alltoall_int,      &
      &   mpp_alltoallv_int,     &
      &   mpp_alltoallv_real,    &
      &   mpp_global_max

   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id$
   !! Software governed by the CeCILL licence     (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------
CONTAINS

   SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_bcast_integer  ***
      !!
      !! ** Purpose : Send array kvals to all processors
      !!
      !! ** Method  : MPI broadcast
      !!
      !! ** Action  : This works only with MPI.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
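      !!
      !! ** Example : a minimal sketch of a call from a hypothetical caller
      !!              (array contents are illustrative only):
      !!
      !!                 INTEGER, DIMENSION(3) :: ivals
      !!                 ivals(:) = (/ 1, 2, 3 /)   ! significant on rank kroot only
      !!                 CALL obs_mpp_bcast_integer( ivals, 3, 0 )
      !!
      !!              On return, every processor holds the values from rank 0.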
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in array
      INTEGER                , INTENT(in   ) ::   kroot   ! Processor to send data
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Array to send on kroot, receive for non-kroot
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------

      ! Call the MPI library to broadcast data
      CALL mpi_bcast( kvals, kno, mpi_integer,   &
         &            kroot, mpi_comm_opa, ierr )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE obs_mpp_bcast_integer


   SUBROUTINE obs_mpp_max_integer( kvals, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_max_integer  ***
      !!
      !! ** Purpose : Find the maximum across processors of each value in an integer array.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
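      !!
      !! ** Example : a minimal sketch (hypothetical values): with two
      !!              processors holding ivals = (/ 1, 7 /) and (/ 5, 2 /),
      !!
      !!                 CALL obs_mpp_max_integer( ivals, 2 )
      !!
      !!              leaves ivals = (/ 5, 7 /) on both processors.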
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in array
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Array of local values; holds the global maxima on return
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      INTEGER, DIMENSION(:), ALLOCATABLE ::   ivals
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------

      ALLOCATE( ivals(kno) )

      ! Call the MPI library to find the maximum across processors
      CALL mpi_allreduce( kvals, ivals, kno, mpi_integer,   &
         &                mpi_max, mpi_comm_opa, ierr )
      kvals(:) = ivals(:)

      DEALLOCATE( ivals )
#else
      ! no MPI: empty routine
#endif
   END SUBROUTINE obs_mpp_max_integer


   SUBROUTINE obs_mpp_find_obs_proc( kobsp, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_find_obs_proc  ***
      !!
      !! ** Purpose : From the array kobsp, containing the results of the
      !!              grid search on each processor, return a decision as
      !!              to which processor should hold each observation.
      !!
      !! ** Method  : Synchronize the processor number for each obs using
      !!              obs_mpp_max_integer. If an observation exists on two
      !!              processors it will be allocated to the lower numbered
      !!              processor.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
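      !!
      !! ** Example : a minimal sketch of the selection logic for one
      !!              observation found by processors 2 and 5 (values are
      !!              illustrative only):
      !!
      !!                 on proc 2:  kobsp(ji) =  2  ->  iobsp(ji) = -2
      !!                 on proc 5:  kobsp(ji) =  5  ->  iobsp(ji) = -5
      !!
      !!              the allreduce with mpi_max gives -2, so after negating
      !!              back kobsp(ji) = 2 everywhere: the lower numbered
      !!              processor wins. Entries found by no processor remain
      !!              at the 9999999 sentinel and are reset to -1.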
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kobsp
      !
#if defined key_mpp_mpi
      !
      INTEGER ::   ji, isum
      INTEGER, DIMENSION(:), ALLOCATABLE ::   iobsp
      !!----------------------------------------------------------------------

      ALLOCATE( iobsp(kno) )

      iobsp(:) = kobsp(:)

      ! Mark observations that no processor found in its grid
      WHERE( iobsp(:) == -1 )
         iobsp(:) = 9999999
      END WHERE

      ! Negate so that mpi_max selects the lowest valid processor number
      iobsp(:) = -1 * iobsp(:)

      CALL obs_mpp_max_integer( iobsp, kno )

      kobsp(:) = -1 * iobsp(:)

      ! Count and reset the observations that failed the grid search
      isum = 0
      DO ji = 1, kno
         IF( kobsp(ji) == 9999999 ) THEN
            isum = isum + 1
            kobsp(ji) = -1
         ENDIF
      END DO

      IF( isum > 0 ) THEN
         IF(lwp) WRITE(numout,*) isum, ' observations failed the grid search.'
         IF(lwp) WRITE(numout,*) 'If ln_grid_search_lookup=.TRUE., try reducing grid_search_res'
      ENDIF

      DEALLOCATE( iobsp )

#else
      ! no MPI: empty routine
#endif

   END SUBROUTINE obs_mpp_find_obs_proc


   SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integers  ***
      !!
      !! ** Purpose : Sum an integer array over all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
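      !!
      !! ** Example : a minimal sketch (hypothetical values): with two
      !!              processors holding ivals = (/ 1, 2 /) and (/ 3, 4 /),
      !!
      !!                 CALL obs_mpp_sum_integers( ivals, isums, 2 )
      !!
      !!              returns isums = (/ 4, 6 /) on every processor.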
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno), INTENT(  out) ::   kvalsout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer,   &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout(:) = kvalsin(:)
#endif
      !
   END SUBROUTINE obs_mpp_sum_integers


   SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integer  ***
      !!
      !! ** Purpose : Sum a single integer over all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
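      !!
      !! ** Example : scalar counterpart of obs_mpp_sum_integers; with a
      !!              hypothetical local count icnt on each processor,
      !!
      !!                 CALL obs_mpp_sum_integer( icnt, itot )
      !!
      !!              returns the global total itot on every processor.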
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in   ) ::   kvalin
      INTEGER, INTENT(  out) ::   kvalout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer,   &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalout = kvalin
#endif
      !
   END SUBROUTINE obs_mpp_sum_integer


   SUBROUTINE mpp_global_max( pval )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_global_max  ***
      !!
      !! ** Purpose : Get the maximum value across processors for a global
      !!              real array
      !!
      !! ** Method  : MPI allreduce
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
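      !!
      !! ** Example : a minimal sketch (hypothetical use): if each processor
      !!              fills its own region of a (jpiglo,jpjglo) work array
      !!              and sets the remainder to a large negative value, the
      !!              call
      !!
      !!                 CALL mpp_global_max( zglo )
      !!
      !!              leaves the assembled global field on every processor.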
      !!----------------------------------------------------------------------
      REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(inout) ::   pval
      !
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE ::   zcp
      !!----------------------------------------------------------------------

      ! Copy data for input to MPI
      ALLOCATE( zcp(jpiglo,jpjglo) )
      zcp(:,:) = pval(:,:)

      ! Call the MPI library to find the global maximum
      CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar,   &
         &                mpi_max, mpi_comm_opa, ierr )

      DEALLOCATE( zcp )

#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE mpp_global_max


   SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoall_int  ***
      !!
      !! ** Purpose : All-to-all exchange of an integer array with a fixed
      !!              number of elements per processor.
      !!
      !! ** Method  : MPI alltoall
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
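      !!
      !! ** Example : a minimal sketch (hypothetical values): with kno = 2
      !!              and jpnij = 2 processors, each processor sends elements
      !!              1:2 of kvalsin to processor 1 and elements 3:4 to
      !!              processor 2; kvalsout collects the blocks received
      !!              from each processor in rank order.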
      !!----------------------------------------------------------------------
      INTEGER                      , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno*jpnij), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno*jpnij), INTENT(  out) ::   kvalsout
      !!
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoall( kvalsin,  kno, mpi_integer,   &
         &               kvalsout, kno, mpi_integer,   &
         &               mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoall_int


   SUBROUTINE mpp_alltoallv_int( kvalsin, knoin , kinv , kvalsout,   &
      &                          knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_int  ***
      !!
      !! ** Purpose : All-to-all exchange with a variable number of elements
      !!              per processor (integer version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
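      !!
      !! ** Example : a minimal sketch (hypothetical values): with jpnij = 2,
      !!              kinv = (/ 2, 1 /) means this processor sends its first
      !!              two elements to processor 1 and the next one to
      !!              processor 2; the send displacements computed below are
      !!              then isdsp = (/ 0, 2 /). koutv and irdsp play the same
      !!              roles on the receive side.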
      !!----------------------------------------------------------------------
      INTEGER                   , INTENT(in   ) ::   knoin
      INTEGER                   , INTENT(in   ) ::   knoout
      INTEGER, DIMENSION(jpnij) , INTENT(in   ) ::   kinv, koutv
      INTEGER, DIMENSION(knoin) , INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(knoout), INTENT(  out) ::   kvalsout
      !!
      INTEGER ::   ierr
      INTEGER ::   jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) ::   irdsp, isdsp
      !-----------------------------------------------------------------------
      ! Compute send and receive displacements from the counts
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( kvalsin,  kinv,  isdsp, mpi_integer,   &
         &                kvalsout, koutv, irdsp, mpi_integer,   &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_int


   SUBROUTINE mpp_alltoallv_real( pvalsin, knoin , kinv , pvalsout,   &
      &                           knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_real  ***
      !!
      !! ** Purpose : All-to-all exchange with a variable number of elements
      !!              per processor (real version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only with MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
      !!----------------------------------------------------------------------
      INTEGER                    , INTENT(in   ) ::   knoin
      INTEGER                    , INTENT(in   ) ::   knoout
      INTEGER , DIMENSION(jpnij) , INTENT(in   ) ::   kinv, koutv
      REAL(wp), DIMENSION(knoin) , INTENT(in   ) ::   pvalsin
      REAL(wp), DIMENSION(knoout), INTENT(  out) ::   pvalsout
      !!
      INTEGER ::   ierr
      INTEGER ::   jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) ::   irdsp, isdsp
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Compute send and receive displacements from the counts
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( pvalsin,  kinv,  isdsp, mpivar,   &
         &                pvalsout, koutv, irdsp, mpivar,   &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      pvalsout = pvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_real

   !!======================================================================
END MODULE obs_mpp