MODULE obs_mpp
   !!======================================================================
   !!                       ***  MODULE obs_mpp  ***
   !! Observation diagnostics: Various MPP support routines
   !!======================================================================
   !! History :  2.0  !  2006-03  (K. Mogensen)  Original code
   !!             -   !  2006-05  (K. Mogensen)  Reformatted
   !!             -   !  2008-01  (K. Mogensen)  add mpp_global_max
   !!----------------------------------------------------------------------
# define mpivar mpi_double_precision
   !!----------------------------------------------------------------------
   !!   obs_mpp_bcast_integer : Broadcast an integer array from one processor to all processors
   !!   obs_mpp_max_integer   : Find the maximum across processors of each value in an integer array
   !!   obs_mpp_find_obs_proc : Find the processors that should hold the observations
   !!   obs_mpp_sum_integers  : Sum an integer array across all processors
   !!   obs_mpp_sum_integer   : Sum an integer across all processors
   !!----------------------------------------------------------------------
   USE dom_oce, ONLY : nproc, mig, mjg   ! Ocean space and time domain variables
   USE mpp_map, ONLY : mppmap
   USE in_out_manager
#if defined key_mpp_mpi
   USE lib_mpp, ONLY : mpi_comm_opa      ! MPP library
#endif
   IMPLICIT NONE
   PRIVATE

   PUBLIC obs_mpp_bcast_integer, & !: Broadcast an integer array from one proc to all procs
      &   obs_mpp_max_integer,   & !: Find the maximum across processors in an integer array
      &   obs_mpp_find_obs_proc, & !: Find the processors that should hold the observations
      &   obs_mpp_sum_integers,  & !: Sum an integer array across all processors
      &   obs_mpp_sum_integer,   & !: Sum an integer across all processors
      &   mpp_alltoall_int,      &
      &   mpp_alltoallv_int,     &
      &   mpp_alltoallv_real,    &
      &   mpp_global_max

   !!----------------------------------------------------------------------
   !! NEMO/OPA 3.3 , NEMO Consortium (2010)
   !! $Id$
   !! Software governed by the CeCILL licence     (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------
CONTAINS

   SUBROUTINE obs_mpp_bcast_integer( kvals, kno, kroot )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_bcast_integer  ***
      !!
      !! ** Purpose : Send the array kvals from processor kroot to all processors
      !!
      !! ** Method  : MPI broadcast
      !!
      !! ** Action  : This works only for MPI.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
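      !!
      !! ** Example  : minimal usage sketch (ivals is a hypothetical caller
      !!               array); broadcast 5 integers from processor 0:
      !!                  INTEGER, DIMENSION(5) :: ivals
      !!                  IF ( nproc == 0 ) ivals(:) = (/ 1, 2, 3, 4, 5 /)
      !!                  CALL obs_mpp_bcast_integer( ivals, 5, 0 )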
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in array
      INTEGER                , INTENT(in   ) ::   kroot   ! Processor that sends the data
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Array to send on kroot, receive for non-kroot
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------

      ! Call the MPI library to broadcast data
      CALL mpi_bcast( kvals, kno, mpi_integer,   &
         &            kroot, mpi_comm_opa, ierr )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE obs_mpp_bcast_integer


   SUBROUTINE obs_mpp_max_integer( kvals, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_max_integer  ***
      !!
      !! ** Purpose : Find the maximum across processors of each value in an integer array.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!              MPI_COMM_OPA needs to be replaced for OASIS4.
      !!
      !! References : http://www.mpi-forum.org
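      !!
      !! ** Example  : minimal usage sketch (ivals is a hypothetical caller
      !!               array); every element ends up as its maximum across
      !!               processors:
      !!                  INTEGER, DIMENSION(3) :: ivals
      !!                  ivals(:) = nproc
      !!                  CALL obs_mpp_max_integer( ivals, 3 )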
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno     ! Number of elements in array
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kvals   ! Array of values; holds the across-processor maxima on return
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      INTEGER, DIMENSION(kno) :: ivals
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------

      ! Call the MPI library to find the maximum across processors
      CALL mpi_allreduce( kvals, ivals, kno, mpi_integer,   &
         &                mpi_max, mpi_comm_opa, ierr )
      kvals(:) = ivals(:)
#else
      ! no MPI: empty routine
#endif
   END SUBROUTINE obs_mpp_max_integer


   SUBROUTINE obs_mpp_find_obs_proc( kobsp, kobsi, kobsj, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_find_obs_proc  ***
      !!
      !! ** Purpose : From the array kobsp containing the results of the
      !!              grid search on each processor, decide which processor
      !!              should hold each observation.
      !!
      !! ** Method  : A temporary 2D array holding all the decisions is
      !!              constructed using mpi_allgather on each processor.
      !!              If more than one processor has found the observation,
      !!              the processor with the observation in its inner domain
      !!              gets it.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
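      !!
      !! ** Example  : minimal usage sketch (the arrays are hypothetical
      !!               results of a prior grid search, with -1 marking
      !!               "not found locally"):
      !!                  CALL obs_mpp_find_obs_proc( iproc, iobsi, iobsj, inum )
      !!               on return iproc(jo) holds, on every processor, the
      !!               number of the processor that owns observation jo.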
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(in   ) ::   kobsi, kobsj
      INTEGER, DIMENSION(kno), INTENT(inout) ::   kobsp
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ji
      INTEGER :: jj
      INTEGER :: size
      INTEGER :: ierr
      INTEGER :: iobsip
      INTEGER :: iobsjp
      INTEGER :: num_sus_obs
      INTEGER, DIMENSION(kno)              ::   iobsig, iobsjg
      INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   iobsp, iobsi, iobsj
      !!
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------

      !-----------------------------------------------------------------------
      ! Call the MPI library to find the number of processors
      !-----------------------------------------------------------------------
      CALL mpi_comm_size( mpi_comm_opa, size, ierr )
      !-----------------------------------------------------------------------
      ! Convert local grid points to global grid points
      !-----------------------------------------------------------------------
      DO ji = 1, kno
         IF ( ( kobsi(ji) >= 1 ) .AND. ( kobsi(ji) <= jpi ) .AND.   &
            & ( kobsj(ji) >= 1 ) .AND. ( kobsj(ji) <= jpj ) ) THEN
            iobsig(ji) = mig( kobsi(ji) )
            iobsjg(ji) = mjg( kobsj(ji) )
         ELSE
            iobsig(ji) = -1
            iobsjg(ji) = -1
         ENDIF
      END DO
      !-----------------------------------------------------------------------
      ! Get the decisions from all processors
      !-----------------------------------------------------------------------
      ALLOCATE( iobsp(kno,size) )
      ALLOCATE( iobsi(kno,size) )
      ALLOCATE( iobsj(kno,size) )
      CALL mpi_allgather( kobsp, kno, mpi_integer,   &
         &                iobsp, kno, mpi_integer,   &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsig, kno, mpi_integer,   &
         &                iobsi,  kno, mpi_integer,   &
         &                mpi_comm_opa, ierr )
      CALL mpi_allgather( iobsjg, kno, mpi_integer,   &
         &                iobsj,  kno, mpi_integer,   &
         &                mpi_comm_opa, ierr )

      !-----------------------------------------------------------------------
      ! Assign each observation to the processor with the lowest processor
      ! number among the processors holding it.
      !-----------------------------------------------------------------------
      kobsp(:) = -1
      num_sus_obs = 0
      DO ji = 1, kno
         DO jj = 1, size
            IF ( ( kobsp(ji) == -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               kobsp(ji) = iobsp(ji,jj)
               iobsip    = iobsi(ji,jj)
               iobsjp    = iobsj(ji,jj)
            ENDIF
            IF ( ( kobsp(ji) /= -1 ) .AND. ( iobsp(ji,jj) /= -1 ) ) THEN
               IF ( ( iobsip /= iobsi(ji,jj) ) .OR.   &
                  & ( iobsjp /= iobsj(ji,jj) ) ) THEN
                  IF ( ( kobsp(ji)    < 1000000 ) .AND.   &
                     & ( iobsp(ji,jj) < 1000000 ) ) THEN
                     num_sus_obs = num_sus_obs + 1
                  ENDIF
               ENDIF
               IF ( mppmap(iobsip,iobsjp) /= ( kobsp(ji)+1 ) ) THEN
                  IF ( ( iobsi(ji,jj) /= -1 ) .AND.   &
                     & ( iobsj(ji,jj) /= -1 ) ) THEN
                     IF ( ( mppmap(iobsi(ji,jj),iobsj(ji,jj)) == ( iobsp(ji,jj)+1 ) )   &
                        & .OR. ( iobsp(ji,jj) < kobsp(ji) ) ) THEN
                        kobsp(ji) = iobsp(ji,jj)
                        iobsip    = iobsi(ji,jj)
                        iobsjp    = iobsj(ji,jj)
                     ENDIF
                  ENDIF
               ENDIF
            ENDIF
         END DO
      END DO
      IF (lwp) WRITE(numout,*) 'Number of suspicious observations: ', num_sus_obs

      DEALLOCATE( iobsj )
      DEALLOCATE( iobsi )
      DEALLOCATE( iobsp )
#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE obs_mpp_find_obs_proc


   SUBROUTINE obs_mpp_sum_integers( kvalsin, kvalsout, kno )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integers  ***
      !!
      !! ** Purpose : Sum an integer array across all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
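      !!
      !! ** Example  : minimal usage sketch (icnt/icntg are hypothetical);
      !!               sum a per-processor count over all processors:
      !!                  INTEGER, DIMENSION(2) :: icnt, icntg
      !!                  icnt(:) = 1
      !!                  CALL obs_mpp_sum_integers( icnt, icntg, 2 )   ! icntg(:) = jpnij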
      !!----------------------------------------------------------------------
      INTEGER                , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno), INTENT(  out) ::   kvalsout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalsin, kvalsout, kno, mpi_integer,   &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout(:) = kvalsin(:)
#endif
      !
   END SUBROUTINE obs_mpp_sum_integers


   SUBROUTINE obs_mpp_sum_integer( kvalin, kvalout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE obs_mpp_sum_integer  ***
      !!
      !! ** Purpose : Sum a single integer across all processors.
      !!
      !! ** Method  : MPI all reduce.
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
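      !!
      !! ** Example  : minimal usage sketch (inum/inumg are hypothetical);
      !!               add up the local observation counts:
      !!                  CALL obs_mpp_sum_integer( inum, inumg )
      !!               inumg holds the global total on every processor.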
      !!----------------------------------------------------------------------
      INTEGER, INTENT(in   ) ::   kvalin
      INTEGER, INTENT(  out) ::   kvalout
      !
#if defined key_mpp_mpi
      !
      INTEGER :: ierr
      !
      INCLUDE 'mpif.h'
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Call the MPI library to find the sum across processors
      !-----------------------------------------------------------------------
      CALL mpi_allreduce( kvalin, kvalout, 1, mpi_integer,   &
         &                mpi_sum, mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalout = kvalin
#endif
      !
   END SUBROUTINE obs_mpp_sum_integer


   SUBROUTINE mpp_global_max( pval )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_global_max  ***
      !!
      !! ** Purpose : Get the maximum value across processors for a global
      !!              real array
      !!
      !! ** Method  : MPI allreduce
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
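      !!
      !! ** Example  : minimal usage sketch (zglo is a hypothetical caller
      !!               array); take the element-wise global maximum in place:
      !!                  REAL(wp), DIMENSION(jpiglo,jpjglo) :: zglo
      !!                  CALL mpp_global_max( zglo )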
      !!----------------------------------------------------------------------
      REAL(KIND=wp), DIMENSION(jpiglo,jpjglo), INTENT(inout) ::   pval
      !
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      REAL(KIND=wp), DIMENSION(:,:), ALLOCATABLE ::   zcp
      !!----------------------------------------------------------------------

      ! Copy data for input to MPI
      ALLOCATE( zcp(jpiglo,jpjglo) )
      zcp(:,:) = pval(:,:)

      ! Call the MPI library to find the global maximum
      CALL mpi_allreduce( zcp, pval, jpiglo*jpjglo, mpivar,   &
         &                mpi_max, mpi_comm_opa, ierr )

      DEALLOCATE( zcp )

#else
      ! no MPI: empty routine
#endif
      !
   END SUBROUTINE mpp_global_max


   SUBROUTINE mpp_alltoall_int( kno, kvalsin, kvalsout )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoall_int  ***
      !!
      !! ** Purpose : All-to-all exchange of an integer array.
      !!
      !! ** Method  : MPI alltoall
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
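      !!
      !! ** Example  : minimal usage sketch (isend/irecv are hypothetical);
      !!               exchange 2 integers with every processor:
      !!                  INTEGER, DIMENSION(2*jpnij) :: isend, irecv
      !!                  CALL mpp_alltoall_int( 2, isend, irecv )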
      !!----------------------------------------------------------------------
      INTEGER                      , INTENT(in   ) ::   kno
      INTEGER, DIMENSION(kno*jpnij), INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(kno*jpnij), INTENT(  out) ::   kvalsout
      !!
      INTEGER :: ierr
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoall( kvalsin,  kno, mpi_integer,   &
         &               kvalsout, kno, mpi_integer,   &
         &               mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoall_int


   SUBROUTINE mpp_alltoallv_int( kvalsin, knoin , kinv , kvalsout,   &
      &                          knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_int  ***
      !!
      !! ** Purpose : All-to-all exchange with variable counts (integer version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
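      !!
      !! ** Example  : minimal usage sketch (isend/irecv and the count arrays
      !!               are hypothetical); iinv(jp) values go to processor jp
      !!               and ioutv(jp) values are received from it:
      !!                  CALL mpp_alltoallv_int( isend, SUM(iinv),  iinv,    &
      !!                     &                    irecv, SUM(ioutv), ioutv )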
      !!----------------------------------------------------------------------
      INTEGER                   , INTENT(in   ) ::   knoin
      INTEGER                   , INTENT(in   ) ::   knoout
      INTEGER, DIMENSION(jpnij)                 ::   kinv, koutv
      INTEGER, DIMENSION(knoin) , INTENT(in   ) ::   kvalsin
      INTEGER, DIMENSION(knoout), INTENT(  out) ::   kvalsout
      !!
      INTEGER :: ierr
      INTEGER :: jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) ::   irdsp, isdsp
      !-----------------------------------------------------------------------
      ! Compute displacements
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( kvalsin,  kinv,  isdsp, mpi_integer,   &
         &                kvalsout, koutv, irdsp, mpi_integer,   &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      kvalsout = kvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_int


   SUBROUTINE mpp_alltoallv_real( pvalsin, knoin , kinv , pvalsout,   &
      &                           knoout, koutv )
      !!----------------------------------------------------------------------
      !!               ***  ROUTINE mpp_alltoallv_real  ***
      !!
      !! ** Purpose : All-to-all exchange with variable counts (real version).
      !!
      !! ** Method  : MPI alltoallv
      !!
      !! ** Action  : This works only for MPI.
      !!              It does not work for SHMEM.
      !!
      !! References : http://www.mpi-forum.org
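      !!
      !! ** Example  : minimal usage sketch (zsend/zrecv and the count arrays
      !!               are hypothetical); same pattern as mpp_alltoallv_int
      !!               but for reals:
      !!                  CALL mpp_alltoallv_real( zsend, SUM(iinv),  iinv,    &
      !!                     &                     zrecv, SUM(ioutv), ioutv )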
      !!----------------------------------------------------------------------
      INTEGER                    , INTENT(in   ) ::   knoin
      INTEGER                    , INTENT(in   ) ::   knoout
      INTEGER , DIMENSION(jpnij)                 ::   kinv, koutv
      REAL(wp), DIMENSION(knoin) , INTENT(in   ) ::   pvalsin
      REAL(wp), DIMENSION(knoout), INTENT(  out) ::   pvalsout
      !!
      INTEGER :: ierr
      INTEGER :: jproc
      !
#if defined key_mpp_mpi
      !
      INCLUDE 'mpif.h'
      INTEGER, DIMENSION(jpnij) ::   irdsp, isdsp
      !!----------------------------------------------------------------------
      !
      !-----------------------------------------------------------------------
      ! Compute displacements
      !-----------------------------------------------------------------------
      irdsp(1) = 0
      isdsp(1) = 0
      DO jproc = 2, jpnij
         isdsp(jproc) = isdsp(jproc-1) + kinv(jproc-1)
         irdsp(jproc) = irdsp(jproc-1) + koutv(jproc-1)
      END DO
      !-----------------------------------------------------------------------
      ! Call the MPI library to do the all-to-all operation on the data
      !-----------------------------------------------------------------------
      CALL mpi_alltoallv( pvalsin,  kinv,  isdsp, mpivar,   &
         &                pvalsout, koutv, irdsp, mpivar,   &
         &                mpi_comm_opa, ierr )
#else
      !-----------------------------------------------------------------------
      ! For no-MPP just return input values
      !-----------------------------------------------------------------------
      pvalsout = pvalsin
#endif
      !
   END SUBROUTINE mpp_alltoallv_real

   !!======================================================================
END MODULE obs_mpp