New URL for NEMO forge!   http://forge.nemo-ocean.eu

Since March 2022, along with the NEMO 4.2 release, code development has moved to a self-hosted GitLab.
This forge is now archived and remains online for historical reference.
mod_ioclient_para.F90 in branches/DEV_r2460_v3_3beta_NOL/NEMOGCM/EXTERNAL/XMLIO_SERVER/src/IOSERVER – NEMO

source: branches/DEV_r2460_v3_3beta_NOL/NEMOGCM/EXTERNAL/XMLIO_SERVER/src/IOSERVER/mod_ioclient_para.F90 @ 2462

Last change on this file since 2462 was 2462, checked in by acc, 13 years ago

DEV_r2460_v3_3beta_NOL. Changes to v3.3beta to improve assignment of ocean processes to ioserver processes when discarding land-only regions. See ticket #776

  • Property svn:keywords set to Id
File size: 3.8 KB
MODULE mod_ioclient_para
  INTEGER,PARAMETER :: color_client=1
  INTEGER,PARAMETER :: color_server=2
  INTEGER,SAVE      :: iocomm
  INTEGER,SAVE      :: iosize
  INTEGER,SAVE      :: iorank
  INTEGER,SAVE      :: server_rank
  INTEGER,SAVE      :: intracomm
CONTAINS


  SUBROUTINE Init_parallel
  USE mpitrace
  USE mod_ioserver_namelist
#if defined key_oasis3 || defined key_oasis4
  USE mod_prism_get_comm
#endif
  USE mpi_mod
  USE mod_event_parameters
  IMPLICIT NONE
    INTEGER :: NEW_COMM
    INTEGER :: ierr
    INTEGER :: global_rank
    INTEGER :: global_size
    INTEGER :: mpi_rank
    INTEGER :: mpi_size
    INTEGER :: nb_server_io
    INTEGER,ALLOCATABLE :: proc_color(:)
    INTEGER :: i, ix, jpni_reg, jpnj_reg
    INTEGER :: div,remain
    INTEGER :: group_color
    INTEGER :: Comm_client_server
    CHARACTER(LEN=6) :: oasis_server_id
    LOGICAL :: ln_around, ln_use_allproc

    IF (using_oasis) THEN
      oasis_server_id=server_id
      PRINT *,'prism_get_intracomm'
#if defined key_oasis3 || defined key_oasis4
      CALL prism_get_intracomm(Comm_client_server,oasis_server_id,ierr)
#endif
    ELSE
      CALL MPI_INIT(ierr)
      Comm_client_server=MPI_COMM_WORLD
    ENDIF

    CALL MPI_COMM_RANK(Comm_client_server,global_rank,ierr)
    CALL MPI_COMM_SIZE(Comm_client_server,global_size,ierr)

    CALL MPI_COMM_SPLIT(Comm_client_server,color_client,global_rank,intracomm,ierr)
    CALL MPI_COMM_SIZE(intracomm,mpi_size,ierr)
    CALL MPI_COMM_RANK(intracomm,mpi_rank,ierr)

    nb_server_io=global_size-mpi_size
!
! Need to be cleverer at setting group_colour when jpni*jpnj /= jpnij
! nregproc_in holds the list of equivalent ranks in a jpni by jpnj decomposition
! which has retained the land-only areas. If jpni*jpnj = jpnij then nregproc_in(mpi_size)
! should equal mpi_size-1 and nregproc_in(mpi_rank+1) = mpi_rank for all mpi_rank.
!
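! Illustrative example of the general case: with a 4x2 regular decomposition
! (jpni_reg=4, jpnj_reg=2) in which two land-only subdomains have been
! discarded, mpi_size=6 and layout.dat might supply
! nregproc_in = (/ 0, 1, 3, 4, 6, 7 /), i.e. regular ranks 2 and 5 are absent.
!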
    ln_use_allproc = .true.
    ALLOCATE(nregproc_in(mpi_size))
    INQUIRE (file='layout.dat', exist=ln_around)
    IF (ln_around) THEN
! use it
     OPEN(UNIT=123,FILE='layout.dat')
     READ(123,'(48X,2i8,/)',ERR=606,END=606) jpni_reg, jpnj_reg
     do ix = 1,mpi_size
      READ(123,'(45X,I5)',ERR=606,END=606) nregproc_in(ix)
     end do
     ln_use_allproc = .false.
 606 CLOSE(123)
    ENDIF
    IF ( ln_use_allproc ) THEN
!
! Either layout.dat does not exist or the reading of nregproc_in has failed.
! Default to regular decomposition with no omitted land-only regions.
!
     do ix = 1,mpi_size
      nregproc_in(ix) = ix - 1
     end do
!
! jpni_reg and jpnj_reg are not known; just have to make sure jpni_reg*jpnj_reg=mpi_size
!
     jpni_reg=mpi_size
     jpnj_reg=1
    ENDIF

    div=(jpni_reg*jpnj_reg)/nb_server_io
    remain=MOD(jpni_reg*jpnj_reg,nb_server_io)

!
! Note need to add 1 to mpi_rank when indexing nregproc_in since nregproc_in is
! indexed 1 to mpi_size but mpi_rank ranges from 0 to mpi_size-1
!
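! Worked example (illustrative): 10 regular ranks shared among nb_server_io=3
! servers gives div=3, remain=1; the formula below then maps regular ranks 0-3
! to group 0, ranks 4-6 to group 1 and ranks 7-9 to group 2, so the first
! 'remain' groups receive div+1 clients each and the remaining groups div.
!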
    IF (nregproc_in(mpi_rank+1)<remain*(div+1)) THEN
      group_color=nregproc_in(mpi_rank+1)/(div+1)
    ELSE
      group_color=(nb_server_io-1)-(jpni_reg*jpnj_reg - 1 -nregproc_in(mpi_rank+1))/div
    ENDIF

    CALL MPI_COMM_SPLIT(Comm_client_server,group_color,global_rank,iocomm,ierr)

    CALL MPI_COMM_SIZE(iocomm,iosize,ierr)
    CALL MPI_COMM_RANK(iocomm,iorank,ierr)

    ALLOCATE(proc_color(0:iosize-1))
    CALL MPI_ALLGATHER(color_client,1,MPI_INTEGER,proc_color,1,MPI_INTEGER,iocomm,ierr)

    DO i=0,iosize-1
      IF (proc_color(i)==color_server) THEN
        server_rank=i
        EXIT
      ENDIF
    ENDDO

    PRINT *,"Process No",mpi_rank,"--> server",server_rank
  END SUBROUTINE Init_parallel

  SUBROUTINE Finalize_parallel
  USE mpi_mod
  IMPLICIT NONE
    INTEGER :: ierr

    CALL MPI_FINALIZE(ierr)

  END SUBROUTINE Finalize_parallel

END MODULE mod_ioclient_para
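
For reference, the sketch below is not part of the repository file; it is a minimal standalone program that reproduces the group_color calculation of Init_parallel for the simple case where no land-only subdomains have been discarded, so nregproc_in(ix) = ix-1. The program name and the values of jpni_reg, jpnj_reg and nb_server_io are arbitrary example inputs.

PROGRAM demo_group_color
! Minimal sketch (not from the repository): maps each regular rank to a
! server group using the same div/remain formula as Init_parallel.
  IMPLICIT NONE
  INTEGER, PARAMETER :: jpni_reg = 5, jpnj_reg = 2   ! example 5x2 decomposition
  INTEGER, PARAMETER :: nb_server_io = 3             ! example number of io servers
  INTEGER :: div, remain, n, group_color

  div    = (jpni_reg*jpnj_reg)/nb_server_io          ! clients per server (floor)
  remain = MOD(jpni_reg*jpnj_reg,nb_server_io)       ! leftover clients

  DO n = 0, jpni_reg*jpnj_reg-1                      ! n stands in for nregproc_in(mpi_rank+1)
    IF (n < remain*(div+1)) THEN
      group_color = n/(div+1)
    ELSE
      group_color = (nb_server_io-1) - (jpni_reg*jpnj_reg-1-n)/div
    ENDIF
    PRINT '(A,I3,A,I3)', 'regular rank', n, ' --> server group', group_color
  END DO
END PROGRAM demo_group_color

With these example values (ten regular ranks, three servers) the first server group is assigned four client processes and the other two groups three each; in general the first remain groups receive div+1 clients and the rest receive div.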