MODULE mppini
   !!======================================================================
   !!                       ***  MODULE mppini   ***
   !! Ocean initialization : distributed memory computing initialization
   !!======================================================================
   !! History :  6.0  !  1994-11  (M. Guyon)  Original code
   !!  OPA       7.0  !  1995-04  (J. Escobar, M. Imbard)
   !!            8.0  !  1998-05  (M. Imbard, J. Escobar, L. Colombet )  SHMEM and MPI versions
   !!  NEMO      1.0  !  2004-01  (G. Madec, J.M Molines)  F90 : free form , north fold jpni > 1
   !!            4.0  !  2016-06  (G. Madec)  use domain configuration file instead of bathymetry file
   !!            4.0  !  2017-06  (J.M. Molines, T. Lovato)  merge of mppini and mppini_2
   !!----------------------------------------------------------------------

   !!----------------------------------------------------------------------
   !!  mpp_init        : Lay out the global domain over processors with/without land processor elimination
   !!  mpp_init_mask   : build the land/sea mask used to eliminate land-only subdomains
   !!  mpp_init_ioipsl : IOIPSL initialization in mpp
   !!----------------------------------------------------------------------
   USE dom_oce        ! ocean space and time domain
   USE bdy_oce        ! open BounDarY
   !
   USE lib_mpp        ! distributed memory computing library
   USE iom            ! nemo I/O library
   USE ioipsl         ! I/O IPSL library
   USE in_out_manager ! I/O Manager

   IMPLICIT NONE
   PRIVATE

   PUBLIC   mpp_init   ! called by opa.F90

   !!----------------------------------------------------------------------
   !! NEMO/OPA 4.0 , NEMO Consortium (2017)
   !! $Id$
   !! Software governed by the CeCILL licence     (NEMOGCM/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------
CONTAINS

#if ! defined key_mpp_mpi
   !!----------------------------------------------------------------------
   !!   Default option :                            shared memory computing
   !!----------------------------------------------------------------------

   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!
      !! ** Method  :   Shared memory computing, set the local processor
      !!              variables to the value of the global domain
      !!
      !!              Mono-process case: the local subdomain IS the global
      !!              domain, so every local index/extent is set directly
      !!              from the global one and no decomposition is needed.
      !!----------------------------------------------------------------------
      !
      nimpp = 1
      njmpp = 1
      nlci = jpi
      nlcj = jpj
      nldi = 1
      nldj = 1
      nlei = jpi
      nlej = jpj
      nperio = jperio            ! local periodicity = global periodicity (single subdomain)
      nbondi = 2                 ! 2 = no neighbour on either side in i
      nbondj = 2                 ! 2 = no neighbour on either side in j
      nidom = FLIO_DOM_NONE      ! no IOIPSL domain splitting
      npolj = jperio
      !
      IF(lwp) THEN               ! control print (ocean.output), lead process only
         WRITE(numout,*)
         WRITE(numout,*) 'mpp_init : NO massively parallel processing'
         WRITE(numout,*) '~~~~~~~~ '
         WRITE(numout,*) '   nperio = ', nperio, '   nimpp  = ', nimpp
         WRITE(numout,*) '   npolj  = ', npolj , '   njmpp  = ', njmpp
      ENDIF
      !
      ! consistency check: without key_mpp_mpi the namelist must request a 1x1 layout
      IF( jpni /= 1 .OR. jpnj /= 1 .OR. jpnij /= 1 )                                     &
         CALL ctl_stop( 'mpp_init: equality  jpni = jpnj = jpnij = 1  is not satisfied', &
            &           'the domain is lay out for distributed memory computing!' )
      !
      ! jperio = 7 (bi-periodic north fold) is only implemented in the MPI code path
      IF( jperio == 7 ) CALL ctl_stop( 'mpp_init: jperio = 7 needs distributed memory computing ',       &
         &                             'with 1 process. Add key_mpp_mpi in the list of active cpp keys ' )
      !
   END SUBROUTINE mpp_init

#else
   !!----------------------------------------------------------------------
   !!   'key_mpp_mpi'                     MPI massively parallel processing
   !!----------------------------------------------------------------------

   SUBROUTINE mpp_init
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init  ***
      !!
      !! ** Purpose :   Lay out the global domain over processors.
      !!      If land processors are to be eliminated, this program requires the
      !!      presence of the domain configuration file. Land processors elimination
      !!      is performed if jpni x jpnj /= jpnij. In this case, using the MPP_PREP
      !!      preprocessing tool, help for defining the best cutting out.
      !!
      !! ** Method  :   Global domain is distributed in smaller local domains.
      !!      Periodic condition is a function of the local domain position
      !!      (global boundary or neighbouring domain) and of the global
      !!      periodic
      !!      Type :         jperio global periodic condition
      !!                     nperio local  periodic condition
      !!
      !! ** Action : - set domain parameters
      !!                    nimpp     : longitudinal index
      !!                    njmpp     : latitudinal  index
      !!                    nperio    : lateral condition type
      !!                    narea     : number for local area
      !!                    nlci      : first dimension
      !!                    nlcj      : second dimension
      !!                    nbondi    : mark for "east-west local boundary"
      !!                    nbondj    : mark for "north-south local boundary"
      !!                    nproc     : number for local processor
      !!                    noea      : number for local neighboring processor
      !!                    nowe      : number for local neighboring processor
      !!                    noso      : number for local neighboring processor
      !!                    nono      : number for local neighboring processor
      !!----------------------------------------------------------------------
      INTEGER ::   ji, jj, jn, jproc, jarea   ! dummy loop indices
      INTEGER ::   inum                       ! local logical unit
      INTEGER ::   idir, ifreq, icont, isurf  ! local integers
      INTEGER ::   ii, il1, ili, imil         !   -       -
      INTEGER ::   ij, il2, ilj, ijm1         !   -       -
      INTEGER ::   iino, ijno, iiso, ijso     !   -       -
      INTEGER ::   iiea, ijea, iiwe, ijwe     !   -       -
      INTEGER ::   iresti, irestj, iproc      !   -       -
      INTEGER, DIMENSION(jpnij) ::   iin, ii_nono, ii_noea          ! 1D workspace
      INTEGER, DIMENSION(jpnij) ::   ijn, ii_noso, ii_nowe          !  -     -
      INTEGER, DIMENSION(jpni,jpnj) ::   iimppt, ilci, ibondi, ipproc   ! 2D workspace
      INTEGER, DIMENSION(jpni,jpnj) ::   ijmppt, ilcj, ibondj, ipolj    !  -     -
      INTEGER, DIMENSION(jpni,jpnj) ::   ilei, ildi, iono, ioea         !  -     -
      INTEGER, DIMENSION(jpni,jpnj) ::   ilej, ildj, ioso, iowe         !  -     -
      INTEGER, DIMENSION(jpiglo,jpjglo) ::   imask   ! 2D global domain workspace
      REAL(wp) ::   zidom, zjdom               ! local scalars
      !!----------------------------------------------------------------------
      !
      ! Land elimination is triggered only when the namelist asks for fewer
      ! processes (jpnij) than grid cells in the layout (jpni*jpnj)
      IF ( jpni * jpnj == jpnij ) THEN       ! regular domain lay out over processors
         imask(:,:) = 1
      ELSEIF ( jpni*jpnj > jpnij ) THEN      ! remove land-only processor (i.e. where imask(:,:)=0)
         CALL mpp_init_mask( imask )
      ELSE                                   ! error
         CALL ctl_stop( 'mpp_init: jpnij > jpni x jpnj. Check namelist setting!' )
      ENDIF
      !
      !  1. Dimension arrays for subdomains
      ! -----------------------------------
      !  Computation of local domain sizes ilci() ilcj()
      !  These dimensions depend on global sizes jpni,jpnj and jpiglo,jpjglo
      !  The subdomains are squares lesser than or equal to the global
      !  dimensions divided by the number of processors minus the overlap array.
      !
      nreci = 2 * nn_hls
      nrecj = 2 * nn_hls
      ! number of columns/rows that carry the larger subdomain size (jpimax/jpjmax)
      iresti = 1 + MOD( jpiglo - nreci -1 , jpni )
      irestj = 1 + MOD( jpjglo - nrecj -1 , jpnj )
      !
      !  Need to use jpimax and jpjmax here since jpi and jpj have already been
      !  shrunk to local sizes in nemogcm
#if defined key_nemocice_decomp
      ! Change padding to be consistent with CICE
      ilci(1:jpni-1      ,:) = jpimax
      ilci(jpni          ,:) = jpiglo - (jpni - 1) * (jpimax - nreci)
      !
      ilcj(:,      1:jpnj-1) = jpjmax
      ilcj(:,          jpnj) = jpjglo - (jpnj - 1) * (jpjmax - nrecj)
#else
      ilci(1:iresti      ,:) = jpimax
      ilci(iresti+1:jpni ,:) = jpimax-1
      ilcj(:,      1:irestj) = jpjmax
      ilcj(:, irestj+1:jpnj) = jpjmax-1
#endif
      !
      nfilcit(:,:) = ilci(:,:)
      !
      ! zidom/zjdom must reconstruct jpiglo/jpjglo exactly (printed as a sanity check below)
      zidom = nreci + sum(ilci(:,1) - nreci )
      zjdom = nrecj + sum(ilcj(1,:) - nrecj )
      !
      IF(lwp) THEN
         WRITE(numout,*)
         WRITE(numout,*) 'mpp_init : MPI Message Passing MPI - domain lay out over processors'
         WRITE(numout,*) '~~~~~~~~ '
         WRITE(numout,*) '   defines mpp subdomains'
         WRITE(numout,*) '      iresti = ', iresti, ' jpni = ', jpni
         WRITE(numout,*) '      irestj = ', irestj, ' jpnj = ', jpnj
         WRITE(numout,*)
         WRITE(numout,*) '      sum ilci(i,1) = ', zidom, ' jpiglo = ', jpiglo
         WRITE(numout,*) '      sum ilcj(1,j) = ', zjdom, ' jpjglo = ', jpjglo
      ENDIF

      !  2. Index arrays for subdomains
      ! -------------------------------
      iimppt(:,:) = 1
      ijmppt(:,:) = 1
      ipproc(:,:) = -1          ! -1 marks a subdomain with no assigned process (land-only)
      !
      ! global (i,j) index of the first interior point of each subdomain;
      ! consecutive subdomains overlap by nreci/nrecj halo points
      IF( jpni > 1 ) THEN
         DO jj = 1, jpnj
            DO ji = 2, jpni
               iimppt(ji,jj) = iimppt(ji-1,jj) + ilci(ji-1,jj) - nreci
            END DO
         END DO
      ENDIF
      nfiimpp(:,:) = iimppt(:,:)
      !
      IF( jpnj > 1 )THEN
         DO jj = 2, jpnj
            DO ji = 1, jpni
               ijmppt(ji,jj) = ijmppt(ji,jj-1) + ilcj(ji,jj-1) - nrecj
            END DO
         END DO
      ENDIF

      !  3. Subdomain description in the Regular Case
      ! --------------------------------------------
      nperio = 0
      icont = -1
      DO jarea = 1, jpni*jpnj
         ii = 1 + MOD(jarea-1,jpni)
         ij = 1 +    (jarea-1)/jpni
         ili = ilci(ii,ij)
         ilj = ilcj(ii,ij)
         ! boundary markers: -1 = open on the low side, 1 = open on the high side,
         !                    0 = interior, 2 = no neighbour on either side
         ibondj(ii,ij) = -1
         IF( jarea >  jpni          )   ibondj(ii,ij) = 0
         IF( jarea >  (jpnj-1)*jpni )   ibondj(ii,ij) = 1
         IF( jpnj  == 1             )   ibondj(ii,ij) = 2
         ibondi(ii,ij) = 0
         IF( MOD(jarea,jpni) == 1 )   ibondi(ii,ij) = -1
         IF( MOD(jarea,jpni) == 0 )   ibondi(ii,ij) =  1
         IF( jpni            == 1 )   ibondi(ii,ij) =  2

         ! Subdomain neighbors (MPI ranks in the regular, pre-elimination numbering)
         iproc = jarea - 1
         ioso(ii,ij) = iproc - jpni
         iowe(ii,ij) = iproc - 1
         ioea(ii,ij) = iproc + 1
         iono(ii,ij) = iproc + jpni
         ildi(ii,ij) = 1   + nn_hls
         ilei(ii,ij) = ili - nn_hls

         ! at a closed/global boundary the interior extends into the halo
         IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 )   ildi(ii,ij) = 1
         IF( ibondi(ii,ij) ==  1 .OR. ibondi(ii,ij) == 2 )   ilei(ii,ij) = ili
         ildj(ii,ij) = 1   + nn_hls
         ilej(ii,ij) = ilj - nn_hls
         IF( ibondj(ii,ij) == -1 .OR. ibondj(ii,ij) == 2 )   ildj(ii,ij) = 1
         IF( ibondj(ii,ij) ==  1 .OR. ibondj(ii,ij) == 2 )   ilej(ii,ij) = ilj

         ! warning ii*ij (zone) /= nproc (processors)!
         ! east-west cyclic global domain: wrap the east/west neighbours
         IF( jperio == 1 .OR. jperio == 4 .OR. jperio == 6 ) THEN
            IF( jpni == 1 )THEN
               ibondi(ii,ij) = 2
               nperio = 1
            ELSE
               ibondi(ii,ij) = 0
            ENDIF
            IF( MOD(jarea,jpni) == 0 ) THEN
               ioea(ii,ij) = iproc - (jpni-1)
            ENDIF
            IF( MOD(jarea,jpni) == 1 ) THEN
               iowe(ii,ij) = iproc + jpni - 1
            ENDIF
         ENDIF
         !
         ! north-fold condition: ipolj = 3/4 (T-point pivot), 5/6 (F-point pivot);
         ! the "middle" subdomain (imil) of an odd jpni row folds onto itself
         ipolj(ii,ij) = 0
         IF( jperio == 3 .OR. jperio == 4 ) THEN
            ijm1 = jpni*(jpnj-1)
            imil = ijm1+(jpni+1)/2
            IF( jarea > ijm1 ) ipolj(ii,ij) = 3
            IF( MOD(jpni,2) == 1 .AND. jarea == imil ) ipolj(ii,ij) = 4
            IF( ipolj(ii,ij) == 3 ) iono(ii,ij) = jpni*jpnj-jarea+ijm1   ! MPI rank of northern neighbour
         ENDIF
         IF( jperio == 5 .OR. jperio == 6 ) THEN
            ijm1 = jpni*(jpnj-1)
            imil = ijm1+(jpni+1)/2
            IF( jarea > ijm1) ipolj(ii,ij) = 5
            IF( MOD(jpni,2) == 1 .AND. jarea == imil ) ipolj(ii,ij) = 6
            IF( ipolj(ii,ij) == 5) iono(ii,ij) = jpni*jpnj-jarea+ijm1    ! MPI rank of northern neighbour
         ENDIF
         !
         ! Check wet points over the entire domain to preserve the MPI communication stencil
         isurf = 0
         DO jj = 1, ilj
            DO  ji = 1, ili
               IF( imask(ji+iimppt(ii,ij)-1, jj+ijmppt(ii,ij)-1) == 1) isurf = isurf+1
            END DO
         END DO
         !
         ! subdomains with at least one wet point get a process number (icont)
         IF( isurf /= 0 ) THEN
            icont = icont + 1
            ipproc(ii,ij) = icont
            iin(icont+1) = ii
            ijn(icont+1) = ij
         ENDIF
      END DO
      !
      nfipproc(:,:) = ipproc(:,:)
      ! Check potential error: the number of kept (wet) subdomains must equal jpnij
      IF( icont+1 /= jpnij ) THEN
         WRITE(ctmp1,*) ' jpni =',jpni,' jpnj =',jpnj
         WRITE(ctmp2,*) ' jpnij =',jpnij, '< jpni x jpnj'
         WRITE(ctmp3,*) ' ***********, mpp_init2 finds jpnij=',icont+1
         CALL ctl_stop( 'mpp_init: Eliminate land processors algorithm', '', ctmp1, ctmp2, '', ctmp3 )
      ENDIF

      !  4. Subdomain print
      ! ------------------
      ! ASCII map of the layout, ifreq columns of subdomains per stripe
      IF(lwp) THEN
         ifreq = 4
         il1 = 1
         DO jn = 1, (jpni-1)/ifreq+1
            il2 = MIN(jpni,il1+ifreq-1)
            WRITE(numout,*)
            WRITE(numout,9400) ('***',ji=il1,il2-1)
            DO jj = jpnj, 1, -1
               WRITE(numout,9403) ('   ',ji=il1,il2-1)
               WRITE(numout,9402) jj, (ilci(ji,jj),ilcj(ji,jj),ji=il1,il2)
               WRITE(numout,9404) (ipproc(ji,jj),ji=il1,il2)
               WRITE(numout,9403) ('   ',ji=il1,il2-1)
               WRITE(numout,9400) ('***',ji=il1,il2-1)
            END DO
            WRITE(numout,9401) (ji,ji=il1,il2)
            il1 = il1+ifreq
         END DO
 9400    FORMAT('     ***',20('*************',a3))
 9403    FORMAT('     *     ',20('         *   ',a3))
 9401    FORMAT('        ',20('   ',i3,'          '))
 9402    FORMAT('     ',i3,' *  ',20(i3,'  x',i3,'   *   '))
 9404    FORMAT('     *  ',20('      ',i3,'   *   '))
      ENDIF

      !  5. neighbour treatment
      ! ----------------------
      ! For each eliminated (land-only) subdomain, adjust the boundary markers of
      ! its wet neighbours so they no longer try to communicate towards it.
      DO jarea = 1, jpni*jpnj
         iproc = jarea-1
         ii = 1 + MOD( jarea-1  , jpni )
         ij = 1 +     (jarea-1) / jpni
         IF ( ipproc(ii,ij) == -1 .AND. 0 <= iono(ii,ij) .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN
            iino = 1 + MOD( iono(ii,ij) , jpni )
            ijno = 1 +      iono(ii,ij) / jpni
            ! Need to reverse the logical direction of communication
            ! for northern neighbours of northern row processors (north-fold)
            ! i.e. need to check that the northern neighbour only communicates
            ! to the SOUTH (or not at all) if this area is land-only (#1057)
            idir = 1
            IF( ij == jpnj .AND. ijno == jpnj )   idir = -1
            IF( ibondj(iino,ijno) == idir )   ibondj(iino,ijno) =   2
            IF( ibondj(iino,ijno) == 0    )   ibondj(iino,ijno) = -idir
         ENDIF
         IF( ipproc(ii,ij) == -1 .AND. 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN
            iiso = 1 + MOD( ioso(ii,ij) , jpni )
            ijso = 1 +      ioso(ii,ij) / jpni
            IF( ibondj(iiso,ijso) == -1 )   ibondj(iiso,ijso) = 2
            IF( ibondj(iiso,ijso) ==  0 )   ibondj(iiso,ijso) = 1
         ENDIF
         IF( ipproc(ii,ij) == -1 .AND. 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= jpni*jpnj-1 ) THEN
            iiea = 1 + MOD( ioea(ii,ij) , jpni )
            ijea = 1 +      ioea(ii,ij) / jpni
            IF( ibondi(iiea,ijea) == 1 )   ibondi(iiea,ijea) =  2
            IF( ibondi(iiea,ijea) == 0 )   ibondi(iiea,ijea) = -1
         ENDIF
         IF( ipproc(ii,ij) == -1 .AND. 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN
            iiwe = 1 + MOD( iowe(ii,ij) , jpni )
            ijwe = 1 +      iowe(ii,ij) / jpni
            IF( ibondi(iiwe,ijwe) == -1 )   ibondi(iiwe,ijwe) = 2
            IF( ibondi(iiwe,ijwe) ==  0 )   ibondi(iiwe,ijwe) = 1
         ENDIF
      END DO

      ! just to save nono etc for all proc
      ! translate regular-grid neighbour ranks into post-elimination process
      ! numbers (ipproc); -1 means "no neighbour / land"
      ii_noso(:) = -1
      ii_nono(:) = -1
      ii_noea(:) = -1
      ii_nowe(:) = -1
      nproc = narea-1
      DO jarea = 1, jpnij
         ii = iin(jarea)
         ij = ijn(jarea)
         IF( 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiso = 1 + MOD( ioso(ii,ij) , jpni )
            ijso = 1 +      ioso(ii,ij) / jpni
            noso = ipproc(iiso,ijso)
            ii_noso(jarea)= noso
         ENDIF
         IF( 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiwe = 1 + MOD( iowe(ii,ij) , jpni )
            ijwe = 1 +      iowe(ii,ij) / jpni
            nowe = ipproc(iiwe,ijwe)
            ii_nowe(jarea)= nowe
         ENDIF
         IF( 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= (jpni*jpnj-1) ) THEN
            iiea = 1 + MOD( ioea(ii,ij) , jpni )
            ijea = 1 +      ioea(ii,ij) / jpni
            noea = ipproc(iiea,ijea)
            ii_noea(jarea)= noea
         ENDIF
         IF( 0 <= iono(ii,ij) .AND. iono(ii,ij) <= (jpni*jpnj-1) ) THEN
            iino = 1 + MOD( iono(ii,ij) , jpni )
            ijno = 1 +      iono(ii,ij) / jpni
            nono = ipproc(iino,ijno)
            ii_nono(jarea)= nono
         ENDIF
      END DO

      !  6. Change processor name
      ! ------------------------
      ! copy this process' subdomain description into the dom_oce module variables
      nproc = narea-1
      ii = iin(narea)
      ij = ijn(narea)
      !
      ! set default neighbours
      noso = ii_noso(narea)
      nowe = ii_nowe(narea)
      noea = ii_noea(narea)
      nono = ii_nono(narea)
      nlcj = ilcj(ii,ij)
      nlci = ilci(ii,ij)
      nldi = ildi(ii,ij)
      nlei = ilei(ii,ij)
      nldj = ildj(ii,ij)
      nlej = ilej(ii,ij)
      nbondi = ibondi(ii,ij)
      nbondj = ibondj(ii,ij)
      nimpp = iimppt(ii,ij)
      njmpp = ijmppt(ii,ij)
      ! ... and the per-process tables (needed e.g. by the north-fold exchanges)
      DO jproc = 1, jpnij
         ii = iin(jproc)
         ij = ijn(jproc)
         nimppt(jproc) = iimppt(ii,ij)
         njmppt(jproc) = ijmppt(ii,ij)
         nlcjt(jproc) = ilcj(ii,ij)
         nlcit(jproc) = ilci(ii,ij)
         nldit(jproc) = ildi(ii,ij)
         nleit(jproc) = ilei(ii,ij)
         nldjt(jproc) = ildj(ii,ij)
         nlejt(jproc) = ilej(ii,ij)
      END DO

      ! Save processor layout in ascii file
      IF (lwp) THEN
         CALL ctl_opn( inum, 'layout.dat', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )
         WRITE(inum,'(a)') '   jpnij   jpimax  jpjmax    jpk  jpiglo  jpjglo'//&
   &           ' ( local:    narea     jpi     jpj)'
         WRITE(inum,'(6i8,a,3i8,a)') jpnij,jpimax,jpjmax,jpk,jpiglo,jpjglo,&
   &           ' ( local: ',narea,jpi,jpj,' )'
         WRITE(inum,'(a)') 'NAREA nlci nlcj nldi nldj nlei nlej nimp njmp nono noso nowe noea nbondi nbondj '
         DO jproc = 1, jpnij
            ii = iin(jproc)
            ij = ijn(jproc)
            WRITE(inum,'(15i5)') jproc-1, nlcit  (jproc), nlcjt  (jproc),   &
               &                          nldit  (jproc), nldjt  (jproc),   &
               &                          nleit  (jproc), nlejt  (jproc),   &
               &                          nimppt (jproc), njmppt (jproc),   &
               &                          ii_nono(jproc), ii_noso(jproc),   &
               &                          ii_nowe(jproc), ii_noea(jproc),   &
               &                          ibondi (ii,ij), ibondj (ii,ij)
         END DO
         CLOSE(inum)
      END IF

      !                          ! north fold parameter
      ! Defined npolj, either 0, 3 , 4 , 5 , 6
      ! In this case the important thing is that npolj /= 0
      ! Because if we go through these line it is because jpni >1 and thus
      ! we must use lbcnorthmpp, which tests only npolj =0 or npolj /= 0
      npolj = 0
      ij = ijn(narea)
      IF( jperio == 3 .OR. jperio == 4 ) THEN
         IF( ij == jpnj )   npolj = 3
      ENDIF
      IF( jperio == 5 .OR. jperio == 6 ) THEN
         IF( ij == jpnj )   npolj = 5
      ENDIF
      !
      IF(lwp) THEN               ! control print of the local decomposition
         WRITE(numout,*)
         WRITE(numout,*) '   nproc  = ', nproc
         WRITE(numout,*) '   nowe   = ', nowe  , '   noea   =  ', noea
         WRITE(numout,*) '   nono   = ', nono  , '   noso   =  ', noso
         WRITE(numout,*) '   nbondi = ', nbondi
         WRITE(numout,*) '   nbondj = ', nbondj
         WRITE(numout,*) '   npolj  = ', npolj
         WRITE(numout,*) '   nperio = ', nperio
         WRITE(numout,*) '   nlci   = ', nlci
         WRITE(numout,*) '   nlcj   = ', nlcj
         WRITE(numout,*) '   nimpp  = ', nimpp
         WRITE(numout,*) '   njmpp  = ', njmpp
         WRITE(numout,*) '   nreci  = ', nreci
         WRITE(numout,*) '   nrecj  = ', nrecj
         WRITE(numout,*) '   nn_hls = ', nn_hls
      ENDIF

      ! nperio = 1 (e-w cyclicity handled locally) is only valid for a single column of processes
      IF( nperio == 1 .AND. jpni /= 1 ) CALL ctl_stop( 'mpp_init: error on cyclicity' )
      !
      !                          ! Prepare mpp north fold
      IF( jperio >= 3 .AND. jperio <= 6 .AND. jpni > 1 ) THEN
         CALL mpp_ini_north
         IF(lwp) WRITE(numout,*) ' mpp_init : North fold boundary prepared for jpni >1'
      ENDIF
      !
      CALL mpp_init_ioipsl       ! Prepare NetCDF output file (if necessary)
      !
   END SUBROUTINE mpp_init


   SUBROUTINE mpp_init_mask( kmask )
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_mask  ***
      !!
      !! ** Purpose : Read relevant bathymetric information in a global array
      !!              in order to provide a land/sea mask used for the elimination
      !!              of land domains, in an mpp computation.
      !!
      !! ** Method  : Read the namelist ln_zco and ln_isfcav in namelist namzgr
      !!              in order to choose the correct bathymetric information
      !!              (file and variables)
      !!----------------------------------------------------------------------
      INTEGER, DIMENSION(jpiglo,jpjglo), INTENT(out) ::   kmask   ! global domain
      !
      INTEGER :: inum   !: logical unit for configuration file
      INTEGER :: ios    !: iostat error flag
      INTEGER ::  ijstartrow                   ! temporary integers
      REAL(wp), DIMENSION(jpiglo,jpjglo) ::   zbot, zbdy   ! global workspace
      REAL(wp) ::   zidom , zjdom              ! local scalars
      NAMELIST/nambdy/ ln_bdy, nb_bdy, ln_coords_file, cn_coords_file,         &
         &             ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta,     &
         &             cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta,             &
         &             ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out, &
         &             cn_ice_lim, nn_ice_lim_dta,                             &
         &             rn_ice_tem, rn_ice_sal, rn_ice_age,                     &
         &             ln_vol, nn_volctl, nn_rimwidth, nb_jpk_bdy
      !!----------------------------------------------------------------------
      ! 0. initialisation
      ! -----------------
      CALL iom_open( cn_domcfg, inum )
      !
      ! ocean bottom level
      CALL iom_get( inum, jpdom_unknown, 'bottom_level' , zbot , lrowattr=ln_use_jattr )  ! nb of ocean T-points
      !
      CALL iom_close( inum )
      !
      ! 2D ocean mask (=1 if at least one level of the water column is ocean, =0 otherwise)
      WHERE( zbot(:,:) > 0 )   ;   kmask(:,:) = 1
      ELSEWHERE                ;   kmask(:,:) = 0
      END WHERE

      ! Adjust kmask with bdy_msk if it exists
      ! (open-boundary points flagged land in bdy_msk must not force a subdomain to be kept)
      REWIND( numnam_ref )              ! Namelist nambdy in reference namelist : BDY
      READ  ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903)
903   IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in reference namelist (mppini)', lwp )

      REWIND( numnam_cfg )              ! Namelist nambdy in configuration namelist : BDY
      READ  ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 )
904   IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in configuration namelist (mppini)', lwp )

      IF( ln_bdy .AND. ln_mask_file ) THEN
         CALL iom_open( cn_mask_file, inum )
         CALL iom_get ( inum, jpdom_unknown, 'bdy_msk', zbdy )
         CALL iom_close( inum )
         WHERE ( zbdy(:,:) <= 0. ) kmask = 0
      ENDIF
      !
   END SUBROUTINE mpp_init_mask


   SUBROUTINE mpp_init_ioipsl
      !!----------------------------------------------------------------------
      !!                  ***  ROUTINE mpp_init_ioipsl  ***
      !!
      !! ** Purpose : Declare this process' subdomain geometry to IOIPSL so
      !!              that distributed NetCDF output can be reassembled.
      !!
      !! ** Method  : Build the 1D-per-direction description arrays expected
      !!              by flio_dom_set from the dom_oce decomposition variables.
      !!
      !! History :
      !!   9.0  !  04-03  (G. Madec )  MPP-IOIPSL
      !!   " "  !  08-12  (A. Coward)  addition in case of jpni*jpnj < jpnij
      !!----------------------------------------------------------------------
      INTEGER, DIMENSION(2) ::   iglo, iloc, iabsf, iabsl, ihals, ihale, idid
      !!----------------------------------------------------------------------

      ! The domain is split only horizontally along i- or/and j- direction
      ! So we need at the most  only 1D arrays with 2 elements.
      ! Set idompar values equivalent to the jpdom_local_noextra definition
      ! used in IOM. This works even if jpnij .ne. jpni*jpnj.
      iglo(1) = jpiglo
      iglo(2) = jpjglo
      iloc(1) = nlci
      iloc(2) = nlcj
      iabsf(1) = nimppt(narea)
      iabsf(2) = njmppt(narea)
      iabsl(:) = iabsf(:) + iloc(:) - 1
      ihals(1) = nldi - 1          ! halo width at the start of each direction
      ihals(2) = nldj - 1
      ihale(1) = nlci - nlei       ! halo width at the end of each direction
      ihale(2) = nlcj - nlej
      idid(1) = 1
      idid(2) = 2

      IF(lwp) THEN
          WRITE(numout,*)
          WRITE(numout,*) 'mpp_init_ioipsl :   iloc  = ', iloc (1), iloc (2)
          WRITE(numout,*) '~~~~~~~~~~~~~~~     iabsf = ', iabsf(1), iabsf(2)
          WRITE(numout,*) '                    ihals = ', ihals(1), ihals(2)
          WRITE(numout,*) '                    ihale = ', ihale(1), ihale(2)
      ENDIF
      !
      CALL flio_dom_set ( jpnij, nproc, idid, iglo, iloc, iabsf, iabsl, ihals, ihale, 'BOX', nidom)
      !
   END SUBROUTINE mpp_init_ioipsl

#endif

   !!======================================================================
END MODULE mppini