Changeset 9772 for NEMO/branches
- Timestamp:
- 2018-06-08T14:52:48+02:00
- Location:
- NEMO/branches/2018/dev_r9759_HPC09_ESIWACE
- Files:
- 1 added
- 7 edited
Legend:
- Unmodified: plain (context) lines
- Added: lines prefixed with +
- Removed: lines prefixed with -
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/src/OCE/LBC/lib_mpp.F90
r9667 → r9772

       ! Type of send : standard, buffered, immediate
       CHARACTER(len=1), PUBLIC ::   cn_mpi_send            !: type of mpi send/receive (S=standard, B=bsend, I=isend)
       LOGICAL         , PUBLIC ::   l_isend = .FALSE.      !: isend use indicator (T if cn_mpi_send='I')
       INTEGER         , PUBLIC ::   nn_buffer              !: size of the buffer in case of mpi_bsend
 +     INTEGER         , PUBLIC ::   nn_comm_mod  = 1          !: frequency of communication calls (1 = every time step)
 +     LOGICAL         , PUBLIC ::   ln_comm_only = .FALSE.    !: replace step routine by realistic communications only
 +
 +     INTEGER         , PUBLIC ::   ncom_stp = 0              !: copy of time step # istp
 +     INTEGER         , PUBLIC ::   icomm_sequence(500,2) = 0 !: size of communicated arrays (halos)
 +     INTEGER         , PUBLIC ::   n_sequence = 0            !: # of communicated arrays

       REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE ::   tampon   ! buffer in case of bsend
…
       LOGICAL, PUBLIC ::   ln_nnogather                  !: namelist control of northfold comms
       LOGICAL, PUBLIC ::   l_north_nogather = .FALSE.    !: internal control of northfold comms
 +
 +     LOGICAL ::   l_print_comm_report = .TRUE.

       !!----------------------------------------------------------------------
…
       LOGICAL ::   mpi_was_called
       !
 -     NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij, ln_nnogather
 +     NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij, ln_nnogather, nn_comm_mod, ln_comm_only
       !!----------------------------------------------------------------------
       !
…
       !
    END SUBROUTINE mpp_lnk_2d_icb
 +
 +  SUBROUTINE tic_tac (l_tic)
 +
 +     LOGICAL , INTENT(IN) ::   l_tic
 +     LOGICAL , SAVE       ::   l_write_wt = .TRUE.
 +     REAL(wp), SAVE       ::   tic_wt = 0., tac_wt = 0., tic_ct = 0., tac_ct = 0.
 +
 +     ! start count
 +     IF ( l_tic ) THEN
 +        IF ( ncom_stp > nit000 )   tic_wt = MPI_Wtime()
 +        IF ( tic_ct > 0.0_wp   )   tac_ct = tac_ct + MPI_Wtime() - tic_ct
 +     ! stop count and sum up
 +     ELSE
 +        IF ( ncom_stp > nit000 )   tac_wt = tac_wt + MPI_Wtime() - tic_wt
 +        IF ( ( ncom_stp == ( nitend - 1 ) ) .AND. l_write_wt ) THEN
 +           WRITE(6,'(A20,F11.6,A15,I8)') 'Computing time : ', tac_ct, ' on MPI rank : ', narea
 +           WRITE(6,'(A20,F11.6,A15,I8)') 'Waiting time : '  , tac_wt, ' on MPI rank : ', narea
 +           l_write_wt = .FALSE.
 +        END IF
 +        IF ( ncom_stp > nit000 )   tic_ct = MPI_Wtime()
 +     ENDIF
 +  END SUBROUTINE tic_tac

 #else
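Note: the new tic_tac timer splits wall-clock time per MPI rank into a "computing" bucket and a "waiting" bucket, and is meant to be called in pairs around each halo exchange, as the mpp_lnk_generic.h90 hunks below do. A minimal sketch of the calling pattern (surrounding NEMO module context assumed, not a complete routine):

   ! Sketch of the tic_tac calling convention; see mpp_lnk_generic.h90
   ! below for the actual use added by this changeset:
   CALL tic_tac(.TRUE.)    ! stop the computing clock, start the waiting clock
   ! ... mppsend / mpprecv / mpi_wait for one exchange direction ...
   CALL tic_tac(.FALSE.)   ! stop the waiting clock, resume the computing clock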
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/src/OCE/LBC/mpp_lnk_generic.h90
r9690 → r9772

          imigr = nn_hls * jpj * ipk * ipl * ipf
          !
 -        SELECT CASE ( nbondi )
 -        CASE ( -1 )
 -           CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req1 )
 -           CALL mpprecv( 1, zt3ew(1,1,1,1,1,2), imigr, noea )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
 -        CASE ( 0 )
 -           CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
 -           CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req2 )
 -           CALL mpprecv( 1, zt3ew(1,1,1,1,1,2), imigr, noea )
 -           CALL mpprecv( 2, zt3we(1,1,1,1,1,2), imigr, nowe )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
 -           IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
 -        CASE ( 1 )
 -           CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
 -           CALL mpprecv( 2, zt3we(1,1,1,1,1,2), imigr, nowe )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 -        END SELECT
 +        IF ( ncom_stp == nit000 ) THEN        ! record the exchange pattern on the first step
 +           n_sequence = n_sequence + 1
 +           icomm_sequence(n_sequence,1) = ipk
 +           icomm_sequence(n_sequence,2) = ipf
 +           ! write(6,'(A,6I4)') 'size comm ', nn_hls, jpi, jpj, ipk, ipl, ipf
 +        ELSE IF ( mpprank == 0 .AND. ncom_stp == (nit000+1) .AND. l_print_comm_report ) THEN
 +           write(6,*) 'Communication pattern report : '
 +           write(6,*) ' '
 +           write(6,'(A,I3)')  '   Exchanged halos : ', n_sequence
 +           jj = 0 ; jk = 0 ; jf = 0 ; jh = 0
 +           DO ji = 1, n_sequence
 +              IF ( icomm_sequence(ji,1) .GT. 1 )   jk = jk + 1
 +              IF ( icomm_sequence(ji,2) .GT. 1 )   jf = jf + 1
 +              IF ( icomm_sequence(ji,1) .GT. 1 .AND. icomm_sequence(ji,2) .GT. 1 )   jj = jj + 1
 +              jh = MAX( jh, icomm_sequence(ji,1)*icomm_sequence(ji,2) )
 +           END DO
 +           write(6,'(A,I3)')  '   3D Exchanged halos : ', jk
 +           write(6,'(A,I3)')  '   Multi arrays exchanged halos : ', jf
 +           write(6,'(A,I3)')  '      from which 3D : ', jj
 +           write(6,'(A,I10)') '   array max size : ', jh*jpi*jpj
 +           l_print_comm_report = .FALSE.
 +        END IF
 +        !
 +        IF ( ncom_stp <= ( nit000 + 1 ) .OR. MOD(ncom_stp,nn_comm_mod) == 0 ) THEN
 +           !
 +           CALL tic_tac(.TRUE.)
 +           !
 +           SELECT CASE ( nbondi )
 +           CASE ( -1 )
 +              CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req1 )
 +              CALL mpprecv( 1, zt3ew(1,1,1,1,1,2), imigr, noea )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
 +           CASE ( 0 )
 +              CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
 +              CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req2 )
 +              CALL mpprecv( 1, zt3ew(1,1,1,1,1,2), imigr, noea )
 +              CALL mpprecv( 2, zt3we(1,1,1,1,1,2), imigr, nowe )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err)
 +              IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err)
 +           CASE ( 1 )
 +              CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
 +              CALL mpprecv( 2, zt3we(1,1,1,1,1,2), imigr, nowe )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 +           END SELECT
 +        END IF
          !
          !                           ! Write Dirichlet lateral conditions
…
          imigr = nn_hls * jpi * ipk * ipl * ipf
          !
 -        SELECT CASE ( nbondj )
 -        CASE ( -1 )
 -           CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req1 )
 -           CALL mpprecv( 3, zt3ns(1,1,1,1,1,2), imigr, nono )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 -        CASE ( 0 )
 -           CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
 -           CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req2 )
 -           CALL mpprecv( 3, zt3ns(1,1,1,1,1,2), imigr, nono )
 -           CALL mpprecv( 4, zt3sn(1,1,1,1,1,2), imigr, noso )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 -           IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err )
 -        CASE ( 1 )
 -           CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
 -           CALL mpprecv( 4, zt3sn(1,1,1,1,1,2), imigr, noso )
 -           IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 -        END SELECT
 +        IF ( ncom_stp <= ( nit000 + 1 ) .OR. MOD(ncom_stp,nn_comm_mod) == 0 ) THEN
 +
 +           SELECT CASE ( nbondj )
 +           CASE ( -1 )
 +              CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req1 )
 +              CALL mpprecv( 3, zt3ns(1,1,1,1,1,2), imigr, nono )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 +           CASE ( 0 )
 +              CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
 +              CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req2 )
 +              CALL mpprecv( 3, zt3ns(1,1,1,1,1,2), imigr, nono )
 +              CALL mpprecv( 4, zt3sn(1,1,1,1,1,2), imigr, noso )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 +              IF(l_isend)   CALL mpi_wait(ml_req2, ml_stat, ml_err )
 +           CASE ( 1 )
 +              CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
 +              CALL mpprecv( 4, zt3sn(1,1,1,1,1,2), imigr, noso )
 +              IF(l_isend)   CALL mpi_wait(ml_req1, ml_stat, ml_err )
 +           END SELECT
 +           ! imbalance measurement
 +           CALL tic_tac(.FALSE.)
 +           !
 +        END IF
          !
          !                           ! Write Dirichlet lateral conditions
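Note: the guard added above always exchanges on the first two steps, so the pattern can be recorded (at nit000) and reported (at nit000+1), and afterwards only on steps that are a multiple of nn_comm_mod. A hypothetical standalone illustration of the guard, with nit000, nitend and nn_comm_mod mirroring the NEMO names but taking made-up values:

   PROGRAM demo_throttle
      ! Illustration only: with nit000 = 1, nitend = 12 and nn_comm_mod = 4,
      ! the guard lets halo exchanges run at steps 1, 2, 4, 8 and 12.
      INTEGER, PARAMETER :: nit000 = 1, nitend = 12, nn_comm_mod = 4
      INTEGER :: istp
      DO istp = nit000, nitend
         IF ( istp <= ( nit000 + 1 ) .OR. MOD( istp, nn_comm_mod ) == 0 ) THEN
            PRINT *, 'halo exchange performed at step ', istp
         END IF
      END DO
   END PROGRAM demo_throttle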
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/src/OCE/LBC/mppini.F90
r9667 → r9772

       INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ilei, ildi, iono, ioea   !  -  -
       INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   ilej, ildj, ioso, iowe   !  -  -
 -     INTEGER, DIMENSION(jpiglo,jpjglo) ::   imask   ! 2D global domain workspace
 +     INTEGER, ALLOCATABLE, DIMENSION(:,:) ::   imask   ! 2D global domain
       !!----------------------------------------------------------------------
…
       !
 -     IF ( jpni * jpnj == jpnij ) THEN          ! regular domain layout over processors
 -        imask(:,:) = 1
 -     ELSEIF ( jpni*jpnj > jpnij ) THEN         ! remove land-only processors (i.e. where imask(:,:)=0)
 -        CALL mpp_init_mask( imask )
 -     ELSE
 +     ALLOCATE ( imask(jpiglo,jpjglo), stat=ierr )
 +     !
 +     IF ( jpni*jpnj > jpnij ) THEN             ! remove land-only processors (i.e. where imask(:,:)=0)
 +        IF( ierr /= 0 )   CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate global ocean arrays' )
 +        CALL mpp_init_mask( imask )
 +     ELSEIF ( jpnij > jpni*jpnj ) THEN         ! error
          CALL ctl_stop( 'mpp_init: jpnij > jpni x jpnj. Check namelist setting!' )
       ENDIF
…
       !
       ! Check wet points over the entire domain to preserve the MPI communication stencil
 -     isurf = 0
 -     DO jj = 1, ilj
 -        DO ji = 1, ili
 -           IF( imask(ji+iimppt(ii,ij)-1, jj+ijmppt(ii,ij)-1) == 1 )   isurf = isurf+1
 -        END DO
 -     END DO
 +     IF ( jpni * jpnj == jpnij ) THEN          ! regular domain layout over processors
 +        isurf = ili * ilj
 +     ELSE
 +        isurf = 0
 +        DO jj = 1, ilj
 +           DO ji = 1, ili
 +              IF( imask(ji+iimppt(ii,ij)-1, jj+ijmppt(ii,ij)-1) == 1 )   isurf = isurf+1
 +           END DO
 +        END DO
 +     ENDIF
       !
       IF( isurf /= 0 ) THEN
…
       ENDIF
    END DO
 +  DEALLOCATE ( imask )
    !
    nfipproc(:,:) = ipproc(:,:)
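Note: making imask allocatable replaces the large automatic jpiglo x jpjglo work array with heap storage that is freed (DEALLOCATE) as soon as the decomposition is fixed. With a regular layout (jpni * jpnj == jpnij) no land-only subdomain is removed and the mask is never filled, so every subdomain is counted as fully wet directly via isurf = ili * ilj instead of scanning the mask.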
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/src/OCE/nemogcm.F90
r9656 → r9772

       CHARACTER(lc) ::   cform_aaa="( /, 'AAAAAAAA', / ) "     ! flag for output listing
 +
 +#if defined key_mpp_mpi
 +   INCLUDE 'mpif.h'
 +#endif
 +
       !!----------------------------------------------------------------------
       !! NEMO/OCE 4.0 , NEMO Consortium (2018)
…
       !!----------------------------------------------------------------------
       INTEGER ::   istp   ! time step index
 +     INTEGER ::   i, ik, id, iend, iseq   ! local indices
 +     REAL(wp) ::   tic
 +
       !!----------------------------------------------------------------------
       !
…
       !
       DO WHILE( istp <= nitend .AND. nstop == 0 )
 -        CALL stp( istp )
 +#if defined key_mpp_mpi
 +        ncom_stp = istp
 +#endif
 +        IF ( istp == ( nit000 + 1 ) )   tic = MPI_Wtime()
 +        IF ( ln_comm_only .AND. istp >= ( nit000 + 1 ) ) THEN    ! replay the recorded halo exchanges only
 +           DO i = 1, n_sequence
 +              IF ( icomm_sequence(i,1) == 1 ) THEN               ! 2D exchanges
 +                 SELECT CASE ( icomm_sequence(i,2) )
 +                 CASE (1)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1. )
 +                 CASE (2)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1. )
 +                 CASE (3)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1., ua(:,:,1), 'U', -1. )
 +                 CASE (4)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1., ua(:,:,1), 'U', -1., va(:,:,1), 'V', -1. )
 +                 CASE (5)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1., ua(:,:,1), 'U', -1., va(:,:,1), 'V', -1., ub(:,:,1), 'U', -1. )
 +                 CASE (6)
 +                    CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1., ua(:,:,1), 'U', -1., va(:,:,1), 'V', -1., ub(:,:,1), 'U', -1., vb(:,:,1), 'V', -1. )
 +                 END SELECT
 +              ELSE                                               ! 3D exchanges
 +                 SELECT CASE ( icomm_sequence(i,2) )
 +                 CASE (1)
 +                    CALL lbc_lnk_multi( un, 'U', -1. )
 +                 CASE (2)
 +                    CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1. )
 +                 CASE (3)
 +                    CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1., ua, 'U', -1. )
 +                 CASE (4)
 +                    CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1., ua, 'U', -1., va, 'V', -1. )
 +                 CASE (5)
 +                    CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1., ua, 'U', -1., va, 'V', -1., ub, 'U', -1. )
 +                 CASE (6)
 +                    CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1., ua, 'U', -1., va, 'V', -1., ub, 'U', -1., vb, 'V', -1. )
 +                 END SELECT
 +              ENDIF
 +           END DO
 +        ELSE
 +           CALL stp( istp )
 +        ENDIF
          istp = istp + 1
 +        IF ( istp == ( nitend - 1 ) )   tic = MPI_Wtime() - tic
       END DO
 +     WRITE(6,'(A20,F11.6,A15,I8)') 'timeloop duration : ', tic, ' on MPI rank : ', narea
       !
    ELSE                                  !==  diurnal SST time-stepping only  ==!
…
       !
       ierr =        oce_alloc()          ! ocean
 -     ierr = ierr + dia_wri_alloc()
 +     !EM useless alloc for BENCH
 +     !EM ierr = ierr + dia_wri_alloc()
       ierr = ierr + dom_oce_alloc()      ! ocean domain
       ierr = ierr + zdf_oce_alloc()      ! ocean vertical physics
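Note: in the replay branch, each recorded entry drives one lbc_lnk_multi call of the same shape as the original exchange: icomm_sequence(i,1) holds ipk (1 for a 2D field, more than 1 for a 3D one) and icomm_sequence(i,2) holds ipf, the number of fields bundled into the exchange; the prognostic velocity arrays merely serve as stand-in buffers of the right size. A reading of the mapping above (not new behaviour):

   ! One recorded entry (ipk, ipf) is replayed as an ipf-field exchange, e.g.
   !    (1  , 2)  ->  CALL lbc_lnk_multi( un(:,:,1), 'U', -1., vn(:,:,1), 'V', -1. )   ! two 2D fields
   !    (ipk, 2), ipk > 1  ->  CALL lbc_lnk_multi( un, 'U', -1., vn, 'V', -1. )        ! two 3D fields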
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/tests/BENCH/EXPREF/namelist_cfg_orca025_like
r9762 → r9772

    jpni         =    3     !  jpni   number of processors following i (set automatically if < 1)
    jpnj         =    3     !  jpnj   number of processors following j (set automatically if < 1)
 +  nn_comm_mod  = 1        !  frequency of communication calls (1 = every time step)
 +  ln_comm_only = .false.  !  replace step routine by realistic communications only
 /
 !-----------------------------------------------------------------------
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/tests/BENCH/EXPREF/namelist_cfg_orca12_like
r9766 → r9772

    jpni         =    3     !  jpni   number of processors following i (set automatically if < 1)
    jpnj         =    3     !  jpnj   number of processors following j (set automatically if < 1)
 +  nn_comm_mod  = 1        !  frequency of communication calls (1 = every time step)
 +  ln_comm_only = .false.  !  replace step routine by realistic communications only
 /
 !-----------------------------------------------------------------------
NEMO/branches/2018/dev_r9759_HPC09_ESIWACE/tests/BENCH/EXPREF/namelist_cfg_orca1_like
r9762 → r9772

    jpni         =    3     !  jpni   number of processors following i (set automatically if < 1)
    jpnj         =    3     !  jpnj   number of processors following j (set automatically if < 1)
 +  nn_comm_mod  = 1        !  frequency of communication calls (1 = every time step)
 +  ln_comm_only = .false.  !  replace step routine by realistic communications only
 /
 !-----------------------------------------------------------------------
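Note: to run one of these BENCH configurations as a pure communication benchmark, both new switches would presumably be flipped in the nammpp group; an illustrative setting (not part of this changeset, values chosen for the example only):

 !-----------------------------------------------------------------------
 &nammpp        !   Massively Parallel Processing
 !-----------------------------------------------------------------------
    nn_comm_mod  = 4        !  exchange halos only every 4th call after the first two steps
    ln_comm_only = .true.   !  skip stp(): replay the recorded halo exchanges only
 /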