- Timestamp: 2018-11-12T16:20:57+01:00
- Location: NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src
- Files: 82 edited
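Every hunk below applies the same mechanical change: the collective-communication wrappers of lib_mpp (mpp_sum, mpp_min, mpp_max) gain a leading character argument carrying the name of the calling module, which feeds the new communication summary report. A minimal sketch of the new wrapper shape — not the actual branch source; the dummy-argument name cdname and the bookkeeping lines are assumptions, made consistent with the crname_glb / n_sequence_glb variables added to lib_mpp.F90 further down:

   ! Sketch only: how a reduction wrapper can record its caller before reducing.
   ! cdname and the two bookkeeping lines are assumed, not copied from the branch.
   SUBROUTINE mppsum_int( cdname, ktab )
      CHARACTER(len=*), INTENT(in   ) ::   cdname   ! name of the calling routine
      INTEGER         , INTENT(inout) ::   ktab     ! scalar summed over all ranks
      INTEGER ::   iwork, ierror
      n_sequence_glb = n_sequence_glb + 1           ! count this global communication
      crname_glb(n_sequence_glb) = cdname           ! remember who requested it
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_oce, ierror )
      ktab = iwork
   END SUBROUTINE mppsum_int

With the caller's name threaded through every call site, the report can attribute each global reduction to a subroutine by name, which is exactly what the per-file edits below set up.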
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icectl.F90
r10069 → r10297

@@ 402 @@
    IF( lk_mpp ) THEN
       DO ialert_id = 1, inb_altests
-         CALL mpp_sum( inb_alp(ialert_id) )
+         CALL mpp_sum( 'icectl', inb_alp(ialert_id) )
       END DO
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icedia.F90
r10069 → r10297

@@ 52 @@
    ALLOCATE( vol_loc_ini(jpi,jpj), sal_loc_ini(jpi,jpj), tem_loc_ini(jpi,jpj), STAT=ice_dia_alloc )

-   IF( lk_mpp ) CALL mpp_sum ( ice_dia_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'icedia', ice_dia_alloc )
    IF( ice_dia_alloc /= 0 ) CALL ctl_warn( 'ice_dia_alloc: failed to allocate arrays' )
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icedyn_adv_pra.F90
r10170 → r10297

@@ 101 @@
    zcfl = MAXVAL( ABS( pu_ice(:,:) ) * rdt_ice * r1_e1u(:,:) )
    zcfl = MAX( zcfl, MAXVAL( ABS( pv_ice(:,:) ) * rdt_ice * r1_e2v(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( zcfl )
+   IF( lk_mpp ) CALL mpp_max( 'icedyn_adv_pra', zcfl )

    IF( zcfl > 0.5 ) THEN   ;   initad = 2   ;   zusnit = 0.5_wp
@@ 640 @@
       &      STAT = ierr )
    !
-   IF( lk_mpp ) CALL mpp_sum( ierr )
+   IF( lk_mpp ) CALL mpp_sum( 'icedyn_adv_pra', ierr )
    IF( ierr /= 0 ) CALL ctl_stop('STOP', 'adv_pra_init : unable to allocate ice arrays for Prather advection scheme')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icedyn_rdgrft.F90
r10170 → r10297

@@ 92 @@
       &      ze_i_2d(jpij,nlay_i,jpl), ze_s_2d(jpij,nlay_s,jpl), STAT=ice_dyn_rdgrft_alloc )

-   IF( lk_mpp ) CALL mpp_sum ( ice_dyn_rdgrft_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'icedyn_rdgrft', ice_dyn_rdgrft_alloc )
    IF( ice_dyn_rdgrft_alloc /= 0 ) CALL ctl_warn( 'ice_dyn_rdgrft_alloc: failed to allocate arrays' )
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icedyn_rhg_evp.F90
r10170 → r10297

@@ 653 @@
    END DO
    zresm = MAXVAL( zresr( 1:jpi, 2:jpjm1 ) )
-   IF( lk_mpp ) CALL mpp_max( zresm )   ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'icedyn_rhg_evp', zresm )   ! max over the global domain
    ENDIF
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/icestp.F90
r10069 → r10297

@@ 248 @@
    ierr = ierr + ice1D_alloc  ()   ! thermodynamics
    !
-   IF( lk_mpp ) CALL mpp_sum( ierr )
+   IF( lk_mpp ) CALL mpp_sum( 'icestp', ierr )
    IF( ierr /= 0 ) CALL ctl_stop('STOP', 'ice_init : unable to allocate ice arrays')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/ICE/iceupdate.F90
r10170 → r10297

@@ 59 @@
    ALLOCATE( utau_oce(jpi,jpj), vtau_oce(jpi,jpj), tmod_io(jpi,jpj), STAT=ice_update_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum( ice_update_alloc )
+   IF( lk_mpp ) CALL mpp_sum( 'iceupdate', ice_update_alloc )
    IF( ice_update_alloc /= 0 ) CALL ctl_warn('ice_update_alloc: failed to allocate arrays')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/NST/agrif_user.F90
r10068 → r10297

@@ 280 @@
    CALL Agrif_Bc_variable(e3t_id,calledweight=1.,procname=interpe3t)
    !
-   IF (lk_mpp) CALL mpp_sum( kindic_agr )
+   IF (lk_mpp) CALL mpp_sum( 'agrif_user', kindic_agr )
    IF( kindic_agr /= 0 ) THEN
       CALL ctl_stop('Child Bathymetry is not correct near boundaries.')
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdy_oce.F90
r10068 → r10297

@@ 161 @@
    bdyvmask(:,:) = 1._wp
    !
-   IF( lk_mpp ) CALL mpp_sum ( bdy_oce_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'bdy_oce', bdy_oce_alloc )
    IF( bdy_oce_alloc /= 0 ) CALL ctl_warn('bdy_oce_alloc: failed to allocate arrays.')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdyini.F90
r10170 → r10297

@@ 1280 @@
    END DO
    !
-   IF( lk_mpp ) CALL mpp_sum( bdysurftot )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyini', bdysurftot )   ! sum over the global domain
    END IF
    !
@@ 1520 @@
       END DO
    END DO
-   IF( lk_mpp ) CALL mpp_sum( ztestmask, 2 )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyini', ztestmask, 2 )   ! sum over the global domain

    IF (ztestmask(1)==1) THEN
@@ 1564 @@
       END DO
    END DO
-   IF( lk_mpp ) CALL mpp_sum( ztestmask, 2 )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyini', ztestmask, 2 )   ! sum over the global domain

    IF (ztestmask(1)==1) THEN
@@ 1608 @@
       END DO
    END DO
-   IF( lk_mpp ) CALL mpp_sum( ztestmask, 2 )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyini', ztestmask, 2 )   ! sum over the global domain

    IF ((ztestmask(1)==1).AND.(icorns(ib,1)==0)) THEN
@@ 1638 @@
       END DO
    END DO
-   IF( lk_mpp ) CALL mpp_sum( ztestmask, 2 )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyini', ztestmask, 2 )   ! sum over the global domain

    IF ((ztestmask(1)==1).AND.(icornn(ib,1)==0)) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/BDY/bdyvol.F90
r10068 → r10297

@@ 85 @@
    !!gm  replace these lines :
    z_cflxemp = SUM ( ( emp(:,:) - rnf(:,:) + fwfisf(:,:) ) * bdytmask(:,:) * e1e2t(:,:) ) / rau0
-   IF( lk_mpp ) CALL mpp_sum( z_cflxemp )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyvol', z_cflxemp )   ! sum over the global domain
    !!gm  by :
    !!gm  z_cflxemp = glob_sum( ( emp(:,:)-rnf(:,:)+fwfisf(:,:) ) * bdytmask(:,:) * e1e2t(:,:) ) / rau0
@@ 114 @@
    !
    END DO
-   IF( lk_mpp ) CALL mpp_sum( zubtpecor )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyvol', zubtpecor )   ! sum over the global domain

    ! The normal velocity correction
@@ 148 @@
    !
    END DO
-   IF( lk_mpp ) CALL mpp_sum( ztranst )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'bdyvol', ztranst )   ! sum over the global domain

    ! Check the cumulated transport through unstructured OBC once barotropic velocities corrected
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/C1D/dyndmp.F90
r10068 → r10297

@@ 57 @@
    ALLOCATE( utrdmp(jpi,jpj,jpk), vtrdmp(jpi,jpj,jpk), resto_uv(jpi,jpj,jpk), STAT= dyn_dmp_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( dyn_dmp_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'dyndmp', dyn_dmp_alloc )
    IF( dyn_dmp_alloc > 0 ) CALL ctl_warn('dyn_dmp_alloc: allocation of arrays failed')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diaar5.F90
r10170 → r10297

@@ 56 @@
    ALLOCATE( area(jpi,jpj), thick0(jpi,jpj) , sn0(jpi,jpj,jpk) , STAT=dia_ar5_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( dia_ar5_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'diaar5', dia_ar5_alloc )
    IF( dia_ar5_alloc /= 0 ) CALL ctl_warn('dia_ar5_alloc: failed to allocate arrays')
    !
@@ 95 @@
    !                    ! total volume of liquid seawater
    zvolssh = SUM( zarea_ssh(:,:) )
-   IF( lk_mpp ) CALL mpp_sum( zvolssh )
+   IF( lk_mpp ) CALL mpp_sum( 'diaar5', zvolssh )
    zvol = vol0 + zvolssh
@@ 130 @@
    !
    zarho = SUM( area(:,:) * zbotpres(:,:) )
-   IF( lk_mpp ) CALL mpp_sum( zarho )
+   IF( lk_mpp ) CALL mpp_sum( 'diaar5', zarho )
    zssh_steric = - zarho / area_tot
    CALL iom_put( 'sshthster', zssh_steric )
@@ 156 @@
    !
    zarho = SUM( area(:,:) * zbotpres(:,:) )
-   IF( lk_mpp ) CALL mpp_sum( zarho )
+   IF( lk_mpp ) CALL mpp_sum( 'diaar5', zarho )
    zssh_steric = - zarho / area_tot
    CALL iom_put( 'sshsteric', zssh_steric )
@@ 194 @@
    ENDIF
    IF( lk_mpp ) THEN
-      CALL mpp_sum( ztemp )
-      CALL mpp_sum( zsal )
+      CALL mpp_sum( 'diaar5', ztemp )
+      CALL mpp_sum( 'diaar5', zsal )
    END IF
    !
@@ 342 @@
    area(:,:) = e1e2t(:,:) * tmask_i(:,:)

-   area_tot = SUM( area(:,:) )   ;   IF( lk_mpp ) CALL mpp_sum( area_tot )
+   area_tot = SUM( area(:,:) )   ;   IF( lk_mpp ) CALL mpp_sum( 'diaar5', area_tot )

    vol0 = 0._wp
@@ 350 @@
       thick0(:,:) = thick0(:,:) + tmask_i(:,:) * tmask(:,:,jk) * e3t_0(:,:,jk)
    END DO
-   IF( lk_mpp ) CALL mpp_sum( vol0 )
+   IF( lk_mpp ) CALL mpp_sum( 'diaar5', vol0 )

    IF( iom_use( 'sshthster' ) ) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diadct.F90
r10068 → r10297

@@ 258 @@
    DO jsec=1,nb_sec ; zsum(jsec,:,:) = secs(jsec)%transport(:,:) ; ENDDO
    zwork(:)= RESHAPE(zsum(:,:,:), ish )
-   CALL mpp_sum( zwork, ish(1) )
+   CALL mpp_sum( 'diadct', zwork, ish(1) )
    zsum(:,:,:)= RESHAPE(zwork,ish2)
    DO jsec=1,nb_sec ; secs(jsec)%transport(:,:) = zsum(jsec,:,:) ; ENDDO
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diahth.F90
r10068 → r10297

@@ 54 @@
    ALLOCATE( hth(jpi,jpj), hd20(jpi,jpj), hd28(jpi,jpj), htc3(jpi,jpj), STAT=dia_hth_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( dia_hth_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'diahth', dia_hth_alloc )
    IF(dia_hth_alloc /= 0) CALL ctl_warn('dia_hth_alloc: failed to allocate arrays.')
    !
@@ 123 @@
       &      zthick(jpi,jpj),   &
       &      zdelr(jpi,jpj), STAT=ji)
-   IF( lk_mpp ) CALL mpp_sum(ji)
+   IF( lk_mpp ) CALL mpp_sum('diahth', ji)
    IF( ji /= 0 ) CALL ctl_stop( 'STOP', 'dia_hth : unable to allocate standard ocean arrays' )
    END IF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diaptr.F90
r10068 → r10297

@@ 549 @@
    !
    dia_ptr_alloc = MAXVAL( ierr )
-   IF(lk_mpp) CALL mpp_sum( dia_ptr_alloc )
+   IF(lk_mpp) CALL mpp_sum( 'diaptr', dia_ptr_alloc )
    !
    END FUNCTION dia_ptr_alloc
@@ 595 @@
    ENDIF
 #if defined key_mpp_mpi
-   IF(lk_mpp) CALL mpp_sum( p_fval, ijpj, ncomm_znl)
+   IF(lk_mpp) CALL mpp_sum( 'diaptr', p_fval, ijpj, ncomm_znl)
 #endif
    !
@@ 638 @@
    ENDIF
 #if defined key_mpp_mpi
-   CALL mpp_sum( p_fval, ijpj, ncomm_znl )
+   CALL mpp_sum( 'diaptr', p_fval, ijpj, ncomm_znl )
 #endif
    !
@@ 696 @@
    ish(1) = ijpjjpk   ;   ish2(1) = jpj   ;   ish2(2) = jpk
    zwork(1:ijpjjpk) = RESHAPE( p_fval, ish )
-   CALL mpp_sum( zwork, ijpjjpk, ncomm_znl )
+   CALL mpp_sum( 'diaptr', zwork, ijpjjpk, ncomm_znl )
    p_fval(:,:) = RESHAPE( zwork, ish2 )
 #endif
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DIA/diawri.F90
r10170 → r10297

@@ 410 @@
    !
    dia_wri_alloc = MAXVAL(ierr)
-   IF( lk_mpp ) CALL mpp_sum( dia_wri_alloc )
+   IF( lk_mpp ) CALL mpp_sum( 'diawri', dia_wri_alloc )
    !
    END FUNCTION dia_wri_alloc
@@ 519 @@
    !! that routine is called from nemogcm, so do it here immediately before its needed
    ALLOCATE( ndex_bT(jpi*jpj*nclasses), STAT=ierror )
-   IF( lk_mpp ) CALL mpp_sum( ierror )
+   IF( lk_mpp ) CALL mpp_sum( 'diawri', ierror )
    IF( ierror /= 0 ) THEN
       CALL ctl_stop('dia_wri: failed to allocate iceberg diagnostic array')
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/closea.F90
r10170 → r10297

@@ 118 @@
    ! number of closed seas = global maximum value in closea_mask field
    jncs = maxval(closea_mask(:,:))
-   IF( lk_mpp ) CALL mpp_max(jncs)
+   IF( lk_mpp ) CALL mpp_max('closea', jncs)
    IF( jncs > 0 ) THEN
       IF( lwp ) WRITE(numout,*) 'Number of closed seas : ',jncs
@@ 146 @@
    ! number of closed seas rnf mappings = global maximum in closea_mask_rnf field
    jncsr = maxval(closea_mask_rnf(:,:))
-   IF( lk_mpp ) CALL mpp_max(jncsr)
+   IF( lk_mpp ) CALL mpp_max('closea', jncsr)
    IF( jncsr > 0 ) THEN
       IF( lwp ) WRITE(numout,*) 'Number of closed seas rnf mappings : ',jncsr
@@ 166 @@
    ! number of closed seas empmr mappings = global maximum value in closea_mask_empmr field
    jncse = maxval(closea_mask_empmr(:,:))
-   IF( lk_mpp ) CALL mpp_max(jncse)
+   IF( lk_mpp ) CALL mpp_max('closea', jncse)
    IF( jncse > 0 ) THEN
       IF( lwp ) WRITE(numout,*) 'Number of closed seas empmr mappings : ',jncse
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/domvvl.F90
r10170 → r10297

@@ 79 @@
       &      dtilde_e3t_a(jpi,jpj,jpk) , un_td  (jpi,jpj,jpk) , vn_td  (jpi,jpj,jpk) ,   &
       &      STAT = dom_vvl_alloc )
-   IF( lk_mpp ) CALL mpp_sum ( dom_vvl_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'domvvl', dom_vvl_alloc )
    IF( dom_vvl_alloc /= 0 ) CALL ctl_warn('dom_vvl_alloc: failed to allocate arrays')
    un_td = 0._wp
@@ 86 @@
    IF( ln_vvl_ztilde ) THEN
       ALLOCATE( frq_rst_e3t(jpi,jpj) , frq_rst_hdv(jpi,jpj) , hdiv_lf(jpi,jpj,jpk) , STAT= dom_vvl_alloc )
-      IF( lk_mpp ) CALL mpp_sum ( dom_vvl_alloc )
+      IF( lk_mpp ) CALL mpp_sum ( 'domvvl', dom_vvl_alloc )
       IF( dom_vvl_alloc /= 0 ) CALL ctl_warn('dom_vvl_alloc: failed to allocate arrays')
    ENDIF
@@ 429 @@
    END DO
    z_tmax = MAXVAL( ze3t(:,:,:) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    z_tmin = MINVAL( ze3t(:,:,:) )
-   IF( lk_mpp ) CALL mpp_min( z_tmin )             ! min over the global domain
+   IF( lk_mpp ) CALL mpp_min( 'domvvl', z_tmin )   ! min over the global domain
    ! - ML - test: for the moment, stop simulation for too large e3_t variations
    IF( ( z_tmax > rn_zdef_max ) .OR. ( z_tmin < - rn_zdef_max ) ) THEN
@@ 493 @@
    IF ( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN
       z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( zht(:,:) ) )
-      IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+      IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
       IF( lwp    ) WRITE(numout, *) kt,' MAXVAL(abs(SUM(tilde_e3t_a))) =', z_tmax
    END IF
@@ 502 @@
    END DO
    z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + sshn(:,:) - zht(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+sshn-SUM(e3t_n))) =', z_tmax
    !
@@ 510 @@
    END DO
    z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + ssha(:,:) - zht(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+ssha-SUM(e3t_a))) =', z_tmax
    !
@@ 518 @@
    END DO
    z_tmax = MAXVAL( tmask(:,:,1) * tmask_i(:,:) * ABS( ht_0(:,:) + sshb(:,:) - zht(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ht_0+sshb-SUM(e3t_b))) =', z_tmax
    !
    z_tmax = MAXVAL( tmask(:,:,1) * ABS( sshb(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(sshb))) =', z_tmax
    !
    z_tmax = MAXVAL( tmask(:,:,1) * ABS( sshn(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(sshn))) =', z_tmax
    !
    z_tmax = MAXVAL( tmask(:,:,1) * ABS( ssha(:,:) ) )
-   IF( lk_mpp ) CALL mpp_max( z_tmax )             ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domvvl', z_tmax )   ! max over the global domain
    IF( lwp ) WRITE(numout, *) kt,' MAXVAL(abs(ssha))) =', z_tmax
    END IF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/domwri.F90
r10170 → r10297

@@ 277 @@
    zrxmax = MAXVAL( zx1 )
    !
-   IF( lk_mpp ) CALL mpp_max( zrxmax )   ! max over the global domain
+   IF( lk_mpp ) CALL mpp_max( 'domwri', zrxmax )   ! max over the global domain
    !
    IF(lwp) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/iscplhsb.F90
r10069 → r10297

@@ 206 @@

    ! build array of total problematic point on each cpu (share to each cpu)
-   CALL mpp_max(inpts,jpnij)
+   CALL mpp_max('iscplhsb', inpts,jpnij)

    ! size of the new variable
@@ 243 @@
    ! build array of total problematic point on each cpu (share to each cpu)
    ! point coordinates
-   CALL mpp_max(zlat ,npts)
-   CALL mpp_max(zlon ,npts)
-   CALL mpp_max(izpts,npts)
+   CALL mpp_max('iscplhsb', zlat ,npts)
+   CALL mpp_max('iscplhsb', zlon ,npts)
+   CALL mpp_max('iscplhsb', izpts,npts)

    ! correction values
-   CALL mpp_max(zcorr_vol,npts)
-   CALL mpp_max(zcorr_sal,npts)
-   CALL mpp_max(zcorr_tem,npts)
+   CALL mpp_max('iscplhsb', zcorr_vol,npts)
+   CALL mpp_max('iscplhsb', zcorr_sal,npts)
+   CALL mpp_max('iscplhsb', zcorr_tem,npts)

    ! put correction term in the closest cell
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DOM/iscplini.F90
r10069 → r10297

@@ 48 @@
    ALLOCATE( htsc_iscpl(jpi,jpj,jpk,jpts) , hdiv_iscpl(jpi,jpj,jpk) , STAT=iscpl_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( iscpl_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'iscplini', iscpl_alloc )
    IF( iscpl_alloc > 0 ) CALL ctl_warn('iscpl_alloc: allocation of arrays failed')
    END FUNCTION iscpl_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DYN/dynspg_ts.F90
r10170 → r10297

@@ 112 @@
    dyn_spg_ts_alloc = MAXVAL( ierr(:) )
    !
-   IF( lk_mpp ) CALL mpp_sum( dyn_spg_ts_alloc )
+   IF( lk_mpp ) CALL mpp_sum( 'dynspg_ts', dyn_spg_ts_alloc )
    IF( dyn_spg_ts_alloc /= 0 ) CALL ctl_warn('dyn_spg_ts_alloc: failed to allocate arrays')
    !
@@ 1478 @@
    !
    zcmax = MAXVAL( zcu(:,:) )
-   IF( lk_mpp ) CALL mpp_max( zcmax )
+   IF( lk_mpp ) CALL mpp_max( 'dynspg_ts', zcmax )

    ! Estimate number of iterations to satisfy a max courant number= rn_bt_cmax
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/DYN/wet_dry.F90
r10170 → r10297

@@ 243 @@
    CALL lbc_lnk_multi( 'wet_dry', zwdlmtu, 'U', 1., zwdlmtv, 'V', 1. )
    !
-   IF( lk_mpp ) CALL mpp_max(jflag)   !max over the global domain
+   IF( lk_mpp ) CALL mpp_max('wet_dry', jflag)   !max over the global domain
    !
    IF( jflag == 0 )   EXIT
@@ 372 @@
    CALL lbc_lnk_multi( 'wet_dry', zwdlmtu, 'U', 1., zwdlmtv, 'V', 1. )
    !
-   IF(lk_mpp) CALL mpp_max(jflag)   !max over the global domain
+   IF(lk_mpp) CALL mpp_max('wet_dry', jflag)   !max over the global domain
    !
    IF(jflag == 0)   EXIT
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/FLO/flo_oce.F90
r10068 → r10297

@@ 64 @@
       &      tpifl(jpnfl) , tpjfl(jpnfl) , tpkfl(jpnfl) , STAT=flo_oce_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( flo_oce_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'flo_oce', flo_oce_alloc )
    IF( flo_oce_alloc /= 0 ) CALL ctl_warn('flo_oce_alloc: failed to allocate arrays')
    END FUNCTION flo_oce_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/FLO/floblk.F90
r10068 → r10297

@@ 330 @@

    ! synchronisation
-   IF( lk_mpp ) CALL mpp_sum( zgifl , jpnfl )   ! sums over the global domain
-   IF( lk_mpp ) CALL mpp_sum( zgjfl , jpnfl )
-   IF( lk_mpp ) CALL mpp_sum( zgkfl , jpnfl )
-   IF( lk_mpp ) CALL mpp_sum( zagefl, jpnfl )
-   IF( lk_mpp ) CALL mpp_sum( iil   , jpnfl )
-   IF( lk_mpp ) CALL mpp_sum( ijl   , jpnfl )
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', zgifl , jpnfl )   ! sums over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', zgjfl , jpnfl )
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', zgkfl , jpnfl )
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', zagefl, jpnfl )
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', iil   , jpnfl )
+   IF( lk_mpp ) CALL mpp_sum( 'floblk', ijl   , jpnfl )

    ! Test to know if a float hasn't integrated enought time
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/FLO/flodom.F90
r10068 → r10297

@@ 193 @@
    !----------------------------------------------
    IF( lk_mpp ) THEN
-      CALL mpp_sum(ihtest,jpnfl)
-      CALL mpp_sum(ivtest,jpnfl)
+      CALL mpp_sum('flodom', ihtest,jpnfl)
+      CALL mpp_sum('flodom', ivtest,jpnfl)
    ENDIF
    DO jfl = kfl_start,kfl_end
@@ 252 @@
    ! The sum of all the arrays zgifl, zgjfl, zgkfl give 3 arrays with the positions of all the floats.
    IF( lk_mpp ) THEN
-      CALL mpp_sum( zgjfl, ifl )   ! sums over the global domain
-      CALL mpp_sum( zgkfl, ifl )
+      CALL mpp_sum( 'flodom', zgjfl, ifl )   ! sums over the global domain
+      CALL mpp_sum( 'flodom', zgkfl, ifl )
    ENDIF
@@ 451 @@
       zgifl(jpnfl)  , zgjfl(jpnfl) , zgkfl(jpnfl) , STAT=flo_dom_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( flo_dom_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'flodom', flo_dom_alloc )
    IF( flo_dom_alloc /= 0 ) CALL ctl_warn('flo_dom_alloc: failed to allocate arrays')
    END FUNCTION flo_dom_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/FLO/florst.F90
r10068 → r10297

@@ 39 @@
    ALLOCATE( iperproc(jpnij), STAT=flo_rst_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( flo_rst_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'florst', flo_rst_alloc )
    IF( flo_rst_alloc /= 0 ) CALL ctl_warn('flo_rst_alloc: failed to allocate arrays.')
    END FUNCTION flo_rst_alloc
@@ 109 @@
       ENDIF
    END DO
-   CALL mpp_sum( iperproc, jpnij )
+   CALL mpp_sum( 'florst', iperproc, jpnij )
    !
    IF(lwp) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/FLO/flowri.F90
r10068 → r10297

@@ 51 @@
       zlon(jpnfl)   , zlat(jpnfl)  , zdep(jpnfl) , STAT=flo_wri_alloc)
    !
-   IF( lk_mpp ) CALL mpp_sum ( flo_wri_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'flowri', flo_wri_alloc )
    IF( flo_wri_alloc /= 0 ) CALL ctl_warn('flo_wri_alloc: failed to allocate arrays.')
    END FUNCTION flo_wri_alloc
@@ 153 @@
    !Only proc 0 writes all positions : SUM of positions on all procs
    IF( lk_mpp ) THEN
-      CALL mpp_sum( zlon, jpnfl )   ! sums over the global domain
-      CALL mpp_sum( zlat, jpnfl )   ! sums over the global domain
-      CALL mpp_sum( zdep, jpnfl )   ! sums over the global domain
-      CALL mpp_sum( ztem, jpnfl )   ! sums over the global domain
-      CALL mpp_sum( zsal, jpnfl )   ! sums over the global domain
-      CALL mpp_sum( zrho, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', zlon, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', zlat, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', zdep, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', ztem, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', zsal, jpnfl )   ! sums over the global domain
+      CALL mpp_sum( 'flowri', zrho, jpnfl )   ! sums over the global domain
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icb_oce.F90
r10068 → r10297

@@ 190 @@
    icb_alloc = icb_alloc + ill

-   IF( lk_mpp ) CALL mpp_sum ( icb_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'icb_oce', icb_alloc )
    IF( icb_alloc > 0 ) CALL ctl_warn('icb_alloc: allocation of arrays failed')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icbdia.F90
r10068 → r10297

@@ 161 @@
    rsumbuf(2) = bergs_mass_start
    rsumbuf(3) = bits_mass_start
-   CALL mpp_sum( rsumbuf(1:3), 3 )
+   CALL mpp_sum( 'icbdia', rsumbuf(1:3), 3 )
    floating_mass_start = rsumbuf(1)
    bergs_mass_start = rsumbuf(2)
@@ 236 @@
    rsumbuf(23) = zgrdd_bits_mass
    !
-   CALL mpp_sum( rsumbuf(1:23), 23)
+   CALL mpp_sum( 'icbdia', rsumbuf(1:23), 23)
    !
    stored_end = rsumbuf( 1)
@@ 269 @@
       nsumbuf(4+ik) = nbergs_calved_by_class(ik)
    END DO
-   CALL mpp_sum( nsumbuf(1:nclasses+4), nclasses+4 )
+   CALL mpp_sum( 'icbdia', nsumbuf(1:nclasses+4), nclasses+4 )
    !
    nbergs_end = nsumbuf(1)
@@ 436 @@
    IF( kt == nit000 ) THEN
       stored_start = SUM( berg_grid%stored_ice(:,:,:) )
-      IF( lk_mpp ) CALL mpp_sum( stored_start )
+      IF( lk_mpp ) CALL mpp_sum( 'icbdia', stored_start )
       WRITE(numicb,'(a,es13.6,a)') 'icb_dia_income: initial stored mass=',stored_start,' kg'
       !
       stored_heat_start = SUM( berg_grid%stored_heat(:,:) )
-      IF( lk_mpp ) CALL mpp_sum( stored_heat_start )
+      IF( lk_mpp ) CALL mpp_sum( 'icbdia', stored_heat_start )
       WRITE(numicb,'(a,es13.6,a)') 'icb_dia_income: initial stored heat=',stored_heat_start,' J'
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icbini.F90
r10170 → r10297

@@ 335 @@
    !
    ibergs = icb_utl_count()
-   IF( lk_mpp ) CALL mpp_sum(ibergs)
+   IF( lk_mpp ) CALL mpp_sum('icbini', ibergs)
    WRITE(numicb,'(a,i6,a)') 'diamonds, icb_ini_gen: ',ibergs,' were generated'
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icblbc.F90
r10068 → r10297

@@ 551 @@
       this => this%next
    ENDDO   ! WHILE
-   CALL mpp_sum(i)
+   CALL mpp_sum('icblbc', i)
    IF( i .GT. 0 ) THEN
       WRITE( numicb,'(a,i4)') 'send_bergs_to_other_pes: # of bergs outside computational domain = ',i
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icbrst.F90
r10068 → r10297

@@ 141 @@
    IF( lk_mpp ) THEN
       ! Only mpp_sum ibergs_in_file if we are reading from multiple restart files.
-      IF( INDEX(iom_file(ncid)%name,'icebergs.nc' ) .EQ. 0 ) CALL mpp_sum(ibergs_in_file)
-      CALL mpp_sum(jn)
+      IF( INDEX(iom_file(ncid)%name,'icebergs.nc' ) .EQ. 0 ) CALL mpp_sum('icbrst', ibergs_in_file)
+      CALL mpp_sum('icbrst', jn)
    ENDIF
    IF( lwp )   WRITE(numout,'(a,i5,a,i5,a)') 'icebergs, icb_rst_read: there were',ibergs_in_file,   &
@@ 164 @@
    ENDIF
    IF( lk_mpp ) THEN
-      CALL mpp_sum(ibase_err)
+      CALL mpp_sum('icbrst', ibase_err)
    ENDIF
    IF( ibase_err > 0 ) THEN
@@ 171 @@
    ! all future icebergs numbers will be greater than the current global maximum
    IF( lk_mpp ) THEN
-      CALL mpp_max(imax_icb)
+      CALL mpp_max('icbrst', imax_icb)
    ENDIF
    num_bergs(1) = imax_icb - jpnij + narea
@@ 404 @@
    WRITE(numout,'(2(a,i5))') 'icebergs, icb_rst_write: # bergs =',jn,' on PE',narea-1
    IF( lk_mpp ) THEN
-      CALL mpp_sum(jn)
+      CALL mpp_sum('icbrst', jn)
    ENDIF
    IF(lwp) WRITE(numout,'(a,i5,a,i5,a)') 'icebergs, icb_rst_write: ', jn,   &
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ICB/icbutl.F90
r10068 → r10297

@@ 652 @@
    ibergs = icb_utl_count()
    inbergs = ibergs
-   IF( lk_mpp ) CALL mpp_sum(inbergs)
+   IF( lk_mpp ) CALL mpp_sum('icbutl', inbergs)
    IF( ibergs > 0 ) WRITE(numicb,'(a," there are",i5," bergs out of",i6," on PE ",i4)')   &
       &                  cd_label, ibergs, inbergs, narea
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/lib_mpp.F90
r10292 → r10297

@@ 158 @@
    ! Communications summary report
-   CHARACTER(len=128), DIMENSION(:), ALLOCATABLE ::   crname       !: names of calling routines
+   CHARACTER(len=128), DIMENSION(:), ALLOCATABLE ::   crname_lbc   !: names of lbc_lnk calling routines
+   CHARACTER(len=128), DIMENSION(:), ALLOCATABLE ::   crname_glb   !: names of global comm calling routines
    INTEGER, PUBLIC ::   ncom_stp = 0     !: copy of time step # istp
+   INTEGER, PUBLIC ::   ncom_fsbc = 1    !: copy of sbc time step # nn_fsbc
+   INTEGER, PUBLIC ::   ncom_dttrc = 1   !: copy of top time step # nn_dttrc
    INTEGER, PUBLIC , DIMENSION(:,:), ALLOCATABLE ::   ncomm_sequence   !: size of communicated arrays (halos)
-   INTEGER, PUBLIC ::   n_sequence = 0   !: # of communicated arrays
-   LOGICAL ::   l_comm_report_done = .false.   !: print report only once
+   INTEGER, PUBLIC ::   n_sequence_lbc = 0   !: # of communicated arrays via lbc
+   INTEGER, PUBLIC ::   n_sequence_glb = 0   !: # of global communications
+   INTEGER, PUBLIC ::   numcom = -1          !: logical unit for communication report

    ! timing summary report
    REAL(wp), PUBLIC ::   waiting_time = 0._wp, compute_time = 0._wp, elapsed_time = 0._wp
@@ 577 @@
    END SUBROUTINE mppscatter

-   !!----------------------------------------------------------------------
-   !!   ***  mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real  ***
-   !!----------------------------------------------------------------------
-   SUBROUTINE mppmax_a_int( ktab, kdim, kcom )
-      INTEGER , INTENT(in   )                  ::   kdim   ! size of array
-      INTEGER , INTENT(inout), DIMENSION(kdim) ::   ktab   ! input array
-      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom
-      INTEGER ::   ierror, ilocalcomm   ! temporary integer
-      INTEGER, DIMENSION(kdim) ::   iwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_max, ilocalcomm, ierror )
-      ktab(:) = iwork(:)
-   END SUBROUTINE mppmax_a_int
-
-   SUBROUTINE mppmax_int( ktab, kcom )
-      INTEGER, INTENT(inout)           ::   ktab
-      INTEGER, INTENT(in   ), OPTIONAL ::   kcom
-      INTEGER ::   ierror, iwork, ilocalcomm   ! temporary integer
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, ilocalcomm, ierror )
-      ktab = iwork
-   END SUBROUTINE mppmax_int
-
-   SUBROUTINE mppmax_a_real( ptab, kdim, kcom )
-      REAL(wp), DIMENSION(kdim), INTENT(inout) ::   ptab
-      INTEGER , INTENT(in   )                  ::   kdim
-      INTEGER , OPTIONAL       , INTENT(in   ) ::   kcom
-      INTEGER ::   ierror, ilocalcomm
-      REAL(wp), DIMENSION(kdim) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_max, ilocalcomm, ierror )
-      ptab(:) = zwork(:)
-   END SUBROUTINE mppmax_a_real
-
-   SUBROUTINE mppmax_real( ptab, kcom )
-      REAL(wp), INTENT(inout)           ::   ptab
-      INTEGER , INTENT(in   ), OPTIONAL ::   kcom
-      INTEGER  ::   ierror, ilocalcomm
-      REAL(wp) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_max, ilocalcomm, ierror )
-      ptab = zwork
-   END SUBROUTINE mppmax_real
-
    SUBROUTINE mpp_ilor( ld_switch, ldlast, kcom )
    …
    END SUBROUTINE mpp_ilor

+   !!----------------------------------------------------------------------
+   !!   ***  mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real  ***
+   !!----------------------------------------------------------------------
+#  define OPERATION_MAX
+#  define INTEGER_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppmax_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppmax_a_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef INTEGER_TYPE
+#  define REAL_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppmax_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppmax_a_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef REAL_TYPE
+#  undef OPERATION_MAX

    !!----------------------------------------------------------------------
    !!   ***  mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real  ***
    !!----------------------------------------------------------------------
-   SUBROUTINE mppmin_a_int( ktab, kdim, kcom )
-      INTEGER , INTENT( in  )                  ::   kdim   ! size of array
-      INTEGER , INTENT(inout), DIMENSION(kdim) ::   ktab   ! input array
-      INTEGER , INTENT( in  ), OPTIONAL        ::   kcom
-      INTEGER ::   ierror, ilocalcomm   ! temporary integer
-      INTEGER, DIMENSION(kdim) ::   iwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_min, ilocalcomm, ierror )
-      ktab(:) = iwork(:)
-   END SUBROUTINE mppmin_a_int
-
-   SUBROUTINE mppmin_int( ktab, kcom )
-      INTEGER, INTENT(inout)          ::   ktab
-      INTEGER, INTENT( in ), OPTIONAL ::   kcom
-      INTEGER ::   ierror, iwork, ilocalcomm
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_min, ilocalcomm, ierror )
-      ktab = iwork
-   END SUBROUTINE mppmin_int
-
-   SUBROUTINE mppmin_a_real( ptab, kdim, kcom )
-      INTEGER , INTENT(in   )                  ::   kdim
-      REAL(wp), INTENT(inout), DIMENSION(kdim) ::   ptab
-      INTEGER , INTENT(in   ), OPTIONAL        ::   kcom
-      INTEGER ::   ierror, ilocalcomm
-      REAL(wp), DIMENSION(kdim) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_min, ilocalcomm, ierror )
-      ptab(:) = zwork(:)
-   END SUBROUTINE mppmin_a_real
-
-   SUBROUTINE mppmin_real( ptab, kcom )
-      REAL(wp), INTENT(inout)           ::   ptab
-      INTEGER , INTENT(in   ), OPTIONAL ::   kcom
-      INTEGER  ::   ierror, ilocalcomm
-      REAL(wp) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_min, ilocalcomm, ierror )
-      ptab = zwork
-   END SUBROUTINE mppmin_real
+#  define OPERATION_MIN
+#  define INTEGER_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppmin_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppmin_a_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef INTEGER_TYPE
+#  define REAL_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppmin_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppmin_a_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef REAL_TYPE
+#  undef OPERATION_MIN

    !!----------------------------------------------------------------------
    …
    !!----------------------------------------------------------------------
-   SUBROUTINE mppsum_a_int( ktab, kdim )
-      INTEGER, INTENT(in   )                   ::   kdim
-      INTEGER, INTENT(inout), DIMENSION (kdim) ::   ktab
-      INTEGER ::   ierror
-      INTEGER, DIMENSION (kdim) ::   iwork
-      CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer, mpi_sum, mpi_comm_oce, ierror )
-      ktab(:) = iwork(:)
-   END SUBROUTINE mppsum_a_int
-
-   SUBROUTINE mppsum_int( ktab )
-      INTEGER, INTENT(inout) ::   ktab
-      INTEGER ::   ierror, iwork
-      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_sum, mpi_comm_oce, ierror )
-      ktab = iwork
-   END SUBROUTINE mppsum_int
-
-   SUBROUTINE mppsum_a_real( ptab, kdim, kcom )
-      INTEGER , INTENT(in   )                  ::   kdim   ! size of ptab
-      REAL(wp), DIMENSION(kdim), INTENT(inout) ::   ptab   ! input array
-      INTEGER , OPTIONAL       , INTENT(in   ) ::   kcom   ! specific communicator
-      INTEGER  ::   ierror, ilocalcomm   ! local integer
-      REAL(wp) ::   zwork(kdim)          ! local workspace
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, kdim, mpi_double_precision, mpi_sum, ilocalcomm, ierror )
-      ptab(:) = zwork(:)
-   END SUBROUTINE mppsum_a_real
-
-   SUBROUTINE mppsum_real( ptab, kcom )
-      REAL(wp), INTENT(inout)           ::   ptab   ! input scalar
-      INTEGER , OPTIONAL, INTENT(in   ) ::   kcom
-      INTEGER  ::   ierror, ilocalcomm
-      REAL(wp) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision, mpi_sum, ilocalcomm, ierror )
-      ptab = zwork
-   END SUBROUTINE mppsum_real
-
-   SUBROUTINE mppsum_realdd( ytab, kcom )
-      COMPLEX(wp), INTENT(inout)           ::   ytab   ! input scalar
-      INTEGER    , OPTIONAL, INTENT(in   ) ::   kcom
-      INTEGER     ::   ierror, ilocalcomm
-      COMPLEX(wp) ::   zwork
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL MPI_ALLREDUCE( ytab, zwork, 1, MPI_DOUBLE_COMPLEX, MPI_SUMDD, ilocalcomm, ierror )
-      ytab = zwork
-   END SUBROUTINE mppsum_realdd
-
-   SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
-      INTEGER                     , INTENT(in   ) ::   kdim   ! size of ytab
-      COMPLEX(wp), DIMENSION(kdim), INTENT(inout) ::   ytab   ! input array
-      INTEGER    , OPTIONAL       , INTENT(in   ) ::   kcom
-      INTEGER ::   ierror, ilocalcomm   ! local integer
-      COMPLEX(wp), DIMENSION(kdim) ::   zwork   ! temporary workspace
-      ilocalcomm = mpi_comm_oce
-      IF( PRESENT(kcom) )   ilocalcomm = kcom
-      CALL MPI_ALLREDUCE( ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, MPI_SUMDD, ilocalcomm, ierror )
-      ytab(:) = zwork(:)
-   END SUBROUTINE mppsum_a_realdd
+#  define OPERATION_SUM
+#  define INTEGER_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppsum_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppsum_a_int
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef INTEGER_TYPE
+#  define REAL_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppsum_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppsum_a_real
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef REAL_TYPE
+#  undef OPERATION_SUM
+
+#  define OPERATION_SUM_DD
+#  define COMPLEX_TYPE
+#  define DIM_0d
+#     define ROUTINE_ALLREDUCE           mppsum_realdd
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_0d
+#  define DIM_1d
+#     define ROUTINE_ALLREDUCE           mppsum_a_realdd
+#     include "mpp_allreduce_generic.h90"
+#     undef ROUTINE_ALLREDUCE
+#  undef DIM_1d
+#  undef COMPLEX_TYPE
+#  undef OPERATION_SUM_DD

    SUBROUTINE mppmax_real_multiple( pt1d, kdim, kcom )
@@ 1117 @@
       l_znl_root = .FALSE.
       kwork (1) = nimpp
-      CALL mpp_min ( kwork(1), kcom = ncomm_znl)
+      CALL mpp_min ( 'lib_mpp', kwork(1), kcom = ncomm_znl)
       IF ( nimpp == kwork(1)) l_znl_root = .TRUE.
    END IF
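The explicit mppmax_*/mppmin_*/mppsum_* bodies deleted above are now generated from a single template, mpp_allreduce_generic.h90, included repeatedly under different macro settings. The template file itself is not part of this listing; the sketch below is an inferred reconstruction of what one expansion (OPERATION_MAX, INTEGER_TYPE, DIM_0d, ROUTINE_ALLREDUCE = mppmax_int) should reduce to, based on the routines it replaces and on the new cdname argument visible at every call site in this changeset. The real template may differ in detail.

   ! Inferred expansion, for illustration only -- not copied from the branch.
   SUBROUTINE mppmax_int( cdname, ktab, kcom )
      CHARACTER(len=*), INTENT(in   )           ::   cdname   ! name of the calling routine,
      !                                                       ! recorded for the communication
      !                                                       ! report (bookkeeping omitted here)
      INTEGER         , INTENT(inout)           ::   ktab     ! value reduced with MPI_MAX
      INTEGER         , INTENT(in   ), OPTIONAL ::   kcom     ! optional communicator
      INTEGER ::   ierror, iwork, ilocalcomm
      ilocalcomm = mpi_comm_oce
      IF( PRESENT(kcom) )   ilocalcomm = kcom
      CALL mpi_allreduce( ktab, iwork, 1, mpi_integer, mpi_max, ilocalcomm, ierror )
      ktab = iwork
   END SUBROUTINE mppmax_int

The macro triples (OPERATION_MAX/MIN/SUM/SUM_DD, INTEGER_TYPE/REAL_TYPE/COMPLEX_TYPE, DIM_0d/DIM_1d) thus generate all fourteen wrappers from one body, so the caller-name bookkeeping only has to be written once.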
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/mpp_lnk_generic.h90
r10179 → r10297

@@ 63 @@
    INTEGER  ::   ml_req1, ml_req2, ml_err   ! for key_mpi_isend
    INTEGER  ::   ierr
+   INTEGER  ::   icom_freq
    REAL(wp) ::   zland
    INTEGER , DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
@@ 153 @@
    !
    IF( narea == 1 )  THEN
-      IF ( ncom_stp == nit000 ) THEN
+
+      ! find the smallest common frequency: default = frequency product, if multiple, choose the larger of the 2 frequency
+      icom_freq = ncom_fsbc * ncom_dttrc
+      IF( MOD( MAX(ncom_fsbc,ncom_dttrc), MIN(ncom_fsbc,ncom_dttrc) ) == 0 ) icom_freq = MAX(ncom_fsbc,ncom_dttrc)
+
+      IF ( ncom_stp == nit000+icom_freq ) THEN   ! avoid to count extra communications in potential initializations at nit000
          IF( .NOT. ALLOCATED( ncomm_sequence) ) THEN
             ALLOCATE( ncomm_sequence(2000,2), STAT=ierr )
             IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'lnk_generic, cannot allocate ncomm_sequence' )
-            ALLOCATE( crname(2000), STAT=ierr )
+            ALLOCATE( crname_lbc(2000), STAT=ierr )
             IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'lnk_generic, cannot allocate crname' )
          ENDIF
-         n_sequence = n_sequence + 1
-         IF( n_sequence > 2000 ) CALL ctl_stop( 'STOP', 'lnk_generic, increase ncomm_sequence first dimension' )
-         ncomm_sequence(n_sequence,1) = ipk*ipl   ! size of 3rd and 4th dimensions
-         ncomm_sequence(n_sequence,2) = ipf       ! number of arrays to be treated (multi)
-         crname(n_sequence) = cdname              ! keep the name of the calling routine
-      ELSE IF ( ncom_stp == (nit000+1) ) THEN
-         IF ( .NOT. l_comm_report_done ) THEN
-            WRITE(numout,*) ' '
-            WRITE(numout,*) ' -----------------------------------------------'
-            WRITE(numout,*) ' Communication pattern report (first time step):'
-            WRITE(numout,*) ' -----------------------------------------------'
-            WRITE(numout,*) ' '
-            WRITE(numout,'(A,I4)') ' Exchanged halos : ', n_sequence
+         n_sequence_lbc = n_sequence_lbc + 1
+         IF( n_sequence_lbc > 2000 ) CALL ctl_stop( 'STOP', 'lnk_generic, increase ncomm_sequence first dimension' )
+         ncomm_sequence(n_sequence_lbc,1) = ipk*ipl   ! size of 3rd and 4th dimensions
+         ncomm_sequence(n_sequence_lbc,2) = ipf       ! number of arrays to be treated (multi)
+         crname_lbc(n_sequence_lbc) = cdname          ! keep the name of the calling routine
+      ELSE IF ( ncom_stp == (nit000+2*icom_freq) ) THEN
+         IF ( numcom == -1 ) THEN
+            CALL ctl_opn( numcom, 'communication_report.txt', 'REPLACE', 'FORMATTED', 'SEQUENTIAL', -1, numout, .FALSE., narea )
+            WRITE(numcom,*) ' '
+            WRITE(numcom,*) ' ------------------------------------------------------------'
+            WRITE(numcom,*) ' Communication pattern report (second oce+sbc+top time step):'
+            WRITE(numcom,*) ' ------------------------------------------------------------'
+            WRITE(numcom,*) ' '
+            WRITE(numcom,'(A,I4)') ' Exchanged halos : ', n_sequence_lbc
             jj = 0; jk = 0; jf = 0; jh = 0
-            DO ji = 1, n_sequence
+            DO ji = 1, n_sequence_lbc
                IF ( ncomm_sequence(ji,1) .GT. 1 ) jk = jk + 1
                IF ( ncomm_sequence(ji,2) .GT. 1 ) jf = jf + 1
               …
               jh = MAX (jh, ncomm_sequence(ji,1)*ncomm_sequence(ji,2))
            END DO
-            WRITE(numout,'(A,I3)') ' 3D Exchanged halos : ', jk
-            WRITE(numout,'(A,I3)') ' Multi arrays exchanged halos : ', jf
-            WRITE(numout,'(A,I3)') ' from which 3D : ', jj
-            WRITE(numout,'(A,I10)') ' Array max size : ', jh*jpi*jpj
-            WRITE(numout,*) ' '
-            WRITE(numout,*) ' lbc_lnk called'
+            WRITE(numcom,'(A,I3)') ' 3D Exchanged halos : ', jk
+            WRITE(numcom,'(A,I3)') ' Multi arrays exchanged halos : ', jf
+            WRITE(numcom,'(A,I3)') ' from which 3D : ', jj
+            WRITE(numcom,'(A,I10)') ' Array max size : ', jh*jpi*jpj
+            WRITE(numcom,*) ' '
+            WRITE(numcom,*) ' lbc_lnk called'
             jj = 1
-            DO ji = 2, n_sequence
-               IF( crname (ji-1) /= crname(ji) ) THEN
-                  WRITE(numout,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname(ji-1))
+            DO ji = 2, n_sequence_lbc
+               IF( crname_lbc(ji-1) /= crname_lbc(ji) ) THEN
+                  WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(ji-1))
                   jj = 0
                END IF
               jj = jj + 1
            END DO
-            WRITE(numout,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname(n_sequence))
-            WRITE(numout,*) ' '
-            WRITE(numout,*) ' -----------------------------------------------'
-            WRITE(numout,*) ' '
+            WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc))
+            WRITE(numcom,*) ' '
+            IF ( n_sequence_glb > 0 ) THEN
+               WRITE(numcom,'(A,I4)') ' Global communications : ', n_sequence_glb
+               jj = 1
+               DO ji = 2, n_sequence_glb
+                  IF( crname_glb(ji-1) /= crname_glb(ji) ) THEN
+                     WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(ji-1))
+                     jj = 0
+                  END IF
+                  jj = jj + 1
+               END DO
+               WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb))
+               DEALLOCATE(crname_glb)
+            ELSE
+               WRITE(numcom,*) ' No MPI global communication '
+            ENDIF
+            WRITE(numcom,*) ' '
+            WRITE(numcom,*) ' -----------------------------------------------'
+            WRITE(numcom,*) ' '
             DEALLOCATE(ncomm_sequence)
-            DEALLOCATE(crname)
-            l_comm_report_done = .TRUE.
+            DEALLOCATE(crname_lbc)
          ENDIF
       ENDIF
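The report is now written to its own file, communication_report.txt, on the second "common" time step, nit000 + 2*icom_freq, rather than at nit000+1, so that communications issued only every nn_fsbc (surface forcing) or nn_dttrc (passive tracer) steps are each counted exactly once. Worked examples of the frequency rule added above (the sample values are illustrative):

   icom_freq = ncom_fsbc * ncom_dttrc           ! default: the product, always a common multiple
   IF( MOD( MAX(ncom_fsbc,ncom_dttrc), MIN(ncom_fsbc,ncom_dttrc) ) == 0 )   &
      icom_freq = MAX( ncom_fsbc, ncom_dttrc )  ! one divides the other: the larger covers both
   ! ncom_fsbc = 1, ncom_dttrc = 1  ->  MOD(1,1) = 0  ->  icom_freq = 1
   ! ncom_fsbc = 2, ncom_dttrc = 6  ->  MOD(6,2) = 0  ->  icom_freq = 6
   ! ncom_fsbc = 4, ncom_dttrc = 6  ->  MOD(6,4) = 2  ->  icom_freq = 24 (the product)

Note that the product is a common multiple but not necessarily the least one (in the last example 12 would suffice); the simple rule just errs on the side of sampling a later, fully representative step.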
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/LBC/mppini.F90
r10068 → r10297

@@ 173 @@
       &       ilej(jpni,jpnj), ildj(jpni,jpnj), ioso(jpni,jpnj), iowe(jpni,jpnj),   &
       &       STAT=ierr )
-   CALL mpp_sum( ierr )
+   CALL mpp_sum( 'mppini', ierr )
    IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate standard ocean arrays' )
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/OBS/obs_averg_h2d.F90
r10069 → r10297

@@ 616 @@

    IF(lk_mpp) THEN
-      CALL mpp_min( ze1min )
-      CALL mpp_min( ze2min )
+      CALL mpp_min( 'obs_averg_h2d', ze1min )
+      CALL mpp_min( 'obs_averg_h2d', ze2min )
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/OBS/obs_readmdt.F90
r10068 → r10297

@@ 220 @@
    END DO

-   IF( lk_mpp) CALL mpp_sum( zeta1 )
-   IF( lk_mpp) CALL mpp_sum( zeta2 )
-   IF( lk_mpp) CALL mpp_sum( zarea )
+   IF( lk_mpp) CALL mpp_sum( 'obs_readmdt', zeta1 )
+   IF( lk_mpp) CALL mpp_sum( 'obs_readmdt', zeta2 )
+   IF( lk_mpp) CALL mpp_sum( 'obs_readmdt', zarea )

    zcorr_mdt = zeta1 / zarea
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/OBS/obs_write.F90
r10068 → r10297

@@ 592 @@

    CALL obs_mpp_sum_integer( inumgoodobs, inumgoodobsmpp )
-   CALL mpp_sum(zsumx)
-   CALL mpp_sum(zsumx2)
+   CALL mpp_sum('obs_write', zsumx)
+   CALL mpp_sum('obs_write', zsumx2)

    IF (lwp) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/geo2ocean.F90
r10170 → r10297

@@ 156 @@
       &      gsinv(jpi,jpj), gcosv(jpi,jpj),   &
       &      gsinf(jpi,jpj), gcosf(jpi,jpj), STAT=ierr )
-   IF(lk_mpp) CALL mpp_sum( ierr )
+   IF(lk_mpp) CALL mpp_sum( 'geo2ocean', ierr )
    IF( ierr /= 0 ) CALL ctl_stop( 'angle: unable to allocate arrays' )
    !
@@ 308 @@
    ALLOCATE( gsinlon(jpi,jpj,4) , gcoslon(jpi,jpj,4) ,   &
       &      gsinlat(jpi,jpj,4) , gcoslat(jpi,jpj,4) , STAT=ierr )
-   IF( lk_mpp ) CALL mpp_sum( ierr )
+   IF( lk_mpp ) CALL mpp_sum( 'geo2ocean', ierr )
    IF( ierr /= 0 ) CALL ctl_stop('geo2oce: unable to allocate arrays' )
    ENDIF
@@ 385 @@
    ALLOCATE( gsinlon(jpi,jpj,4) , gcoslon(jpi,jpj,4) ,   &
       &      gsinlat(jpi,jpj,4) , gcoslat(jpi,jpj,4) , STAT=ierr )
-   IF( lk_mpp ) CALL mpp_sum( ierr )
+   IF( lk_mpp ) CALL mpp_sum( 'geo2ocean', ierr )
    IF( ierr /= 0 ) CALL ctl_stop('oce2geo: unable to allocate arrays' )
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbc_ice.F90
r10068 → r10297

@@ 150 @@

    sbc_ice_alloc = MAXVAL( ierr )
-   IF( lk_mpp ) CALL mpp_sum ( sbc_ice_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbc_ice', sbc_ice_alloc )
    IF( sbc_ice_alloc > 0 ) CALL ctl_warn('sbc_ice_alloc: allocation of arrays failed')
    END FUNCTION sbc_ice_alloc
@@ 197 @@
    ALLOCATE( snwice_mass(jpi,jpj) , snwice_mass_b(jpi,jpj), snwice_fmass(jpi,jpj) , STAT=ierr(1) )
    sbc_ice_alloc = MAXVAL( ierr )
-   IF( lk_mpp ) CALL mpp_sum ( sbc_ice_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbc_ice', sbc_ice_alloc )
    IF( sbc_ice_alloc > 0 ) CALL ctl_warn('sbc_ice_alloc: allocation of arrays failed')
    END FUNCTION sbc_ice_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbc_oce.F90
r10170 → r10297

@@ 186 @@
    !
    sbc_oce_alloc = MAXVAL( ierr )
-   IF( lk_mpp ) CALL mpp_sum ( sbc_oce_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbc_oce', sbc_oce_alloc )
    IF( sbc_oce_alloc > 0 ) CALL ctl_warn('sbc_oce_alloc: allocation of arrays failed')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcblk.F90
r10170 → r10297

@@ 147 @@
       &      cdn_oce(jpi,jpj), chn_oce(jpi,jpj), cen_oce(jpi,jpj), STAT=sbc_blk_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( sbc_blk_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbcblk', sbc_blk_alloc )
    IF( sbc_blk_alloc /= 0 ) CALL ctl_warn('sbc_blk_alloc: failed to allocate arrays')
    END FUNCTION sbc_blk_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbccpl.F90
r10170 → r10297

@@ 224 @@

    sbc_cpl_alloc = MAXVAL( ierr )
-   IF( lk_mpp ) CALL mpp_sum ( sbc_cpl_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbccpl', sbc_cpl_alloc )
    IF( sbc_cpl_alloc > 0 ) CALL ctl_warn('sbc_cpl_alloc: allocation of arrays failed')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcdcy.F90
r10068 → r10297

@@ 44 @@
       &      rtmd(jpi,jpj) , rdawn(jpi,jpj) , rdusk(jpi,jpj) , rscal(jpi,jpj) , STAT=sbc_dcy_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( sbc_dcy_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbcdcy', sbc_dcy_alloc )
    IF( sbc_dcy_alloc /= 0 ) CALL ctl_warn('sbc_dcy_alloc: failed to allocate arrays')
    END FUNCTION sbc_dcy_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcice_cice.F90
r10170 → r10297

@@ 100 @@
    !!----------------------------------------------------------------------
    ALLOCATE( png(jpi,jpj,jpnij), STAT=sbc_ice_cice_alloc )
-   IF( lk_mpp ) CALL mpp_sum ( sbc_ice_cice_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbcice_cice', sbc_ice_cice_alloc )
    IF( sbc_ice_cice_alloc > 0 ) CALL ctl_warn('sbc_ice_cice_alloc: allocation of arrays failed.')
    END FUNCTION sbc_ice_cice_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcisf.F90
r10170 → r10297

@@ 244 @@
       &      STAT= sbc_isf_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( sbc_isf_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbcisf', sbc_isf_alloc )
    IF( sbc_isf_alloc /= 0 ) CALL ctl_warn('sbc_isf_alloc: failed to allocate arrays.')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcmod.F90
r10170 → r10297

@@ 115 @@
    IF(lwm) WRITE( numond, namsbc )
    !
+#if defined key_mpp_mpi
+   ncom_fsbc = nn_fsbc    ! make nn_fsbc available for lib_mpp
+#endif
    !                          !* overwrite namelist parameter using CPP key information
 #if defined key_agrif
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/SBC/sbcrnf.F90
r10068 → r10297

@@ 83 @@
       &      rnf_tsc_b(jpi,jpj,jpts) , rnf_tsc (jpi,jpj,jpts) , STAT=sbc_rnf_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( sbc_rnf_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'sbcrnf', sbc_rnf_alloc )
    IF( sbc_rnf_alloc > 0 ) CALL ctl_warn('sbc_rnf_alloc: allocation of arrays failed')
    END FUNCTION sbc_rnf_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRA/trabbl.F90
r10170 → r10297

@@ 84 @@
       &      e3u_bbl_0(jpi,jpj) , e3v_bbl_0(jpi,jpj) , STAT=tra_bbl_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( tra_bbl_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trabbl', tra_bbl_alloc )
    IF( tra_bbl_alloc > 0 ) CALL ctl_warn('tra_bbl_alloc: allocation of arrays failed.')
    END FUNCTION tra_bbl_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRA/tradmp.F90
r10068 → r10297

@@ 66 @@
    ALLOCATE( resto(jpi,jpj,jpk), STAT= tra_dmp_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( tra_dmp_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'tradmp', tra_dmp_alloc )
    IF( tra_dmp_alloc > 0 ) CALL ctl_warn('tra_dmp_alloc: allocation of arrays failed')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRA/traldf_triad.F90
r10068 → r10297

@@ 98 @@
    IF( .NOT.ALLOCATED(zdkt3d) ) THEN
       ALLOCATE( zdkt3d(jpi,jpj,0:1) , STAT=ierr )
-      IF( lk_mpp ) CALL mpp_sum ( ierr )
+      IF( lk_mpp ) CALL mpp_sum ( 'traldf_triad', ierr )
       IF( ierr > 0 ) CALL ctl_stop('STOP', 'tra_ldf_triad: unable to allocate arrays')
    ENDIF
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdglo.F90
r10068 → r10297

@@ 249 @@
    !                                ! ---------------------------------
    IF( lk_mpp ) THEN
-      CALL mpp_sum( peke )
-      CALL mpp_sum( umo , jptot_dyn )
-      CALL mpp_sum( vmo , jptot_dyn )
-      CALL mpp_sum( hke , jptot_dyn )
+      CALL mpp_sum( 'trdglo', peke )
+      CALL mpp_sum( 'trdglo', umo , jptot_dyn )
+      CALL mpp_sum( 'trdglo', vmo , jptot_dyn )
+      CALL mpp_sum( 'trdglo', hke , jptot_dyn )
    ENDIF
@@ 392 @@
    !                                ! -------------------------------
    IF( lk_mpp ) THEN
-      CALL mpp_sum( tmo, jptot_tra )
-      CALL mpp_sum( smo, jptot_tra )
-      CALL mpp_sum( t2 , jptot_tra )
-      CALL mpp_sum( s2 , jptot_tra )
+      CALL mpp_sum( 'trdglo', tmo, jptot_tra )
+      CALL mpp_sum( 'trdglo', smo, jptot_tra )
+      CALL mpp_sum( 'trdglo', t2 , jptot_tra )
+      CALL mpp_sum( 'trdglo', s2 , jptot_tra )
    ENDIF
@@ 526 @@
       tvolt = tvolt + SUM( e1e2t(:,:) * e3t_n(:,:,jk) * tmask(:,:,jk) * tmask_i(:,:) )
    END DO
-   IF( lk_mpp ) CALL mpp_sum( tvolt )   ! sum over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'trdglo', tvolt )   ! sum over the global domain

    IF(lwp) WRITE(numout,*) '                total ocean volume at T-point   tvolt = ',tvolt
@@ 546 @@
       END DO
    END DO
-   IF( lk_mpp ) CALL mpp_sum( tvolu )   ! sums over the global domain
-   IF( lk_mpp ) CALL mpp_sum( tvolv )
+   IF( lk_mpp ) CALL mpp_sum( 'trdglo', tvolu )   ! sums over the global domain
+   IF( lk_mpp ) CALL mpp_sum( 'trdglo', tvolv )

    IF(lwp) THEN
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdken.F90
r10170 → r10297

@@ 54 @@
    ALLOCATE( bu(jpi,jpj,jpk) , bv(jpi,jpj,jpk) , r1_bt(jpi,jpj,jpk) , STAT= trd_ken_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( trd_ken_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdken', trd_ken_alloc )
    IF( trd_ken_alloc /= 0 ) CALL ctl_warn('trd_ken_alloc: failed to allocate arrays')
    END FUNCTION trd_ken_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdmxl.F90
r10170 → r10297

@@ 81 @@
    ALLOCATE( ndextrd1(jpi*jpj) , STAT=trd_mxl_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( trd_mxl_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdmxl', trd_mxl_alloc )
    IF( trd_mxl_alloc /= 0 ) CALL ctl_warn('trd_mxl_alloc: failed to allocate array ndextrd1')
    END FUNCTION trd_mxl_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdmxl_oce.F90
r10068 → r10297

@@ 127 @@
    !
    trdmxl_oce_alloc = MAXVAL( ierr )
-   IF( lk_mpp ) CALL mpp_sum ( trdmxl_oce_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdmxl_oce', trdmxl_oce_alloc )
    IF( trdmxl_oce_alloc /= 0 ) CALL ctl_warn('trdmxl_oce_alloc: failed to allocate arrays')
    !
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdpen.F90
r10068 → r10297

@@ 50 @@
    ALLOCATE( rab_pe(jpi,jpj,jpk,jpts) , STAT= trd_pen_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( trd_pen_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdpen', trd_pen_alloc )
    IF( trd_pen_alloc /= 0 ) CALL ctl_warn( 'trd_pen_alloc: failed to allocate arrays' )
    END FUNCTION trd_pen_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdtra.F90
r10068 → r10297

@@ 55 @@
    ALLOCATE( trdtx(jpi,jpj,jpk) , trdty(jpi,jpj,jpk) , trdt(jpi,jpj,jpk) , avt_evd(jpi,jpj,jpk), STAT= trd_tra_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( trd_tra_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdtra', trd_tra_alloc )
    IF( trd_tra_alloc /= 0 ) CALL ctl_warn('trd_tra_alloc: failed to allocate arrays')
    END FUNCTION trd_tra_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/TRD/trdvor.F90
r10170 → r10297

@@ 73 @@
       &      ndexvor1(jpi*jpj) , STAT= trd_vor_alloc )
    !
-   IF( lk_mpp ) CALL mpp_sum ( trd_vor_alloc )
+   IF( lk_mpp ) CALL mpp_sum ( 'trdvor', trd_vor_alloc )
    IF( trd_vor_alloc /= 0 ) CALL ctl_warn('trd_vor_alloc: failed to allocate arrays')
    END FUNCTION trd_vor_alloc
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdfgls.F90
r10068 r10297 119 119 & zwall (jpi,jpj,jpk) , ustar2_top (jpi,jpj) , ustar2_bot(jpi,jpj) , STAT= zdf_gls_alloc ) 120 120 ! 121 IF( lk_mpp ) CALL mpp_sum ( zdf_gls_alloc )121 IF( lk_mpp ) CALL mpp_sum ( 'zdfgls', zdf_gls_alloc ) 122 122 IF( zdf_gls_alloc /= 0 ) CALL ctl_warn('zdf_gls_alloc: failed to allocate arrays') 123 123 END FUNCTION zdf_gls_alloc -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdfiwm.F90
r10069 r10297 64 64 & hbot_iwm(jpi,jpj), hcri_iwm(jpi,jpj) , STAT=zdf_iwm_alloc ) 65 65 ! 66 IF( lk_mpp ) CALL mpp_sum ( zdf_iwm_alloc )66 IF( lk_mpp ) CALL mpp_sum ( 'zdfiwm', zdf_iwm_alloc ) 67 67 IF( zdf_iwm_alloc /= 0 ) CALL ctl_warn('zdf_iwm_alloc: failed to allocate arrays') 68 68 END FUNCTION zdf_iwm_alloc … … 305 305 END DO 306 306 END DO 307 IF( lk_mpp ) CALL mpp_sum( zztmp )307 IF( lk_mpp ) CALL mpp_sum( 'zdfiwm', zztmp ) 308 308 zztmp = rau0 * zztmp ! Global integral of rau0 * Kz * N^2 = power contributing to mixing 309 309 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdfmxl.F90
r10068 r10297 50 50 ALLOCATE( nmln(jpi,jpj), hmld(jpi,jpj), hmlp(jpi,jpj), hmlpt(jpi,jpj), STAT= zdf_mxl_alloc ) 51 51 ! 52 IF( lk_mpp ) CALL mpp_sum ( zdf_mxl_alloc )52 IF( lk_mpp ) CALL mpp_sum ( 'zdfmxl', zdf_mxl_alloc ) 53 53 IF( zdf_mxl_alloc /= 0 ) CALL ctl_warn('zdf_mxl_alloc: failed to allocate arrays.') 54 54 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdfosm.F90
r10170 r10297 118 118 & etmean(jpi,jpj,jpk), STAT= zdf_osm_alloc ) 119 119 IF( zdf_osm_alloc /= 0 ) CALL ctl_warn('zdf_osm_alloc: failed to allocate zdf_osm arrays') 120 IF( lk_mpp ) CALL mpp_sum ( zdf_osm_alloc )120 IF( lk_mpp ) CALL mpp_sum ( 'zdfosm', zdf_osm_alloc ) 121 121 END FUNCTION zdf_osm_alloc 122 122 -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/ZDF/zdftke.F90
r10068 r10297 103 103 ALLOCATE( htau(jpi,jpj) , dissl(jpi,jpj,jpk) , apdlr(jpi,jpj,jpk) , STAT= zdf_tke_alloc ) 104 104 ! 105 IF( lk_mpp ) CALL mpp_sum ( zdf_tke_alloc )105 IF( lk_mpp ) CALL mpp_sum ( 'zdftke', zdf_tke_alloc ) 106 106 IF( zdf_tke_alloc /= 0 ) CALL ctl_warn('zdf_tke_alloc: failed to allocate arrays') 107 107 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/lib_fortran.F90
r10068 r10297 84 84 CALL DDPDD( CMPLX( ztmp, 0.e0, wp ), ctmp ) 85 85 END DO 86 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain86 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 87 87 glob_sum_1d = REAL(ctmp,wp) 88 88 ! … … 111 111 END DO 112 112 END DO 113 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain113 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 114 114 glob_sum_2d = REAL(ctmp,wp) 115 115 ! … … 144 144 END DO 145 145 END DO 146 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain146 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 147 147 glob_sum_3d = REAL(ctmp,wp) 148 148 ! … … 174 174 END DO 175 175 END DO 176 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain176 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 177 177 glob_sum_2d_a = REAL(ctmp,wp) 178 178 ! … … 209 209 END DO 210 210 END DO 211 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain211 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 212 212 glob_sum_3d_a = REAL(ctmp,wp) 213 213 ! … … 236 236 END DO 237 237 END DO 238 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain238 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 239 239 glob_sum_full_2d = REAL(ctmp,wp) 240 240 ! … … 268 268 END DO 269 269 END DO 270 IF( lk_mpp ) CALL mpp_sum( ctmp ) ! sum over the global domain270 IF( lk_mpp ) CALL mpp_sum( 'lib_fortran', ctmp ) ! sum over the global domain 271 271 glob_sum_full_3d = REAL(ctmp,wp) 272 272 ! … … 285 285 ! 286 286 glob_min_2d = MINVAL( ptab(:,:)*tmask_i(:,:) ) 287 IF( lk_mpp ) CALL mpp_min( glob_min_2d )287 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_2d ) 288 288 ! 289 289 END FUNCTION glob_min_2d … … 308 308 glob_min_3d = MIN( glob_min_3d, MINVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 309 309 END DO 310 IF( lk_mpp ) CALL mpp_min( glob_min_3d )310 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_3d ) 311 311 ! 312 312 END FUNCTION glob_min_3d … … 325 325 glob_min_2d_a(1) = MINVAL( ptab1(:,:)*tmask_i(:,:) ) 326 326 glob_min_2d_a(2) = MINVAL( ptab2(:,:)*tmask_i(:,:) ) 327 IF( lk_mpp ) CALL mpp_min( glob_min_2d_a, 2 )327 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_2d_a, 2 ) 328 328 ! 329 329 END FUNCTION glob_min_2d_a … … 351 351 glob_min_3d_a(2) = MIN( glob_min_3d_a(2), MINVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 352 352 END DO 353 IF( lk_mpp ) CALL mpp_min( glob_min_3d_a, 2 )353 IF( lk_mpp ) CALL mpp_min( 'lib_fortran', glob_min_3d_a, 2 ) 354 354 ! 355 355 END FUNCTION glob_min_3d_a … … 367 367 ! 368 368 glob_max_2d = MAXVAL( ptab(:,:)*tmask_i(:,:) ) 369 IF( lk_mpp ) CALL mpp_max( glob_max_2d )369 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_2d ) 370 370 ! 371 371 END FUNCTION glob_max_2d … … 390 390 glob_max_3d = MAX( glob_max_3d, MAXVAL( ptab(:,:,jk)*tmask_i(:,:) ) ) 391 391 END DO 392 IF( lk_mpp ) CALL mpp_max( glob_max_3d )392 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_3d ) 393 393 ! 394 394 END FUNCTION glob_max_3d … … 407 407 glob_max_2d_a(1) = MAXVAL( ptab1(:,:)*tmask_i(:,:) ) 408 408 glob_max_2d_a(2) = MAXVAL( ptab2(:,:)*tmask_i(:,:) ) 409 IF( lk_mpp ) CALL mpp_max( glob_max_2d_a, 2 )409 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_2d_a, 2 ) 410 410 ! 
411 411 END FUNCTION glob_max_2d_a … … 433 433 glob_max_3d_a(2) = MAX( glob_max_3d_a(2), MAXVAL( ptab2(:,:,jk)*tmask_i(:,:) ) ) 434 434 END DO 435 IF( lk_mpp ) CALL mpp_max( glob_max_3d_a, 2 )435 IF( lk_mpp ) CALL mpp_max( 'lib_fortran', glob_max_3d_a, 2 ) 436 436 ! 437 437 END FUNCTION glob_max_3d_a -
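For context on the glob_sum_* hunks above: each local masked sum is accumulated into a COMPLEX "double-double" through DDPDD before the single mpp_sum, so the result is far less sensitive to the order in which contributions are added. Below is a minimal sketch of that compensated addition, assuming a double-precision kind in place of NEMO's wp; it illustrates the idea only and is not the lib_fortran/DDPDD implementation itself.

MODULE ddpdd_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision, standing in for NEMO's wp
CONTAINS
   PURE SUBROUTINE ddpdd( ydda, yddb )
      !! add ydda into the accumulator yddb:
      !! REAL part = running sum, AIMAG part = accumulated rounding error
      COMPLEX(wp), INTENT(in   ) :: ydda
      COMPLEX(wp), INTENT(inout) :: yddb
      REAL(wp) :: zt, ze, zc
      zt = REAL(ydda,wp) + REAL(yddb,wp)                    ! high-order sum
      ze = zt - REAL(ydda,wp)                               ! Knuth two-sum split
      zc = ( (REAL(yddb,wp) - ze) + (REAL(ydda,wp) - (zt - ze)) )   &
         &  + AIMAG(ydda) + AIMAG(yddb)                     ! new rounding error + carried errors
      yddb = CMPLX( zt + zc, zc - ((zt + zc) - zt), wp )    ! renormalise into (hi, lo)
   END SUBROUTINE ddpdd
END MODULE ddpdd_sketch

With such an accumulator, CALL ddpdd( CMPLX( ztmp, 0._wp, wp ), ctmp ) inside the masked loops, one mpp_sum on ctmp, and a final REAL(ctmp,wp) reproduce the pattern of the functions above. -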
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/module_example
r10170 r10297 63 63 ALLOCATE( array(jpi,jpj,jpk) , STAT= exa_mpl_alloc ) ! Module array 64 64 ! 65 IF( lk_mpp ) CALL mpp_sum ( exa_mpl_alloc )65 IF( lk_mpp ) CALL mpp_sum ( 'module_example', exa_mpl_alloc ) 66 66 IF( exa_mpl_alloc /= 0 ) CALL ctl_warn('exa_mpl_alloc: failed to allocate arrays') 67 67 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/nemogcm.F90
r10172 r10297 148 148 ! check that all processes are still there... If some processes have an error, 149 149 ! they will never enter the time stepping and other processes will wait until the end of the CPU time! 150 IF( lk_mpp ) CALL mpp_max( nstop )150 IF( lk_mpp ) CALL mpp_max( 'nemogcm', nstop ) 151 151 IF(lwp) WRITE(numout,cform_aaa) ! Flag AAAAAAA … … 621 621 #endif 622 622 ! 623 IF( lk_mpp ) CALL mpp_sum( ierr )623 IF( lk_mpp ) CALL mpp_sum( 'nemogcm', ierr ) 624 624 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc: unable to allocate standard ocean arrays' ) 625 625 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OCE/timing.F90
r10178 r10297 320 320 END DO 321 321 idum = nsize 322 IF(lk_mpp) CALL mpp_sum( idum)322 IF(lk_mpp) CALL mpp_sum('timing', idum) 323 323 IF( idum/jpnij /= nsize ) THEN 324 324 IF( lwriter ) WRITE(numtime,*) ' ===> W A R N I N G: ' -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OFF/dtadyn.F90
r10068 r10297 407 407 hrnf_max = MAXVAL( h_rnf(:,:) ) 408 408 IF( lk_mpp ) THEN 409 CALL mpp_max( nkrnf_max ) ! max over the global domain410 CALL mpp_max( hrnf_max ) ! max over the global domain409 CALL mpp_max( 'dtadyn', nkrnf_max ) ! max over the global domain 410 CALL mpp_max( 'dtadyn', hrnf_max ) ! max over the global domain 411 411 ENDIF 412 412 IF(lwp) WRITE(numout,*) ' ' -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/OFF/nemogcm.F90
r10068 r10297 95 95 ! check that all processes are still there... If some processes have an error, 96 96 ! they will never enter the time stepping and other processes will wait until the end of the CPU time! 97 IF( lk_mpp ) CALL mpp_max( nstop )97 IF( lk_mpp ) CALL mpp_max( 'nemogcm', nstop ) 98 98 99 99 ! !-----------------------! … … 434 434 ierr = ierr + trc_oce_alloc() ! shared TRC / TRA arrays 435 435 ! 436 IF( lk_mpp ) CALL mpp_sum( ierr )436 IF( lk_mpp ) CALL mpp_sum( 'nemogcm', ierr ) 437 437 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc: unable to allocate standard ocean arrays' ) 438 438 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/SAO/nemogcm.F90
r10068 r10297 372 372 ierr = ierr + dom_oce_alloc () ! ocean domain 373 373 ! 374 IF( lk_mpp ) CALL mpp_sum( ierr )374 IF( lk_mpp ) CALL mpp_sum( 'nemogcm', ierr ) 375 375 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc : unable to allocate standard ocean arrays' ) 376 376 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/SAS/diawri.F90
r10068 r10297 109 109 ! 110 110 ALLOCATE( ndex_hT(jpi*jpj), ndex_hU(jpi*jpj), ndex_hV(jpi*jpj), STAT=dia_wri_alloc ) 111 IF( lk_mpp ) CALL mpp_sum( dia_wri_alloc )111 IF( lk_mpp ) CALL mpp_sum( 'diawri', dia_wri_alloc ) 112 112 ! 113 113 END FUNCTION dia_wri_alloc -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/SAS/nemogcm.F90
r10068 r10297 94 94 ! check that all process are still there... If some process have an error, 95 95 ! they will never enter in step and other processes will wait until the end of the cpu time! 96 IF( lk_mpp ) CALL mpp_max( nstop )96 IF( lk_mpp ) CALL mpp_max( 'nemogcm', nstop ) 97 97 98 98 IF(lwp) WRITE(numout,cform_aaa) ! Flag AAAAAAA … … 486 486 ierr = ierr + bdy_oce_alloc() ! bdy masks (incl. initialization) 487 487 ! 488 IF( lk_mpp ) CALL mpp_sum( ierr )488 IF( lk_mpp ) CALL mpp_sum( 'nemogcm', ierr ) 489 489 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'nemo_alloc: unable to allocate standard ocean arrays' ) 490 490 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/PISCES/P4Z/p4zsink.F90
r10069 r10297 124 124 END DO 125 125 IF( lk_mpp ) THEN 126 CALL mpp_max( iiter1 )127 CALL mpp_max( iiter2 )126 CALL mpp_max( 'p4zsink', iiter1 ) 127 CALL mpp_max( 'p4zsink', iiter2 ) 128 128 ENDIF 129 129 iiter1 = MIN( iiter1, niter1max ) -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/PISCES/trcini_pisces.F90
r10068 r10297 117 117 ENDIF 118 118 ! 119 IF( lk_mpp ) CALL mpp_sum( ierr )119 IF( lk_mpp ) CALL mpp_sum( 'trcini_pisces', ierr ) 120 120 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'pisces_alloc: unable to allocate PISCES arrays' ) 121 121 ! … … 299 299 ierr = ierr + p2z_exp_alloc() 300 300 ! 301 IF( lk_mpp ) CALL mpp_sum( ierr )301 IF( lk_mpp ) CALL mpp_sum( 'trcini_pisces', ierr ) 302 302 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'p2z_ini: unable to allocate LOBSTER arrays' ) 303 303 -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/TRP/trdmxl_trc.F90
r10170 r10297 64 64 & ndextrd1(jpi*jpj), nidtrd(jptra), nh_t(jptra), STAT=trd_mxl_trc_alloc) 65 65 ! 66 IF( lk_mpp ) CALL mpp_sum ( trd_mxl_trc_alloc )66 IF( lk_mpp ) CALL mpp_sum ( 'trdmxl_trc', trd_mxl_trc_alloc ) 67 67 IF( trd_mxl_trc_alloc /=0 ) CALL ctl_warn('trd_mxl_trc_alloc: failed to allocate arrays') 68 68 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/trcini.F90
r10068 r10297 289 289 #endif 290 290 ! 291 IF( lk_mpp ) CALL mpp_sum( ierr )291 IF( lk_mpp ) CALL mpp_sum( 'trcini', ierr ) 292 292 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'top_alloc : unable to allocate standard ocean arrays' ) 293 293 ! -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/trcnam.F90
r10068 r10297 23 23 USE trdtrc_oce ! 24 24 USE iom ! I/O manager 25 USE lib_mpp, ONLY: ncom_dttrc 25 26 26 27 IMPLICIT NONE … … 76 77 ENDIF 77 78 ! 78 rdttrc = rdt * FLOAT( nn_dttrc ) ! passive tracer time-step 79 rdttrc = rdt * FLOAT( nn_dttrc ) ! passive tracer time-step 79 80 ! 80 81 IF(lwp) THEN ! control print … … 128 129 ENDIF 129 130 ! 130 END SUBROUTINE trc_nam_run 131 #if defined key_mpp_mpi 132 ncom_dttrc = nn_dttrc ! make nn_dttrc available for lib_mpp 133 #endif 134 ! 135 END SUBROUTINE trc_nam_run 131 136 132 137 -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/trcrst.F90
r10068 r10297 320 320 zmax = MAXVAL( trn(:,:,:,jn), mask= ((tmask*SPREAD(tmask_i,DIM=3,NCOPIES=jpk).NE.0.)) ) 321 321 IF( lk_mpp ) THEN 322 CALL mpp_min( zmin ) ! min over the global domain323 CALL mpp_max( zmax ) ! max over the global domain322 CALL mpp_min( 'trcrst', zmin ) ! min over the global domain 323 CALL mpp_max( 'trcrst', zmax ) ! max over the global domain 324 324 END IF 325 325 zmean = ztraf / areatot -
NEMO/branches/2018/dev_r10164_HPC09_ESIWACE_PREP_MERGE/src/TOP/trcsub.F90
r10170 r10297 308 308 309 309 ierr = trc_sub_alloc () 310 IF( lk_mpp ) CALL mpp_sum( ierr )310 IF( lk_mpp ) CALL mpp_sum( 'trcsub', ierr ) 311 311 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'top_sub_alloc : unable to allocate standard ocean arrays' ) 312 312
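Every hunk in this changeset makes the same one-line change: the mpp_sum / mpp_max / mpp_min collectives gain a leading character argument naming the calling module ('trdglo', 'lib_fortran', 'nemogcm', ...); together with trcnam.F90 now exporting nn_dttrc to lib_mpp as ncom_dttrc, this points at per-caller accounting of communications. The sketch below shows only the shape of such a wrapper; the module name, the use of MPI_COMM_WORLD and the accounting comment are assumptions, not NEMO's lib_mpp code.

MODULE mpp_sketch
   USE mpi
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision, standing in for NEMO's wp
CONTAINS
   SUBROUTINE mpp_sum( cdname, pval )
      CHARACTER(len=*), INTENT(in   ) :: cdname   ! calling module, e.g. 'trcsub'
      REAL(wp)        , INTENT(inout) :: pval     ! local value in, global sum out
      REAL(wp) :: zsum
      INTEGER  :: ierr
      CALL MPI_Allreduce( pval, zsum, 1, MPI_DOUBLE_PRECISION, MPI_SUM,   &
         &                MPI_COMM_WORLD, ierr )
      pval = zsum
      ! a real implementation would record (cdname, message count/size) here
      ! for a communications report; this sketch only performs the reduction
   END SUBROUTINE mpp_sum
END MODULE mpp_sketch

Call sites then change exactly as in the hunks above, e.g. CALL mpp_sum( zztmp ) in zdfiwm.F90 becomes CALL mpp_sum( 'zdfiwm', zztmp ).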