- Timestamp: 2010-06-29T17:41:10+02:00
- Location: branches/DEV_r1784_mid_year_merge_2010
- Files: 19 edited
Legend: unmarked lines are unmodified, lines prefixed with + were added, lines prefixed with - were removed.
branches/DEV_r1784_mid_year_merge_2010/CONFIG/GYRE/EXP00/namelist  (r1741 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/CONFIG/GYRE_LOBSTER/EXP00/namelist  (r1741 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/CONFIG/ORCA2_LIM/EXP00/1_namelist  (r1741 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/CONFIG/ORCA2_LIM/EXP00/namelist  (r1954 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/CONFIG/ORCA2_LIM_PISCES/EXP00/namelist  (r1759 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/CONFIG/POMME/EXP00/namelist  (r1970 → r1976)

      nn_bench    =    0     !  Bench mode (1/0): CAUTION use zero except for bench
                             !  (no physical validity of the results)
-     nn_bit_cmp  =    0     !  bit comparison mode (1/0): CAUTION use zero except for test
-                            !  of comparison between single and multiple processor runs
   /

branches/DEV_r1784_mid_year_merge_2010/NEMO/LIM_SRC_2/limdyn_2.F90  (r1694 → r1976)

      ! ---------------------------------------------------

-     IF( lk_mpp .OR. nbit_cmp == 1 ) THEN    ! mpp: compute over the whole domain
+     IF( lk_mpp .OR. lk_mpp_rep ) THEN       ! mpp: compute over the whole domain
         i_j1  = 1
         i_jpj = jpj

branches/DEV_r1784_mid_year_merge_2010/NEMO/LIM_SRC_3/limdyn.F90  (r1470 → r1976)

      ! ---------------------------------------------------

-     IF( lk_mpp .OR. nbit_cmp == 1 ) THEN    ! mpp: compute over the whole domain
+     IF( lk_mpp .OR. lk_mpp_rep ) THEN       ! mpp: compute over the whole domain
         i_j1  = 1
         i_jpj = jpj

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/DOM/dom_oce.F90  (r1970 → r1976)

   #endif

+  !!----------------------------------------------------------------------
+  !! mpp reproducibility
+  !!----------------------------------------------------------------------
+  #if defined key_mpp_rep1 || defined key_mpp_re2
+     LOGICAL, PUBLIC, PARAMETER ::   lk_mpp_rep = .TRUE.    !: agrif flag
+  #else
+     LOGICAL, PUBLIC, PARAMETER ::   lk_mpp_rep = .FALSE.   !: agrif flag
+  #endif
   !!======================================================================
   END MODULE dom_oce

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/DOM/domain.F90  (r1953 → r1976)

      ENDIF

+  #if defined key_agrif
      IF( Agrif_Root() ) THEN
-        SELECT CASE ( nleapy )       ! Choose calendar for IOIPSL
-        CASE (  1 )
-           CALL ioconf_calendar('gregorian')
-           IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "gregorian", i.e. leap year'
-        CASE (  0 )
-           CALL ioconf_calendar('noleap')
-           IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "noleap", i.e. no leap year'
-        CASE ( 30 )
-           CALL ioconf_calendar('360d')
-           IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "360d", i.e. 360 days in a year'
-        END SELECT
-     ENDIF
+  #endif
+     SELECT CASE ( nleapy )       ! Choose calendar for IOIPSL
+     CASE (  1 )
+        CALL ioconf_calendar('gregorian')
+        IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "gregorian", i.e. leap year'
+     CASE (  0 )
+        CALL ioconf_calendar('noleap')
+        IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "noleap", i.e. no leap year'
+     CASE ( 30 )
+        CALL ioconf_calendar('360d')
+        IF(lwp) WRITE(numout,*) ' The IOIPSL calendar is "360d", i.e. 360 days in a year'
+     END SELECT
+  #if defined key_agrif
+     ENDIF
+  #endif

      REWIND( numnam )             ! Namelist namdom : space & time domain (bathymetry, mesh, timestep)
   …
      n_cla = nn_cla               ! conversion DOCTOR names into model names (this should disappear soon)

-     IF( nbit_cmp == 1 .AND. n_cla /= 0 )   CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require n_cla = 0' )
+     IF( lk_mpp_rep .AND. n_cla /= 0 )   CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require n_cla = 0' )
      !
   END SUBROUTINE dom_nam

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/DYN/dynspg_flt.F90  (r1970 → r1976)

   USE iom
   USE restart         ! only for lrst_oce
+  USE lib_fortran

   IMPLICIT NONE
   …
   #if defined key_obc
-     IF( lk_obc )   CALL obc_dyn( kt )   ! Update velocities on each open boundary with the radiation algorithm
-     IF( lk_obc )   CALL obc_vol( kt )   ! Correction of the barotropic componant velocity to control the volume of the system
+     CALL obc_dyn( kt )   ! Update velocities on each open boundary with the radiation algorithm
+     CALL obc_vol( kt )   ! Correction of the barotropic componant velocity to control the volume of the system
   #endif
   #if defined key_bdy
   …
      ! ------------------
      rnorme = 0.e0
-     rnorme = SUM( gcb(1:jpi,1:jpj) * gcdmat(1:jpi,1:jpj) * gcb(1:jpi,1:jpj) * bmask(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+     rnorme = GLOB_SUM( gcb(1:jpi,1:jpj) * gcdmat(1:jpi,1:jpj) * gcb(1:jpi,1:jpj) * bmask(:,:) )

      epsr = eps * eps * rnorme
   …
   #if defined key_obc
      ! caution : grad D = 0 along open boundaries
-     IF( Agrif_Root() ) THEN
-        spgu(ji,jj) = z2dt * ztdgu * obcumask(ji,jj)
-        spgv(ji,jj) = z2dt * ztdgv * obcvmask(ji,jj)
-     ELSE
-        spgu(ji,jj) = z2dt * ztdgu
-        spgv(ji,jj) = z2dt * ztdgv
-     ENDIF
+     spgu(ji,jj) = z2dt * ztdgu * obcumask(ji,jj)
+     spgv(ji,jj) = z2dt * ztdgv * obcvmask(ji,jj)
   #elif defined key_bdy
      ! caution : grad D = 0 along open boundaries

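Throughout this changeset, SUM + mpp_sum pairs like the one above are folded into a single glob_sum call taken from the new lib_fortran module, which is not itself among the files shown here. The sketch below is only an illustration of that idea, not NEMO code: the module and function names (glob_sum_sketch, glob_sum_2d), the kind parameter wp and the use of MPI_COMM_WORLD are assumptions. The point is that one entry point hides both the local partial sum and the global reduction, so a reproducible summation (such as the MPI_SUMDD reduction added to lib_mpp.F90 further down) can later be swapped in at a single place.

    ! Illustrative sketch only -- NEMO's real glob_sum lives in lib_fortran.F90,
    ! which is not part of this changeset; all names here are hypothetical.
    MODULE glob_sum_sketch
       USE mpi                                                  ! standard MPI Fortran bindings
       IMPLICIT NONE
       INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)    ! double precision
    CONTAINS
       FUNCTION glob_sum_2d( ptab ) RESULT( psum )
          REAL(wp), DIMENSION(:,:), INTENT(in) :: ptab   ! local (per-process) field
          REAL(wp)                             :: psum   ! same global sum returned on every process
          REAL(wp) :: zloc
          INTEGER  :: ierr
          zloc = SUM( ptab )                             ! local partial sum
          CALL MPI_ALLREDUCE( zloc, psum, 1, MPI_DOUBLE_PRECISION,  &
             &                MPI_SUM, MPI_COMM_WORLD, ierr )       ! global reduction
       END FUNCTION glob_sum_2d
    END MODULE glob_sum_sketch

A caller compiled against such a wrapper writes, for example, area = glob_sum_2d( e1e2_i ) and no longer needs to know whether the run is serial or parallel, nor which reduction algorithm is used underneath.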
branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/IOM/in_out_manager.F90  (r1770 → r1976)

      !                         !: OLD namelist names
-     INTEGER ::   nprint, nictls, nictle, njctls, njctle, isplt, jsplt, nbench , nbit_cmp
+     INTEGER ::   nprint, nictls, nictle, njctls, njctle, isplt, jsplt, nbench

      INTEGER ::   ijsplt = 1   !: nb of local domain = nb of processors

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/SBC/sbcana.F90  (r1732 → r1976)

      ! Compute the emp flux such as its integration on the whole domain at each time is zero
-     IF( nbench /= 1 .AND. nbit_cmp /= 1 ) THEN
+     IF( nbench /= 1 ) THEN
         zsumemp = 0.e0   ;   zsurf = 0.e0
         DO jj = 1, jpj

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/SBC/sbcfwb.F90  (r1970 → r1976)

   USE lib_mpp   ! distribued memory computing library
   USE lbclnk    ! ocean lateral boundary conditions
+  USE lib_fortran

   IMPLICIT NONE
   …
      INTEGER  ::   inum         ! temporary logical unit
      INTEGER  ::   ikty, iyear  !
-     REAL(wp) ::   z_emp, z_emp_nsrf , zsum_emp, zsum_erp   ! temporary scalars
+     REAL(wp) ::   z_emp, z_emp_nsrf                        ! temporary scalars
      REAL(wp) ::   zsurf_neg, zsurf_pos, zsurf_tospread
      REAL(wp), DIMENSION(jpi,jpj) ::   ztmsk_neg, ztmsk_pos, ztmsk_tospread
   …
         !
         e1e2_i(:,:) = e1t(:,:) * e2t(:,:) * tmask_i(:,:)
-        area = SUM( e1e2_i(:,:) )
-        IF( lk_mpp )   CALL mpp_sum( area )       ! sum over the global domain
+        area = glob_sum( e1e2_i(:,:) )            ! sum over the global domain
         !
      ENDIF
   …
      CASE ( 1 )                                   ! global mean emp set to zero
         IF( MOD( kt-1, kn_fsbc ) == 0 ) THEN
-           z_emp = SUM( e1e2_i(:,:) * emp(:,:) ) / area
-           IF( lk_mpp )   CALL mpp_sum( z_emp )   ! sum over the global domain
+           z_emp = glob_sum( e1e2_i(:,:) * emp(:,:) ) / area   ! sum over the global domain
            emp (:,:) = emp (:,:) - z_emp
            emps(:,:) = emps(:,:) - z_emp
   …
         IF( MOD( kt, ikty ) == 0 ) THEN
            a_fwb_b = a_fwb
-           a_fwb   = SUM( e1e2_i(:,:) * sshn(:,:) )
-           IF( lk_mpp )   CALL mpp_sum( a_fwb )   ! sum over the global domain
+           a_fwb   = glob_sum( e1e2_i(:,:) * sshn(:,:) )       ! sum over the global domain
            a_fwb   = a_fwb * 1.e+3 / ( area * 86400. * 365. )  ! convert in Kg/m3/s = mm/s
   !!gm                                                         ! !!bug 365d year
   …

         ! emp global mean
-        z_emp = SUM( e1e2_i(:,:) * emp(:,:) ) / area
+        z_emp = glob_sum( e1e2_i(:,:) * emp(:,:) ) / area
         !
-        IF( lk_mpp )   CALL mpp_sum( z_emp )
-        IF( lk_mpp )   CALL mpp_sum( zsurf_neg )
-        IF( lk_mpp )   CALL mpp_sum( zsurf_pos )

         IF( z_emp < 0.e0 ) THEN
   …

         ! emp global mean over <0 or >0 erp area
-        zsum_emp   = SUM( e1e2_i(:,:) * z_emp )
-        IF( lk_mpp )   CALL mpp_sum( zsum_emp )
-        z_emp_nsrf = zsum_emp / ( zsurf_tospread + rsmall )
+        z_emp_nsrf = SUM( e1e2_i(:,:) * z_emp ) / ( zsurf_tospread + rsmall )
         ! weight to respect erp field 2D structure
-        zsum_erp   = SUM( ztmsk_tospread(:,:) * erp(:,:) * e1e2_i(:,:) )
-        IF( lk_mpp )   CALL mpp_sum( zsum_erp )
-        z_wgt(:,:) = ztmsk_tospread(:,:) * erp(:,:) / ( zsum_erp + rsmall )
-
+        z_wgt(:,:) = ztmsk_tospread(:,:) * erp(:,:) / ( SUM( ztmsk_tospread(:,:) * erp(:,:) * e1e2_i(:,:) ) + rsmall )
         ! final correction term to apply
         zerp_cor(:,:) = -1. * z_emp_nsrf * zsurf_tospread * z_wgt(:,:)

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/SOL/solpcg.F90  (r1601 → r1976)

   USE lbclnk           ! ocean lateral boundary conditions (or mpp link)
   USE in_out_manager   ! I/O manager
+  USE lib_fortran

   IMPLICIT NONE
   …
      ! Initialization of the algorithm with standard PCG
      ! -------------------------------------------------
+     zgcr = 0.e0
+     gcr  = 0.e0

      CALL lbc_lnk( gcx, c_solver_pt, 1. )   ! lateral boundary condition
   …
      ! gcr   = gcb-a.gcx
      ! gcdes = gcr
-
      DO jj = 2, jpjm1
         DO ji = fs_2, fs_jpim1   ! vector opt.
   …

      ! rnorme = (gcr,gcr)
-     rnorme = SUM( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+     rnorme = glob_sum( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )

      CALL lbc_lnk( gcdes, c_solver_pt, 1. )   ! lateral boundary condition
   …

      ! alph = (gcr,gcr)/(gcdes,gccd)
-     radd = SUM( gcdes(:,:) * gcdmat(:,:) * gccd(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( radd )   ! sum over the global domain
+     radd = glob_sum( gcdes(:,:) * gcdmat(:,:) * gccd(:,:) )
      alph = rnorme /radd
   …
         ! rnorme = (gcr,gcr)
         rr = rnorme
-        zsum(1) = SUM( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )

         ! zgcad = (zgcr,gcr)
-        zsum(2) = SUM( gcr(2:jpim1,2:jpjm1) * gcdmat(2:jpim1,2:jpjm1) * zgcr(2:jpim1,2:jpjm1) )
-
-        IF( lk_mpp )   CALL mpp_sum( zsum, 2 )   ! sum over the global domain
+        zsum(1) = glob_sum( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )
+        zsum(2) = glob_sum( gcr(:,:) * gcdmat(:,:) * zgcr(:,:) * bmask(:,:) )
+
+        !!RB we should gather the 2 glob_sum
         rnorme = zsum(1)
         zgcad  = zsum(2)
-
         ! test of convergence
         IF( rnorme < epsr .OR. jn == nn_nmax ) THEN

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/SOL/solsor.F90  (r1601 → r1976)

   USE lib_mpp          ! distributed memory computing
   USE lbclnk           ! ocean lateral boundary conditions (or mpp link)
+  USE lib_fortran

   IMPLICIT NONE
   …
      INTEGER  ::   ijmppodd, ijmppeven, ijpr2d
      REAL(wp) ::   ztmp, zres, zres2
+     REAL(wp), DIMENSION(jpi,jpj) ::   ztab
      !!----------------------------------------------------------------------

   …
         ENDIF
      CASE ( 1 )                 ! relative precision
-        rnorme = SUM( gcr(2:nlci-1,2:nlcj-1) )
-        IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+        ztab = 0.
+        ztab(:,:) = gcr(2:nlci-1,2:nlcj-1)
+        rnorme = glob_sum( ztab )               ! sum over the global domain
         ! test of convergence
         IF( rnorme < epsr .OR. jn == nn_nmax ) THEN

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/SOL/solver.F90  (r1601 → r1976)

      END SELECT
      !
-     IF( nbit_cmp == 1 ) THEN          ! reproductibility test SOR required
-        IF( nn_solv /= 2 ) THEN
-           CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require the SOR solver: nn_solv = 2' )
-        ELSE IF( MAX( jpr2di, jpr2dj ) > 0 ) THEN
-           CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require jpr2di = jpr2dj = 0' )
-        END IF
-     END IF

      !                                 !* Grid-point at which the solver is applied

branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/lib_mpp.F90  (r1970 → r1976)

   #endif

+  # if defined key_mpp_rep1
+  PUBLIC mpp_allgatherv
+  # endif
+
   !! * Interfaces
   !! define generic interface for these routine as they are called sometimes
   …
   END INTERFACE
   INTERFACE mpp_sum
+  # if defined key_mpp_rep2
+     MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real, &
+                      mppsum_realdd, mppsum_a_realdd
+  # else
      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real
+  # endif
   END INTERFACE
   INTERFACE mpp_lbc_north
   …
      MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d
   END INTERFACE

+  # if defined key_mpp_rep1
+  INTERFACE mpp_allgatherv
+     MODULE PROCEDURE mpp_allgatherv_real, mpp_allgatherv_int
+  END INTERFACE
+  # endif

   !! ========================= !!
   …
   INTEGER ::   mppsize   ! number of process
   INTEGER ::   mpprank   ! process number [ 0 - size-1 ]
-  !$AGRIF_DO_NOT_TREAT
-  INTEGER, PUBLIC ::   mpi_comm_opa   ! opa local communicator
-  !$AGRIF_END_DO_NOT_TREAT
+  INTEGER ::   mpi_comm_opa   ! opa local communicator
+
+  INTEGER, PUBLIC ::   MPI_SUMDD

   ! variables used in case of sea-ice
   …
      WRITE(ldtxt(6),*) ' size in bytes of exported buffer nn_buffer = ', nn_buffer

-     CALL mpi_initialized ( mpi_was_called, code )
-     IF( code /= MPI_SUCCESS ) THEN
-        WRITE(*, cform_err)
-        WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
-        CALL mpi_abort( mpi_comm_world, code, ierr )
-     ENDIF
-
-     IF( mpi_was_called ) THEN
-        !
+  #if defined key_agrif
+     IF( Agrif_Root() ) THEN
+  #endif
+     !!bug RB : should be clean to use Agrif in coupled mode
+  #if ! defined key_agrif
+     CALL mpi_initialized ( mpi_was_called, code )
+     IF( code /= MPI_SUCCESS ) THEN
+        WRITE(*, cform_err)
+        WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
+        CALL mpi_abort( mpi_comm_world, code, ierr )
+     ENDIF
+
+     IF( PRESENT(localComm) .and. mpi_was_called ) THEN
+        mpi_comm_opa = localComm
+        SELECT CASE ( cn_mpi_send )
+        CASE ( 'S' )            ! Standard mpi send (blocking)
+           WRITE(ldtxt(7),*) ' Standard blocking mpi send (send)'
+        CASE ( 'B' )            ! Buffer mpi send (blocking)
+           WRITE(ldtxt(7),*) ' Buffer blocking mpi send (bsend)'
+           CALL mpi_init_opa( ierr )
+        CASE ( 'I' )            ! Immediate mpi send (non-blocking send)
+           WRITE(ldtxt(7),*) ' Immediate non-blocking send (isend)'
+           l_isend = .TRUE.
+        CASE DEFAULT
+           WRITE(ldtxt(7),cform_err)
+           WRITE(ldtxt(8),*) ' bad value for cn_mpi_send = ', cn_mpi_send
+           nstop = nstop + 1
+        END SELECT
+     ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN
+        WRITE(ldtxt(7),*) ' lib_mpp: You cannot provide a local communicator '
+        WRITE(ldtxt(8),*) ' without calling MPI_Init before ! '
+        nstop = nstop + 1
+     ELSE
+  #endif
+        SELECT CASE ( cn_mpi_send )
+        CASE ( 'S' )            ! Standard mpi send (blocking)
+           WRITE(ldtxt(7),*) ' Standard blocking mpi send (send)'
+           CALL mpi_init( ierr )
+        CASE ( 'B' )            ! Buffer mpi send (blocking)
+           WRITE(ldtxt(7),*) ' Buffer blocking mpi send (bsend)'
+           CALL mpi_init_opa( ierr )
+        CASE ( 'I' )            ! Immediate mpi send (non-blocking send)
+           WRITE(ldtxt(7),*) ' Immediate non-blocking send (isend)'
+           l_isend = .TRUE.
+           CALL mpi_init( ierr )
+        CASE DEFAULT
+           WRITE(ldtxt(7),cform_err)
+           WRITE(ldtxt(8),*) ' bad value for cn_mpi_send = ', cn_mpi_send
+           nstop = nstop + 1
+        END SELECT
+
+  #if ! defined key_agrif
+        CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
+        IF( code /= MPI_SUCCESS ) THEN
+           WRITE(*, cform_err)
+           WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
+           CALL mpi_abort( mpi_comm_world, code, ierr )
+        ENDIF
+        !
+     ENDIF
+  #endif
+  #if defined key_agrif
+     ELSE
        SELECT CASE ( cn_mpi_send )
        CASE ( 'S' )            ! Standard mpi send (blocking)
   …
        CASE ( 'B' )            ! Buffer mpi send (blocking)
           WRITE(ldtxt(7),*) ' Buffer blocking mpi send (bsend)'
-          CALL mpi_init_opa( ierr )
        CASE ( 'I' )            ! Immediate mpi send (non-blocking send)
           WRITE(ldtxt(7),*) ' Immediate non-blocking send (isend)'
   …
           nstop = nstop + 1
        END SELECT
-    ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN
-       WRITE(ldtxt(7),*) ' lib_mpp: You cannot provide a local communicator '
-       WRITE(ldtxt(8),*) ' without calling MPI_Init before ! '
-       nstop = nstop + 1
-    ELSE
-       SELECT CASE ( cn_mpi_send )
-       CASE ( 'S' )            ! Standard mpi send (blocking)
-          WRITE(ldtxt(7),*) ' Standard blocking mpi send (send)'
-          CALL mpi_init( ierr )
-       CASE ( 'B' )            ! Buffer mpi send (blocking)
-          WRITE(ldtxt(7),*) ' Buffer blocking mpi send (bsend)'
-          CALL mpi_init_opa( ierr )
-       CASE ( 'I' )            ! Immediate mpi send (non-blocking send)
-          WRITE(ldtxt(7),*) ' Immediate non-blocking send (isend)'
-          l_isend = .TRUE.
-          CALL mpi_init( ierr )
-       CASE DEFAULT
-          WRITE(ldtxt(7),cform_err)
-          WRITE(ldtxt(8),*) ' bad value for cn_mpi_send = ', cn_mpi_send
-          nstop = nstop + 1
-       END SELECT
-       !
     ENDIF

-    IF( PRESENT(localComm) ) THEN
-       IF( Agrif_Root() ) THEN
-          mpi_comm_opa = localComm
-       ENDIF
-    ELSE
-       CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
-       IF( code /= MPI_SUCCESS ) THEN
-          WRITE(*, cform_err)
-          WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
-          CALL mpi_abort( mpi_comm_world, code, ierr )
-       ENDIF
-    ENDIF
-
+    mpi_comm_opa = mpi_comm_world
+  #endif
     CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
     CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
     mynode = mpprank
+    !
+  #if defined key_mpp_rep2
+    CALL MPI_OP_CREATE(DDPDD_MPI, .TRUE., MPI_SUMDD, ierr)
+  #endif
     !
   END FUNCTION mynode
   …
   END SUBROUTINE mppsum_real

-
+  # if defined key_mpp_rep2
+  SUBROUTINE mppsum_realdd( ytab, kcom )
+     !!----------------------------------------------------------------------
+     !!                  ***  routine mppsum_realdd ***
+     !!
+     !! ** Purpose : global sum in Massively Parallel Processing
+     !!              SCALAR argument case for double-double precision
+     !!
+     !!-----------------------------------------------------------------------
+     COMPLEX(wp), INTENT(inout)       ::   ytab   ! input scalar
+     INTEGER , INTENT( in ), OPTIONAL ::   kcom
+
+     !! * Local variables (MPI version)
+     INTEGER     ::   ierror
+     INTEGER     ::   localcomm
+     COMPLEX(wp) ::   zwork
+
+     localcomm = mpi_comm_opa
+     IF( PRESENT(kcom) ) localcomm = kcom
+
+     ! reduce local sums into global sum
+     CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX, &
+                         MPI_SUMDD,localcomm,ierror)
+     ytab = zwork
+
+  END SUBROUTINE mppsum_realdd
+
+
+  SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
+     !!----------------------------------------------------------------------
+     !!                  ***  routine mppsum_a_realdd  ***
+     !!
+     !! ** Purpose : global sum in Massively Parallel Processing
+     !!              COMPLEX ARRAY case for double-double precision
+     !!
+     !!-----------------------------------------------------------------------
+     INTEGER , INTENT( in )                        ::   kdim   ! size of ytab
+     COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) ::   ytab   ! input array
+     INTEGER , INTENT( in ), OPTIONAL              ::   kcom
+
+     !! * Local variables (MPI version)
+     INTEGER                      ::   ierror   ! temporary integer
+     INTEGER                      ::   localcomm
+     COMPLEX(wp), DIMENSION(kdim) ::   zwork    ! temporary workspace
+
+     localcomm = mpi_comm_opa
+     IF( PRESENT(kcom) ) localcomm = kcom
+
+     CALL MPI_ALLREDUCE (ytab, zwork, kdim, MPI_DOUBLE_COMPLEX, &
+                         MPI_SUMDD,localcomm,ierror)
+     ytab(:) = zwork(:)
+
+  END SUBROUTINE mppsum_a_realdd
+  # endif

   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj )
      !!------------------------------------------------------------------------
   …
      ijpj   = 4
      ijpjm1 = 3
-     ztab(:,:,:) = 0.e0
      !
      DO jj = nlcj - ijpj +1, nlcj   ! put in znorthloc the last 4 jlines of pt3d
   …
      ijpj   = 4
      ijpjm1 = 3
-     ztab(:,:) = 0.e0
      !
      DO jj = nlcj-ijpj+1, nlcj      ! put in znorthloc the last 4 jlines of pt2d
   …
      !
      ijpj=4
-     ztab(:,:) = 0.e0

      ij=0
   …
   END SUBROUTINE mpi_init_opa

+  #if defined key_mpp_rep1
+  SUBROUTINE mpp_allgatherv_real( pvalsin, knoin, pvalsout, ksizeout, &
+     &                            knoout, kstartout )
+     !!----------------------------------------------------------------------
+     !!               ***  ROUTINE mpp_allgatherv_real ***
+     !!
+     !! ** Purpose : Gather a real array on all processors
+     !!
+     !! ** Method  : MPI all gatherv
+     !!
+     !! ** Action  : This does only work for MPI.
+     !!              It does not work for SHMEM.
+     !!
+     !! References : http://www.mpi-forum.org
+     !!
+     !! History :
+     !!        !  08-08  (K. Mogensen)  Original code
+     !!----------------------------------------------------------------------
+
+     !! * Arguments
+     INTEGER, INTENT(IN) :: &
+        & knoin, &
+        & ksizeout
+     REAL(wp), DIMENSION(knoin), INTENT(IN) :: &
+        & pvalsin
+     REAL(wp), DIMENSION(ksizeout), INTENT(OUT) :: &
+        & pvalsout
+     INTEGER, DIMENSION(jpnij), INTENT(OUT) :: &
+        & kstartout, &
+        & knoout
+
+     !! * Local declarations
+     INTEGER :: &
+        & ierr
+     INTEGER :: &
+        & ji
+     !-----------------------------------------------------------------------
+     ! Call the MPI library to get number of data per processor
+     !-----------------------------------------------------------------------
+     CALL mpi_allgather( knoin, 1, mpi_integer, &
+        &                knoout, 1, mpi_integer, &
+        &                mpi_comm_opa, ierr )
+     !-----------------------------------------------------------------------
+     ! Compute starts of each processors contribution
+     !-----------------------------------------------------------------------
+     kstartout(1) = 0
+     DO ji = 2, jpnij
+        kstartout(ji) = kstartout(ji-1) + knoout(ji-1)
+     ENDDO
+     !-----------------------------------------------------------------------
+     ! Call the MPI library to do the gathering of the data
+     !-----------------------------------------------------------------------
+     CALL mpi_allgatherv( pvalsin, knoin, MPI_DOUBLE_PRECISION, &
+        &                 pvalsout, knoout, kstartout, MPI_DOUBLE_PRECISION, &
+        &                 mpi_comm_opa, ierr )
+
+  END SUBROUTINE mpp_allgatherv_real
+
+  SUBROUTINE mpp_allgatherv_int( kvalsin, knoin, kvalsout, ksizeout, &
+     &                           knoout, kstartout )
+     !!----------------------------------------------------------------------
+     !!               ***  ROUTINE mpp_allgatherv ***
+     !!
+     !! ** Purpose : Gather an integer array on all processors
+     !!
+     !! ** Method  : MPI all gatherv
+     !!
+     !! ** Action  : This does only work for MPI.
+     !!              It does not work for SHMEM.
+     !!
+     !! References : http://www.mpi-forum.org
+     !!
+     !! History :
+     !!        !  06-07  (K. Mogensen)  Original code
+     !!----------------------------------------------------------------------
+
+     !! * Arguments
+     INTEGER, INTENT(IN) :: &
+        & knoin, &
+        & ksizeout
+     INTEGER, DIMENSION(knoin), INTENT(IN) :: &
+        & kvalsin
+     INTEGER, DIMENSION(ksizeout), INTENT(OUT) :: &
+        & kvalsout
+     INTEGER, DIMENSION(jpnij), INTENT(OUT) :: &
+        & kstartout, &
+        & knoout
+
+     !! * Local declarations
+     INTEGER :: &
+        & ierr
+     INTEGER :: &
+        & ji
+     !-----------------------------------------------------------------------
+     ! Call the MPI library to get number of data per processor
+     !-----------------------------------------------------------------------
+     CALL mpi_allgather( knoin, 1, mpi_integer, &
+        &                knoout, 1, mpi_integer, &
+        &                mpi_comm_opa, ierr )
+     !-----------------------------------------------------------------------
+     ! Compute starts of each processors contribution
+     !-----------------------------------------------------------------------
+     kstartout(1) = 0
+     DO ji = 2, jpnij
+        kstartout(ji) = kstartout(ji-1) + knoout(ji-1)
+     ENDDO
+     !-----------------------------------------------------------------------
+     ! Call the MPI library to do the gathering of the data
+     !-----------------------------------------------------------------------
+     CALL mpi_allgatherv( kvalsin, knoin, mpi_integer, &
+        &                 kvalsout, knoout, kstartout, mpi_integer, &
+        &                 mpi_comm_opa, ierr )
+
+  END SUBROUTINE mpp_allgatherv_int
+  #endif
+
+  #if defined key_mpp_rep2
+  SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype)
+     !!---------------------------------------------------------------------
+     !!   Routine DDPDD_MPI: used by reduction operator MPI_SUMDD
+     !!
+     !!   Modification of original codes written by David H. Bailey
+     !!   This subroutine computes yddb(i) = ydda(i)+yddb(i)
+     !!---------------------------------------------------------------------
+     INTEGER, INTENT(in)                         ::   ilen, itype
+     COMPLEX(wp), DIMENSION(ilen), INTENT(in)    ::   ydda
+     COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::   yddb
+     !
+     REAL(wp) ::   zerr, zt1, zt2   ! local work variables
+     INTEGER  ::   ji, ztmp         ! local scalar
+
+     ztmp = itype   ! avoid compilation warning
+
+     DO ji=1,ilen
+        ! Compute ydda + yddb using Knuth's trick.
+        zt1  = real(ydda(ji)) + real(yddb(ji))
+        zerr = zt1 - real(ydda(ji))
+        zt2  = ((real(yddb(ji)) - zerr) + (real(ydda(ji)) - (zt1 - zerr))) &
+               + aimag(ydda(ji)) + aimag(yddb(ji))
+
+        ! The result is zt1 + zt2, after normalization.
+        yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp )
+     END DO
+
+  END SUBROUTINE DDPDD_MPI
+  #endif
+
   #else
   !!----------------------------------------------------------------------
   !!   Default case:            Dummy module        share memory computing
   !!----------------------------------------------------------------------
+  # if defined key_mpp_rep1
+  USE par_kind
+  USE par_oce
+
+  PUBLIC mpp_allgatherv
+  # endif
+
   INTERFACE mpp_sum
-     MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i
+     MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i, &
+        &             mpp_sum_c, mpp_sum_ac
   END INTERFACE
   INTERFACE mpp_max
   …
   END INTERFACE

+  # if defined key_mpp_rep1
+  INTERFACE mpp_allgatherv
+     MODULE PROCEDURE mpp_allgatherv_real, mpp_allgatherv_int
+  END INTERFACE
+  # endif

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.   !: mpp flag
   …
   END SUBROUTINE mpp_sum_ai

+  SUBROUTINE mpp_sum_ac( yarr, kdim, kcom )   ! Dummy routine
+     COMPLEX, DIMENSION(:) ::   yarr
+     INTEGER               ::   kdim
+     INTEGER, OPTIONAL     ::   kcom
+     WRITE(*,*) 'mpp_sum_ac: You should not have seen this print! error?', kdim, yarr(1), kcom
+  END SUBROUTINE mpp_sum_ac
+
   SUBROUTINE mpp_sum_s( psca, kcom )          ! Dummy routine
      REAL              ::   psca
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mpp_sum_s

   SUBROUTINE mpp_sum_i( kint, kcom )          ! Dummy routine
      integer           ::   kint
-     INTEGER, OPTIONAL ::   kcom
+     INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mpp_sum_i
+
+  SUBROUTINE mpp_sum_c( ysca, kcom )          ! Dummy routine
+     COMPLEX           ::   ysca
+     INTEGER, OPTIONAL ::   kcom
+     WRITE(*,*) 'mpp_sum_c: You should not have seen this print! error?', ysca, kcom
+  END SUBROUTINE mpp_sum_c

   SUBROUTINE mppmax_a_real( parr, kdim, kcom )
   …
   END SUBROUTINE mpp_comm_free

+  # if defined key_mpp_rep1
+  SUBROUTINE mpp_allgatherv_real( pvalsin, knoin, pvalsout, ksizeout, &
+     &                            knoout, kstartout )
+     INTEGER, INTENT(IN) :: &
+        & knoin, &
+        & ksizeout
+     REAL(wp), DIMENSION(knoin), INTENT(IN) :: &
+        & pvalsin
+     REAL(wp), DIMENSION(ksizeout), INTENT(OUT) :: &
+        & pvalsout
+     INTEGER, DIMENSION(jpnij), INTENT(OUT) :: &
+        & kstartout, &
+        & knoout
+     pvalsout(1:knoin) = pvalsin(1:knoin)
+     kstartout(1) = 0
+     knoout(1) = knoin
+  END SUBROUTINE mpp_allgatherv_real
+
+  SUBROUTINE mpp_allgatherv_int( kvalsin, knoin, kvalsout, ksizeout, &
+     &                           knoout, kstartout )
+     INTEGER, INTENT(IN) :: &
+        & knoin, &
+        & ksizeout
+     INTEGER, DIMENSION(knoin), INTENT(IN) :: &
+        & kvalsin
+     INTEGER, DIMENSION(ksizeout), INTENT(OUT) :: &
+        & kvalsout
+     INTEGER, DIMENSION(jpnij), INTENT(OUT) :: &
+        & kstartout, &
+        & knoout
+
+     kvalsout(1:knoin) = kvalsin(1:knoin)
+     kstartout(1) = 0
+     knoout(1) = knoin
+  END SUBROUTINE mpp_allgatherv_int
+  # endif
+
   #endif
   !!----------------------------------------------------------------------

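DDPDD_MPI above registers Knuth's compensated addition as the user-defined MPI reduction MPI_SUMDD, with each partial sum carried as a (high, low) pair packed into a COMPLEX value. The standalone, MPI-free program below replays that accumulation step to show why it is used for reproducibility; it is an illustration only, and all names in it (ddpdd_demo, ddpdd, zdata, and the choice of test values) are made up for the example. A naive running sum changes with the order of the additions, while the compensated (double-double) sum is insensitive to the order up to rounding.

    ! Illustrative demo of the Knuth / double-double accumulation used by DDPDD_MPI.
    PROGRAM ddpdd_demo
       IMPLICIT NONE
       INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12,307)   ! double precision
       INTEGER, PARAMETER :: n  = 100000
       REAL(wp)    :: zdata(n), zfwd, zbwd
       COMPLEX(wp) :: yfwd, ybwd
       INTEGER     :: ji

       DO ji = 1, n                     ! terms spanning several orders of magnitude
          zdata(ji) = 1.0_wp / REAL(ji,wp)**2
       END DO

       zfwd = 0._wp                     ! naive sum, forward order
       DO ji = 1, n
          zfwd = zfwd + zdata(ji)
       END DO
       zbwd = 0._wp                     ! naive sum, reverse order
       DO ji = n, 1, -1
          zbwd = zbwd + zdata(ji)
       END DO

       yfwd = CMPLX(0._wp, 0._wp, wp)   ! compensated sum, forward order
       DO ji = 1, n
          CALL ddpdd( CMPLX(zdata(ji), 0._wp, wp), yfwd )
       END DO
       ybwd = CMPLX(0._wp, 0._wp, wp)   ! compensated sum, reverse order
       DO ji = n, 1, -1
          CALL ddpdd( CMPLX(zdata(ji), 0._wp, wp), ybwd )
       END DO

       WRITE(*,*) 'naive       forward - backward : ', zfwd - zbwd
       WRITE(*,*) 'compensated forward - backward : ', REAL(yfwd) - REAL(ybwd)

    CONTAINS

       SUBROUTINE ddpdd( ydda, yddb )   ! yddb := ydda + yddb, same step as DDPDD_MPI
          COMPLEX(wp), INTENT(in   ) :: ydda
          COMPLEX(wp), INTENT(inout) :: yddb
          REAL(wp) :: zerr, zt1, zt2
          zt1  = REAL(ydda) + REAL(yddb)
          zerr = zt1 - REAL(ydda)
          zt2  = ( (REAL(yddb) - zerr) + (REAL(ydda) - (zt1 - zerr)) ) &
             &   + AIMAG(ydda) + AIMAG(yddb)
          yddb = CMPLX( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1), wp )
       END SUBROUTINE ddpdd

    END PROGRAM ddpdd_demo

In the parallel code, each process accumulates its local contribution this way and MPI_ALLREDUCE combines the (high, low) pairs with MPI_SUMDD, which is what makes the global sums far less sensitive to the number of subdomains.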
branches/DEV_r1784_mid_year_merge_2010/NEMO/OPA_SRC/opa.F90  (r1953 → r1976)

      CALL opa_closefile
   #if defined key_oasis3 || defined key_oasis4
-     IF( Agrif_Root() ) THEN
-        CALL cpl_prism_finalize   ! end coupling and mpp communications with OASIS
-     ENDIF
+     CALL cpl_prism_finalize      ! end coupling and mpp communications with OASIS
   #else
      IF( lk_mpp )   CALL mppstop  ! end mpp communications
   …
      !!
      NAMELIST/namctl/ ln_ctl  , nn_print, nn_ictls, nn_ictle, &
-        &             nn_isplt, nn_jsplt, nn_jctls, nn_jctle, nn_bench , nn_bit_cmp
+        &             nn_isplt, nn_jsplt, nn_jctls, nn_jctle, nn_bench
      !!----------------------------------------------------------------------
      !
   …
   #if defined key_iomput
   # if defined key_oasis3 || defined key_oasis4
-     IF( Agrif_Root() ) THEN
-        CALL cpl_prism_init( ilocal_comm )   ! nemo local communicator given by oasis
-        CALL init_ioclient()                 ! io_server will get its communicators (if needed) from oasis (we don't see it)
-     ENDIF
+     CALL cpl_prism_init( ilocal_comm )      ! nemo local communicator given by oasis
+     CALL init_ioclient()                    ! io_server will get its communicators (if needed) from oasis (we don't see it)
   # else
-     IF( Agrif_Root() ) THEN
-        CALL init_ioclient( ilocal_comm )    ! nemo local communicator (used or not) given by the io_server
-     ENDIF
+     CALL init_ioclient( ilocal_comm )       ! nemo local communicator (used or not) given by the io_server
   # endif
      narea = mynode( cltxt, ilocal_comm )    ! Nodes selection
   …
   #else
   # if defined key_oasis3 || defined key_oasis4
-     IF( Agrif_Root() ) THEN
-        CALL cpl_prism_init( ilocal_comm )   ! nemo local communicator given by oasis
-     ENDIF
+     CALL cpl_prism_init( ilocal_comm )      ! nemo local communicator given by oasis
      narea = mynode( cltxt, ilocal_comm )    ! Nodes selection (control print return in cltxt)
   # else
   …
         WRITE(numout,*) ' number of proc. following j nn_jsplt = ', nn_jsplt
         WRITE(numout,*) ' benchmark parameter (0/1) nn_bench = ', nn_bench
-        WRITE(numout,*) ' bit comparison mode (0/1) nn_bit_cmp = ', nn_bit_cmp
      ENDIF

   …
      jsplt     = nn_jsplt
      nbench    = nn_bench
-     nbit_cmp  = nn_bit_cmp

      !                             ! Parameter control
   …
      ENDIF

-     IF( nbit_cmp == 1 ) THEN      ! Bit compare
-        CALL ctl_warn( ' Bit comparison enabled. Single and multiple processor results must bit compare', &
-           &           ' WARNING: RESULTS ARE NOT PHYSICAL.' )
-     ENDIF
-
      REWIND( numnam )              ! Read Namelist namdyn_hpg : ln_dynhpg_imp must be read at the initialisation phase
      READ  ( numnam, namdyn_hpg )