Changeset 2219
Timestamp:
    2010-10-12T15:06:30+02:00
Location:
    branches/DEV_r2191_3partymerge2010/NEMO
Files:
    12 edited, 1 copied
Legend:
    -   removed in this changeset
    +   added in this changeset
    unmarked lines are unchanged context; '…' marks omitted unchanged code
branches/DEV_r2191_3partymerge2010/NEMO/LIM_SRC_2/limdyn_2.F90
r2208 → r2219
      ! ---------------------------------------------------

-     IF( lk_mpp .OR. nbit_cmp == 1 ) THEN     ! mpp: compute over the whole domain
+     IF( lk_mpp ) THEN                        ! mpp: compute over the whole domain
         i_j1  = 1
         i_jpj = jpj
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/DOM/dom_oce.F90
r2208 → r2219
   LOGICAL, PUBLIC, PARAMETER ::   lk_agrif = .FALSE.    !: agrif flag

+  !!----------------------------------------------------------------------
+  !! mpp reproducibility
+  !!----------------------------------------------------------------------
+#if defined key_mpp_rep1 || defined key_mpp_re2
+  LOGICAL, PUBLIC, PARAMETER ::   lk_mpp_rep = .TRUE.    !: agrif flag
+#else
+  LOGICAL, PUBLIC, PARAMETER ::   lk_mpp_rep = .FALSE.   !: agrif flag
+#endif
+
CONTAINS
   LOGICAL FUNCTION Agrif_Root()
…
   END FUNCTION Agrif_CFixed
#endif
-
   !!======================================================================
END MODULE dom_oce
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/DOM/domain.F90
r2208 → r2219
      ENDIF

+#if defined key_agrif
      IF( Agrif_Root() ) THEN
-        SELECT CASE ( nleapy )        ! Choose calendar for IOIPSL
-        CASE (  1 )
-           CALL ioconf_calendar('gregorian')
-           IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "gregorian", i.e. leap year'
-        CASE (  0 )
-           CALL ioconf_calendar('noleap')
-           IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "noleap", i.e. no leap year'
-        CASE ( 30 )
-           CALL ioconf_calendar('360d')
-           IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "360d", i.e. 360 days in a year'
-        END SELECT
-     ENDIF
+#endif
+     SELECT CASE ( nleapy )        ! Choose calendar for IOIPSL
+     CASE (  1 )
+        CALL ioconf_calendar('gregorian')
+        IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "gregorian", i.e. leap year'
+     CASE (  0 )
+        CALL ioconf_calendar('noleap')
+        IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "noleap", i.e. no leap year'
+     CASE ( 30 )
+        CALL ioconf_calendar('360d')
+        IF(lwp) WRITE(numout,*) '   The IOIPSL calendar is "360d", i.e. 360 days in a year'
+     END SELECT
+#if defined key_agrif
+     ENDIF
+#endif

…
      n_cla = nn_cla        ! conversion DOCTOR names into model names (this should disappear soon)

-     IF( nbit_cmp == 1 .AND. n_cla /= 0 )   CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require n_cla = 0' )
+     IF( lk_mpp_rep .AND. n_cla /= 0 )   CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require n_cla = 0' )
      !
   END SUBROUTINE dom_nam
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/DYN/dynspg_flt.F90
r2208 → r2219
   USE iom
   USE restart         ! only for lrst_oce
+  USE lib_fortran
#if defined key_agrif
   USE agrif_opa_interp
…

#if defined key_obc
-     IF( lk_obc )   CALL obc_dyn( kt )   ! Update velocities on each open boundary with the radiation algorithm
-     IF( lk_obc )   CALL obc_vol( kt )   ! Correction of the barotropic componant velocity to control the volume of the system
+     CALL obc_dyn( kt )      ! Update velocities on each open boundary with the radiation algorithm
+     CALL obc_vol( kt )      ! Correction of the barotropic componant velocity to control the volume of the system
#endif
#if defined key_bdy
…
      ! ------------------
      rnorme =0.e0
-     rnorme = SUM( gcb(1:jpi,1:jpj) * gcdmat(1:jpi,1:jpj) * gcb(1:jpi,1:jpj) * bmask(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+     rnorme = GLOB_SUM( gcb(1:jpi,1:jpj) * gcdmat(1:jpi,1:jpj) * gcb(1:jpi,1:jpj) * bmask(:,:) )

      epsr = eps * eps * rnorme
…
#if defined key_obc
               ! caution : grad D = 0 along open boundaries
-              IF( Agrif_Root() ) THEN
-                 spgu(ji,jj) = z2dt * ztdgu * obcumask(ji,jj)
-                 spgv(ji,jj) = z2dt * ztdgv * obcvmask(ji,jj)
-              ELSE
-                 spgu(ji,jj) = z2dt * ztdgu
-                 spgv(ji,jj) = z2dt * ztdgv
-              ENDIF
+              spgu(ji,jj) = z2dt * ztdgu * obcumask(ji,jj)
+              spgv(ji,jj) = z2dt * ztdgv * obcvmask(ji,jj)
#elif defined key_bdy
               ! caution : grad D = 0 along open boundaries
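The GLOB_SUM / glob_sum calls introduced above and in the files that follow come from the new lib_fortran module, which is referenced by the added USE statements but is not itself part of this changeset. As a rough illustration of the pattern being wrapped (a local SUM followed by a single mpp_sum reduction), a minimal sketch could look like the code below; the name and layout are assumptions for illustration only, and the real lib_fortran routine additionally makes the result bit-reproducible via the MPI_SUMDD double-double reduction added to lib_mpp.F90 further down.

   FUNCTION glob_sum_sketch( ptab ) RESULT( psum )
      ! Illustrative sketch only, not the lib_fortran implementation:
      ! sum a 2D field over the whole model domain in a single call.
      REAL(wp), DIMENSION(:,:), INTENT(in) ::   ptab   ! field to be summed
      REAL(wp)                             ::   psum
      !
      psum = SUM( ptab(:,:) )                 ! local sum on this process
      IF( lk_mpp )   CALL mpp_sum( psum )     ! one reduction over the global domain
   END FUNCTION glob_sum_sketch

Written this way, call sites such as area = glob_sum( e1e2_i(:,:) ) replace two statements with one and keep the reduction strategy in a single place.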
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/IOM/in_out_manager.F90
r2208 → r2219

   !                                         !: OLD namelist names
-  INTEGER ::   nprint, nictls, nictle, njctls, njctle, isplt, jsplt, nbench, nbit_cmp
+  INTEGER ::   nprint, nictls, nictle, njctls, njctle, isplt, jsplt, nbench

   INTEGER ::   ijsplt = 1                   !: nb of local domain = nb of processors
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/SBC/sbcana.F90
r2208 → r2219

      ! Compute the emp flux such as its integration on the whole domain at each time is zero
-     IF( nbench /= 1 .AND. nbit_cmp /= 1 ) THEN
+     IF( nbench /= 1 ) THEN
         zsumemp = 0.e0   ;   zsurf = 0.e0
         DO jj = 1, jpj
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/SBC/sbcfwb.F90
r2208 → r2219
   USE lib_mpp         ! distribued memory computing library
   USE lbclnk          ! ocean lateral boundary conditions
+  USE lib_fortran

   IMPLICIT NONE
…
         !
         e1e2_i(:,:) = e1t(:,:) * e2t(:,:) * tmask_i(:,:)
-        area = SUM( e1e2_i(:,:) )
-        IF( lk_mpp )   CALL mpp_sum( area )       ! sum over the global domain
+        area = glob_sum( e1e2_i(:,:) )            ! sum over the global domain
         !
      ENDIF
…
         IF( MOD( kt, ikty ) == 0 ) THEN
            a_fwb_b = a_fwb
-           a_fwb   = SUM( e1e2_i(:,:) * sshn(:,:) )
-           IF( lk_mpp )   CALL mpp_sum( a_fwb )   ! sum over the global domain
+           a_fwb   = glob_sum( e1e2_i(:,:) * sshn(:,:) )   ! sum over the global domain
            a_fwb   = a_fwb * 1.e+3 / ( area * 86400. * 365. )   ! convert in Kg/m3/s = mm/s
!!gm        !                                                    !!bug 365d year
…
         z_fwf_nsrf =  zsum_fwf / ( zsurf_tospread + rsmall )
         !                                                  ! weight to respect erp field 2D structure
-        zsum_erp   = SUM( ztmsk_tospread(:,:) * erp(:,:) * e1e2_i(:,:) )
-        IF( lk_mpp )   CALL mpp_sum( zsum_erp )
-        z_wgt(:,:) = ztmsk_tospread(:,:) * erp(:,:) / ( zsum_erp + rsmall )
-
+        z_wgt(:,:) = ztmsk_tospread(:,:) * erp(:,:) / ( SUM( ztmsk_tospread(:,:) * erp(:,:) * e1e2_i(:,:) ) + rsmall )
         !                                                  ! final correction term to apply
         zerp_cor(:,:) = -1. * z_fwf_nsrf * zsurf_tospread * z_wgt(:,:)
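For reference, the conversion applied to a_fwb above can be read as follows (this reading is inferred from the code, not stated in the changeset): glob_sum( e1e2_i * sshn ) is a volume in m3; dividing by area (m2) gives the mean sea surface height in m, dividing by 86400 * 365 s turns it into a drift per second, and the factor 1.e+3 expresses it in mm/s, which for freshwater is equivalent to kg/m2/s. The !!gm note flags that a fixed 365-day year is assumed even in leap years.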
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/SOL/solpcg.F90
r2208 → r2219
   USE lbclnk          ! ocean lateral boundary conditions (or mpp link)
   USE in_out_manager  ! I/O manager
+  USE lib_fortran

   IMPLICIT NONE
…
      ! Initialization of the algorithm with standard PCG
      ! -------------------------------------------------
+     zgcr = 0.e0
+     gcr  = 0.e0

      CALL lbc_lnk( gcx, c_solver_pt, 1. )   ! lateral boundary condition
…
      ! gcr   = gcb-a.gcx
      ! gcdes = gcr
-
      DO jj = 2, jpjm1
         DO ji = fs_2, fs_jpim1   ! vector opt.
…

      ! rnorme = (gcr,gcr)
-     rnorme = SUM( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+     rnorme = glob_sum( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )

      CALL lbc_lnk( gcdes, c_solver_pt, 1. )   ! lateral boundary condition
…

      ! alph = (gcr,gcr)/(gcdes,gccd)
-     radd = SUM( gcdes(:,:) * gcdmat(:,:) * gccd(:,:) )
-     IF( lk_mpp )   CALL mpp_sum( radd )   ! sum over the global domain
+     radd = glob_sum( gcdes(:,:) * gcdmat(:,:) * gccd(:,:) )
      alph = rnorme /radd
…
         ! rnorme = (gcr,gcr)
         rr = rnorme
-        zsum(1) = SUM( gcr(:,:) * gcdmat(:,:) * gcr(:,:) )

         ! zgcad = (zgcr,gcr)
-        zsum(2) = SUM( gcr(2:jpim1,2:jpjm1) * gcdmat(2:jpim1,2:jpjm1) * zgcr(2:jpim1,2:jpjm1) )
-
-        IF( lk_mpp )   CALL mpp_sum( zsum, 2 )   ! sum over the global domain
+        zsum(1) = glob_sum(gcr(:,:) * gcdmat(:,:) * gcr(:,:))
+        zsum(2) = glob_sum(gcr(:,:) * gcdmat(:,:) * zgcr(:,:) * bmask(:,:))
+
+        !!RB we should gather the 2 glob_sum
         rnorme = zsum(1)
         zgcad  = zsum(2)
-
         ! test of convergence
         IF( rnorme < epsr .OR. jn == nn_nmax ) THEN
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/SOL/solsor.F90
r2208 → r2219
   USE lib_mpp         ! distributed memory computing
   USE lbclnk          ! ocean lateral boundary conditions (or mpp link)
+  USE lib_fortran

   IMPLICIT NONE
…
      INTEGER  ::   ijmppodd, ijmppeven, ijpr2d
      REAL(wp) ::   ztmp, zres, zres2
+     REAL(wp), DIMENSION(jpi,jpj) ::ztab
      !!----------------------------------------------------------------------
…
            ENDIF
         CASE ( 1 )                 ! relative precision
-           rnorme = SUM( gcr(2:nlci-1,2:nlcj-1) )
-           IF( lk_mpp )   CALL mpp_sum( rnorme )   ! sum over the global domain
+           ztab = 0.
+           ztab(:,:) = gcr(2:nlci-1,2:nlcj-1)
+           rnorme = glob_sum( ztab)                ! sum over the global domain
            ! test of convergence
            IF( rnorme < epsr .OR. jn == nn_nmax ) THEN
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/SOL/solver.F90
r2208 → r2219
      END SELECT
      !
-     IF( nbit_cmp == 1 ) THEN                  ! reproductibility test SOR required
-        IF( nn_solv /= 2 ) THEN
-           CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require the SOR solver: nn_solv = 2' )
-        ELSE IF( MAX( jpr2di, jpr2dj ) > 0 ) THEN
-           CALL ctl_stop( ' Reproductibility tests (nbit_cmp=1) require jpr2di = jpr2dj = 0' )
-        END IF
-     END IF

      !                             !* Grid-point at which the solver is applied
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/lib_mpp.F90
r2208 → r2219
   PUBLIC mppsize, mpprank

+# if defined key_mpp_rep1
+   PUBLIC mpp_allgatherv
+# endif
+
   !! * Interfaces
   !! define generic interface for these routine as they are called sometimes
…
   END INTERFACE
   INTERFACE mpp_sum
+# if defined key_mpp_rep2
+      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real,   &
+         &             mppsum_realdd, mppsum_a_realdd
+# else
      MODULE PROCEDURE mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real
+# endif
   END INTERFACE
   INTERFACE mpp_lbc_north
…
      MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d
   END INTERFACE

+# if defined key_mpp_rep1
+   INTERFACE mpp_allgatherv
+      MODULE PROCEDURE mpp_allgatherv_real, mpp_allgatherv_int
+   END INTERFACE
+# endif

   !! ========================= !!
…
   INTEGER ::   mppsize   ! number of process
   INTEGER ::   mpprank   ! process number  [ 0 - size-1 ]
-!$AGRIF_DO_NOT_TREAT
-   INTEGER, PUBLIC ::   mpi_comm_opa   ! opa local communicator
-!$AGRIF_END_DO_NOT_TREAT
+   INTEGER ::   mpi_comm_opa           ! opa local communicator
+
+   INTEGER, PUBLIC ::   MPI_SUMDD

   ! variables used in case of sea-ice
…
      WRITE(ldtxt(6),*) '      size in bytes of exported buffer   nn_buffer   = ', nn_buffer

-      CALL mpi_initialized ( mpi_was_called, code )
-      IF( code /= MPI_SUCCESS ) THEN
-         WRITE(*, cform_err)
-         WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
-         CALL mpi_abort( mpi_comm_world, code, ierr )
-      ENDIF
-
-      IF( mpi_was_called ) THEN
-         !
+#if defined key_agrif
+      IF( Agrif_Root() ) THEN
+#endif
+         !!bug RB : should be clean to use Agrif in coupled mode
+#if ! defined key_agrif
+         CALL mpi_initialized ( mpi_was_called, code )
+         IF( code /= MPI_SUCCESS ) THEN
+            WRITE(*, cform_err)
+            WRITE(*, *) 'lib_mpp: Error in routine mpi_initialized'
+            CALL mpi_abort( mpi_comm_world, code, ierr )
+         ENDIF
+
+         IF( PRESENT(localComm) .and. mpi_was_called ) THEN
+            mpi_comm_opa = localComm
+            SELECT CASE ( cn_mpi_send )
+            CASE ( 'S' )                ! Standard mpi send (blocking)
+               WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
+            CASE ( 'B' )                ! Buffer mpi send (blocking)
+               WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
+               CALL mpi_init_opa( ierr )
+            CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
+               WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
+               l_isend = .TRUE.
+            CASE DEFAULT
+               WRITE(ldtxt(7),cform_err)
+               WRITE(ldtxt(8),*) '            bad value for cn_mpi_send = ', cn_mpi_send
+               nstop = nstop + 1
+            END SELECT
+         ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN
+            WRITE(ldtxt(7),*) ' lib_mpp: You cannot provide a local communicator '
+            WRITE(ldtxt(8),*) '          without calling MPI_Init before ! '
+            nstop = nstop + 1
+         ELSE
+#endif
+            SELECT CASE ( cn_mpi_send )
+            CASE ( 'S' )                ! Standard mpi send (blocking)
+               WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
+               CALL mpi_init( ierr )
+            CASE ( 'B' )                ! Buffer mpi send (blocking)
+               WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
+               CALL mpi_init_opa( ierr )
+            CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
+               WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
+               l_isend = .TRUE.
+               CALL mpi_init( ierr )
+            CASE DEFAULT
+               WRITE(ldtxt(7),cform_err)
+               WRITE(ldtxt(8),*) '            bad value for cn_mpi_send = ', cn_mpi_send
+               nstop = nstop + 1
+            END SELECT
+
+#if ! defined key_agrif
+            CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
+            IF( code /= MPI_SUCCESS ) THEN
+               WRITE(*, cform_err)
+               WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
+               CALL mpi_abort( mpi_comm_world, code, ierr )
+            ENDIF
+            !
+         ENDIF
+#endif
+#if defined key_agrif
+      ELSE
         SELECT CASE ( cn_mpi_send )
         CASE ( 'S' )                ! Standard mpi send (blocking)
…
         CASE ( 'B' )                ! Buffer mpi send (blocking)
            WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
-           CALL mpi_init_opa( ierr )
         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
            WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
…
            nstop = nstop + 1
         END SELECT
-      ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN
-         WRITE(ldtxt(7),*) ' lib_mpp: You cannot provide a local communicator '
-         WRITE(ldtxt(8),*) '          without calling MPI_Init before ! '
-         nstop = nstop + 1
-      ELSE
-         SELECT CASE ( cn_mpi_send )
-         CASE ( 'S' )                ! Standard mpi send (blocking)
-            WRITE(ldtxt(7),*) '           Standard blocking mpi send (send)'
-            CALL mpi_init( ierr )
-         CASE ( 'B' )                ! Buffer mpi send (blocking)
-            WRITE(ldtxt(7),*) '           Buffer blocking mpi send (bsend)'
-            CALL mpi_init_opa( ierr )
-         CASE ( 'I' )                ! Immediate mpi send (non-blocking send)
-            WRITE(ldtxt(7),*) '           Immediate non-blocking send (isend)'
-            l_isend = .TRUE.
-            CALL mpi_init( ierr )
-         CASE DEFAULT
-            WRITE(ldtxt(7),cform_err)
-            WRITE(ldtxt(8),*) '            bad value for cn_mpi_send = ', cn_mpi_send
-            nstop = nstop + 1
-         END SELECT
-         !
      ENDIF

-      IF( PRESENT(localComm) ) THEN
-         IF( Agrif_Root() ) THEN
-            mpi_comm_opa = localComm
-         ENDIF
-      ELSE
-         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
-         IF( code /= MPI_SUCCESS ) THEN
-            WRITE(*, cform_err)
-            WRITE(*, *) ' lib_mpp: Error in routine mpi_comm_dup'
-            CALL mpi_abort( mpi_comm_world, code, ierr )
-         ENDIF
-      ENDIF
-
+         mpi_comm_opa = mpi_comm_world
+#endif
      CALL mpi_comm_rank( mpi_comm_opa, mpprank, ierr )
      CALL mpi_comm_size( mpi_comm_opa, mppsize, ierr )
      mynode = mpprank
+      !
+#if defined key_mpp_rep2
+      CALL MPI_OP_CREATE(DDPDD_MPI, .TRUE., MPI_SUMDD, ierr)
+#endif
      !
   END FUNCTION mynode
…
   END SUBROUTINE mppsum_real

+# if defined key_mpp_rep2
+   SUBROUTINE mppsum_realdd( ytab, kcom )
+      !!----------------------------------------------------------------------
+      !!                  ***  routine mppsum_realdd ***
+      !!
+      !! ** Purpose :   global sum in Massively Parallel Processing
+      !!                SCALAR argument case for double-double precision
+      !!
+      !!-----------------------------------------------------------------------
+      COMPLEX(wp), INTENT(inout)       ::   ytab    ! input scalar
+      INTEGER , INTENT( in ), OPTIONAL ::   kcom
+
+      !! * Local variables   (MPI version)
+      INTEGER     ::   ierror
+      INTEGER     ::   localcomm
+      COMPLEX(wp) ::   zwork
+
+      localcomm = mpi_comm_opa
+      IF( PRESENT(kcom) ) localcomm = kcom
+
+      ! reduce local sums into global sum
+      CALL MPI_ALLREDUCE (ytab, zwork, 1, MPI_DOUBLE_COMPLEX,   &
+                          MPI_SUMDD,localcomm,ierror)
+      ytab = zwork
+
+   END SUBROUTINE mppsum_realdd
+
+
+   SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom )
+      !!----------------------------------------------------------------------
+      !!                  ***  routine mppsum_a_realdd  ***
+      !!
+      !! ** Purpose :   global sum in Massively Parallel Processing
+      !!                COMPLEX ARRAY case for double-double precision
+      !!
+      !!-----------------------------------------------------------------------
+      INTEGER , INTENT( in )                        ::   kdim   ! size of ytab
+      COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) ::   ytab   ! input array
+      INTEGER , INTENT( in ), OPTIONAL              ::   kcom
+
+      !! * Local variables   (MPI version)
+      INTEGER                      ::   ierror      ! temporary integer
+      INTEGER                      ::   localcomm
+      COMPLEX(wp), DIMENSION(kdim) ::   zwork       ! temporary workspace
+
+      localcomm = mpi_comm_opa
+      IF( PRESENT(kcom) ) localcomm = kcom
+
+      CALL MPI_ALLREDUCE (ytab, zwork, kdim, MPI_DOUBLE_COMPLEX,   &
+                          MPI_SUMDD,localcomm,ierror)
+      ytab(:) = zwork(:)
+
+   END SUBROUTINE mppsum_a_realdd
+# endif
+
   SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki,kj )
…
      ijpj   = 4
      ijpjm1 = 3
-     ztab(:,:,:) = 0.e0
      !
      DO jj = nlcj - ijpj +1, nlcj          ! put in znorthloc the last 4 jlines of pt3d
…
      ijpj   = 4
      ijpjm1 = 3
-     ztab(:,:) = 0.e0
      !
      DO jj = nlcj-ijpj+1, nlcj             ! put in znorthloc the last 4 jlines of pt2d
…
      !
      ijpj=4
-     ztab(:,:) = 0.e0

      ij=0
…
   END SUBROUTINE mpi_init_opa

+#if defined key_mpp_rep1
+   SUBROUTINE mpp_allgatherv_real( pvalsin, knoin, pvalsout, ksizeout, &
+      &                            knoout, kstartout )
+      !!----------------------------------------------------------------------
+      !!               ***  ROUTINE mpp_allgatherv_real ***
+      !!
+      !! ** Purpose : Gather a real array on all processors
+      !!
+      !! ** Method  : MPI all gatherv
+      !!
+      !! ** Action  : This does only work for MPI.
+      !!              It does not work for SHMEM.
+      !!
+      !! References : http://www.mpi-forum.org
+      !!
+      !! History :
+      !!        !  08-08  (K. Mogensen)  Original code
+      !!----------------------------------------------------------------------
+
+      !! * Arguments
+      INTEGER, INTENT(IN) ::   knoin, ksizeout
+      REAL(wp), DIMENSION(knoin),    INTENT(IN)  ::   pvalsin
+      REAL(wp), DIMENSION(ksizeout), INTENT(OUT) ::   pvalsout
+      INTEGER,  DIMENSION(jpnij),    INTENT(OUT) ::   kstartout, knoout
+
+      !! * Local declarations
+      INTEGER ::   ierr
+      INTEGER ::   ji
+      !-----------------------------------------------------------------------
+      ! Call the MPI library to get number of data per processor
+      !-----------------------------------------------------------------------
+      CALL mpi_allgather( knoin,  1, mpi_integer, &
+         &                knoout, 1, mpi_integer, &
+         &                mpi_comm_opa, ierr )
+      !-----------------------------------------------------------------------
+      ! Compute starts of each processors contribution
+      !-----------------------------------------------------------------------
+      kstartout(1) = 0
+      DO ji = 2, jpnij
+         kstartout(ji) = kstartout(ji-1) + knoout(ji-1)
+      ENDDO
+      !-----------------------------------------------------------------------
+      ! Call the MPI library to do the gathering of the data
+      !-----------------------------------------------------------------------
+      CALL mpi_allgatherv( pvalsin,  knoin,  MPI_DOUBLE_PRECISION,            &
+         &                 pvalsout, knoout, kstartout, MPI_DOUBLE_PRECISION, &
+         &                 mpi_comm_opa, ierr )
+
+   END SUBROUTINE mpp_allgatherv_real
+
+   SUBROUTINE mpp_allgatherv_int( kvalsin, knoin, kvalsout, ksizeout, &
+      &                           knoout, kstartout )
+      !!----------------------------------------------------------------------
+      !!               ***  ROUTINE mpp_allgatherv ***
+      !!
+      !! ** Purpose : Gather an integer array on all processors
+      !!
+      !! ** Method  : MPI all gatherv
+      !!
+      !! ** Action  : This does only work for MPI.
+      !!              It does not work for SHMEM.
+      !!
+      !! References : http://www.mpi-forum.org
+      !!
+      !! History :
+      !!        !  06-07  (K. Mogensen)  Original code
+      !!----------------------------------------------------------------------
+
+      !! * Arguments
+      INTEGER, INTENT(IN) ::   knoin, ksizeout
+      INTEGER, DIMENSION(knoin),    INTENT(IN)  ::   kvalsin
+      INTEGER, DIMENSION(ksizeout), INTENT(OUT) ::   kvalsout
+      INTEGER, DIMENSION(jpnij),    INTENT(OUT) ::   kstartout, knoout
+
+      !! * Local declarations
+      INTEGER ::   ierr
+      INTEGER ::   ji
+      !-----------------------------------------------------------------------
+      ! Call the MPI library to get number of data per processor
+      !-----------------------------------------------------------------------
+      CALL mpi_allgather( knoin,  1, mpi_integer, &
+         &                knoout, 1, mpi_integer, &
+         &                mpi_comm_opa, ierr )
+      !-----------------------------------------------------------------------
+      ! Compute starts of each processors contribution
+      !-----------------------------------------------------------------------
+      kstartout(1) = 0
+      DO ji = 2, jpnij
+         kstartout(ji) = kstartout(ji-1) + knoout(ji-1)
+      ENDDO
+      !-----------------------------------------------------------------------
+      ! Call the MPI library to do the gathering of the data
+      !-----------------------------------------------------------------------
+      CALL mpi_allgatherv( kvalsin,  knoin,  mpi_integer,            &
+         &                 kvalsout, knoout, kstartout, mpi_integer, &
+         &                 mpi_comm_opa, ierr )
+
+   END SUBROUTINE mpp_allgatherv_int
+#endif
+
+#if defined key_mpp_rep2
+   SUBROUTINE DDPDD_MPI (ydda, yddb, ilen, itype)
+      !!---------------------------------------------------------------------
+      !!   Routine DDPDD_MPI: used by reduction operator MPI_SUMDD
+      !!
+      !!   Modification of original codes written by David H. Bailey
+      !!   This subroutine computes yddb(i) = ydda(i)+yddb(i)
+      !!---------------------------------------------------------------------
+      INTEGER, INTENT(in)                         ::   ilen, itype
+      COMPLEX(wp), DIMENSION(ilen), INTENT(in)    ::   ydda
+      COMPLEX(wp), DIMENSION(ilen), INTENT(inout) ::   yddb
+      !
+      REAL(wp) ::   zerr, zt1, zt2    ! local work variables
+      INTEGER  ::   ji, ztmp          ! local scalar
+
+      ztmp = itype   ! avoid compilation warning
+
+      DO ji=1,ilen
+         ! Compute ydda + yddb using Knuth's trick.
+         zt1  = real(ydda(ji)) + real(yddb(ji))
+         zerr = zt1 - real(ydda(ji))
+         zt2  = ((real(yddb(ji)) - zerr) + (real(ydda(ji)) - (zt1 - zerr))) &
+                + aimag(ydda(ji)) + aimag(yddb(ji))
+
+         ! The result is zt1 + zt2, after normalization.
+         yddb(ji) = cmplx ( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1),wp )
+      END DO
+
+   END SUBROUTINE DDPDD_MPI
+#endif
+
#else
   !!----------------------------------------------------------------------
   !!   Default case:            Dummy module        share memory computing
   !!----------------------------------------------------------------------
+# if defined key_mpp_rep1
+   USE par_kind
+   USE par_oce
+
+   PUBLIC mpp_allgatherv
+# endif
+
   INTERFACE mpp_sum
-     MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i
+     MODULE PROCEDURE mpp_sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i, &
+        &             mpp_sum_c, mpp_sum_ac
   END INTERFACE
   INTERFACE mpp_max
…
   END INTERFACE

+# if defined key_mpp_rep1
+   INTERFACE mpp_allgatherv
+      MODULE PROCEDURE mpp_allgatherv_real, mpp_allgatherv_int
+   END INTERFACE
+# endif

   LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.      !: mpp flag
…
   END SUBROUTINE mpp_sum_ai

+   SUBROUTINE mpp_sum_ac( yarr, kdim, kcom )     ! Dummy routine
+      COMPLEX, DIMENSION(:) ::   yarr
+      INTEGER               ::   kdim
+      INTEGER, OPTIONAL     ::   kcom
+      WRITE(*,*) 'mpp_sum_ac: You should not have seen this print! error?', kdim, yarr(1), kcom
+   END SUBROUTINE mpp_sum_ac
+
   SUBROUTINE mpp_sum_s( psca, kcom )            ! Dummy routine
      REAL ::   psca
…
      WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom
   END SUBROUTINE mpp_sum_s

   SUBROUTINE mpp_sum_i( kint, kcom )            ! Dummy routine
      integer ::   kint
      INTEGER, OPTIONAL ::   kcom
      WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom
   END SUBROUTINE mpp_sum_i
+
+   SUBROUTINE mpp_sum_c( ysca, kcom )            ! Dummy routine
+      COMPLEX           ::   ysca
+      INTEGER, OPTIONAL ::   kcom
+      WRITE(*,*) 'mpp_sum_c: You should not have seen this print! error?', ysca, kcom
+   END SUBROUTINE mpp_sum_c

   SUBROUTINE mppmax_a_real( parr, kdim, kcom )
…
   END SUBROUTINE mpp_comm_free

+# if defined key_mpp_rep1
+   SUBROUTINE mpp_allgatherv_real( pvalsin, knoin, pvalsout, ksizeout, &
+      &                            knoout, kstartout )
+      INTEGER, INTENT(IN) ::   knoin, ksizeout
+      REAL(wp), DIMENSION(knoin),    INTENT(IN)  ::   pvalsin
+      REAL(wp), DIMENSION(ksizeout), INTENT(OUT) ::   pvalsout
+      INTEGER,  DIMENSION(jpnij),    INTENT(OUT) ::   kstartout, knoout
+      pvalsout(1:knoin) = pvalsin(1:knoin)
+      kstartout(1) = 0
+      knoout(1)    = knoin
+   END SUBROUTINE mpp_allgatherv_real
+
+   SUBROUTINE mpp_allgatherv_int( kvalsin, knoin, kvalsout, ksizeout, &
+      &                           knoout, kstartout )
+      INTEGER, INTENT(IN) ::   knoin, ksizeout
+      INTEGER, DIMENSION(knoin),    INTENT(IN)  ::   kvalsin
+      INTEGER, DIMENSION(ksizeout), INTENT(OUT) ::   kvalsout
+      INTEGER, DIMENSION(jpnij),    INTENT(OUT) ::   kstartout, knoout
+
+      kvalsout(1:knoin) = kvalsin(1:knoin)
+      kstartout(1) = 0
+      knoout(1)    = knoin
+   END SUBROUTINE mpp_allgatherv_int
+# endif
+
#endif
   !!----------------------------------------------------------------------
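The DDPDD_MPI routine added above is the heart of the key_mpp_rep2 option: each partial sum is carried as a (high, low) double-double pair packed into a COMPLEX value, and pairs are combined with Knuth's error-free two-sum, so the result of the MPI_SUMDD reduction no longer depends on the number of processes or on the order in which contributions arrive. The stand-alone program below is an illustrative sketch of that arithmetic, not part of the changeset; it reuses the same two-sum expressions as DDPDD_MPI and shows a case where a naive sum loses information that the double-double sum keeps.

PROGRAM ddpdd_demo
   ! Stand-alone illustration of the double-double accumulation used by
   ! DDPDD_MPI: the rounding error of each addition is kept in the low part,
   ! so the final result does not depend on the summation order.
   IMPLICIT NONE
   INTEGER, PARAMETER :: wp = SELECTED_REAL_KIND(12, 307)   ! double precision
   COMPLEX(wp) :: ysum
   REAL(wp)    :: zval(4)
   INTEGER     :: ji

   zval = (/ 1.e+16_wp, 1.0_wp, -1.e+16_wp, 1.0_wp /)

   ysum = CMPLX( 0._wp, 0._wp, wp )
   DO ji = 1, SIZE(zval)
      CALL add_dd( CMPLX( zval(ji), 0._wp, wp ), ysum )
   END DO
   ! a naive in-order SUM loses the two 1.0 contributions next to 1.e+16
   PRINT *, 'naive sum         = ', SUM( zval )
   PRINT *, 'double-double sum = ', REAL( ysum, wp )

CONTAINS

   SUBROUTINE add_dd( ydda, yddb )
      ! yddb := ydda + yddb using Knuth's two-sum (same arithmetic as DDPDD_MPI)
      COMPLEX(wp), INTENT(in)    :: ydda
      COMPLEX(wp), INTENT(inout) :: yddb
      REAL(wp) :: zerr, zt1, zt2
      zt1  = REAL(ydda) + REAL(yddb)
      zerr = zt1 - REAL(ydda)
      zt2  = ( (REAL(yddb) - zerr) + (REAL(ydda) - (zt1 - zerr)) )   &
         &   + AIMAG(ydda) + AIMAG(yddb)
      yddb = CMPLX( zt1 + zt2, zt2 - ((zt1 + zt2) - zt1), wp )
   END SUBROUTINE add_dd

END PROGRAM ddpdd_demo

With double-precision reals the naive sum returns 1.0 while the double-double sum returns the exact value 2.0, which is why the same machinery makes glob_sum results independent of the domain decomposition.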
branches/DEV_r2191_3partymerge2010/NEMO/OPA_SRC/opa.F90
r2218 → r2219
      CALL opa_closefile
#if defined key_oasis3 || defined key_oasis4
-     IF( Agrif_Root() ) THEN
-        CALL cpl_prism_finalize       ! end coupling and mpp communications with OASIS
-     ENDIF
+     CALL cpl_prism_finalize          ! end coupling and mpp communications with OASIS
#else
      IF( lk_mpp )   CALL mppstop      ! end mpp communications
…
      !!
      NAMELIST/namctl/ ln_ctl  , nn_print, nn_ictls, nn_ictle,   &
-        &             nn_isplt, nn_jsplt, nn_jctls, nn_jctle, nn_bench, nn_bit_cmp
+        &             nn_isplt, nn_jsplt, nn_jctls, nn_jctle, nn_bench
      !!----------------------------------------------------------------------
      !
…
         WRITE(numout,*) '      number of proc. following j     nn_jsplt   = ', nn_jsplt
         WRITE(numout,*) '      benchmark parameter (0/1)       nn_bench   = ', nn_bench
-        WRITE(numout,*) '      bit comparison mode (0/1)       nn_bit_cmp = ', nn_bit_cmp
      ENDIF

…
      jsplt     = nn_jsplt
      nbench    = nn_bench
-     nbit_cmp  = nn_bit_cmp

      !                             ! Parameter control
…
      ENDIF

-     IF( nbit_cmp == 1 ) THEN                ! Bit compare
-        CALL ctl_warn( ' Bit comparison enabled. Single and multiple processor results must bit compare', &
-             &         ' WARNING: RESULTS ARE NOT PHYSICAL.' )
-     ENDIF
-
      REWIND( numnam )              ! Read Namelist namdyn_hpg : ln_dynhpg_imp must be read at the initialisation phase
      READ  ( numnam, namdyn_hpg )