Changeset 2668 for branches/dev_r2586_dynamic_mem/NEMOGCM
- Timestamp:
- 2011-03-07T20:14:16+01:00 (12 years ago)
- Location:
- branches/dev_r2586_dynamic_mem/NEMOGCM
- Files:
-
- 5 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/dev_r2586_dynamic_mem/NEMOGCM/CONFIG/GYRE/EXP00/namelist
r2528 r2668 672 672 !!====================================================================== 673 673 !! nammpp Massively Parallel Processing ("key_mpp_mpi) 674 !! nammpp_dyndist Massively Parallel domain decomposition ("key_agrif" && "key_mpp_dyndist")675 674 !! namctl Control prints & Benchmark 676 675 !! namsol elliptic solver / island / free surface … … 696 695 ! buffer blocking send or immediate non-blocking sends, resp. 697 696 nn_buffer = 0 ! size in bytes of exported buffer ('B' case), 0 no exportation 698 / 699 !----------------------------------------------------------------------- 700 &nammpp_dyndist ! Massively Parallel Distribution for AGRIF zoom ("key_agrif" && "key_mpp_dyndist") 701 !----------------------------------------------------------------------- 702 jpni = 1 ! jpni number of processors following i 703 jpnj = 1 ! jpnj number of processors following j 704 jpnij = 1 ! jpnij number of local domains 697 jpni = 0 ! jpni number of processors following i (set automatically if < 1) 698 jpnj = 0 ! jpnj number of processors following j (set automatically if < 1) 699 jpnij = 0 ! jpnij number of local domains (set automatically if < 1) 705 700 / 706 701 !----------------------------------------------------------------------- -
branches/dev_r2586_dynamic_mem/NEMOGCM/CONFIG/ORCA2_LIM/EXP00/namelist
r2551 r2668 669 669 670 670 !!====================================================================== 671 !! *** Miscel aneous namelists ***671 !! *** Miscellaneous namelists *** 672 672 !!====================================================================== 673 673 !! nammpp Massively Parallel Processing ("key_mpp_mpi) 674 !! nammpp_dyndist Massively Parallel domain decomposition ("key_agrif" && "key_mpp_dyndist")675 674 !! namctl Control prints & Benchmark 676 675 !! namsol elliptic solver / island / free surface … … 696 695 ! buffer blocking send or immediate non-blocking sends, resp. 697 696 nn_buffer = 0 ! size in bytes of exported buffer ('B' case), 0 no exportation 698 / 699 !----------------------------------------------------------------------- 700 &nammpp_dyndist ! Massively Parallel Distribution for AGRIF zoom ("key_agrif" && "key_mpp_dyndist") 701 !----------------------------------------------------------------------- 702 jpni = 1 ! jpni number of processors following i 703 jpnj = 1 ! jpnj number of processors following j 704 jpnij = 1 ! jpnij number of local domains 697 jpni = 0 ! jpni number of processors following i (set automatically if < 1) 698 jpnj = 0 ! jpnj number of processors following j (set automatically if < 1) 699 jpnij = 0 ! jpnij number of local domains (set automatically if < 1) 705 700 / 706 701 !----------------------------------------------------------------------- -
branches/dev_r2586_dynamic_mem/NEMOGCM/CONFIG/ORCA2_OFF_PISCES/EXP00/namelist
r2528 r2668 686 686 !!====================================================================== 687 687 !! nammpp Massively Parallel Processing ("key_mpp_mpi) 688 !! nammpp_dyndist Massively Parallel domain decomposition ("key_agrif" && "key_mpp_dyndist")689 688 !! namctl Control prints & Benchmark 690 689 !! namsol elliptic solver / island / free surface … … 710 709 ! buffer blocking send or immediate non-blocking sends, resp. 711 710 nn_buffer = 0 ! size in bytes of exported buffer ('B' case), 0 no exportation 712 / 713 !----------------------------------------------------------------------- 714 &nammpp_dyndist ! Massively Parallel Distribution for AGRIF zoom ("key_agrif" && "key_mpp_dyndist") 715 !----------------------------------------------------------------------- 716 jpni = 1 ! jpni number of processors following i 717 jpnj = 1 ! jpnj number of processors following j 718 jpnij = 1 ! jpnij number of local domains 711 jpni = 0 ! jpni number of processors following i (set automatically if < 1) 712 jpnj = 0 ! jpnj number of processors following j (set automatically if < 1) 713 jpnij = 0 ! jpnij number of local domains (set automatically if < 1) 719 714 / 720 715 !----------------------------------------------------------------------- -
branches/dev_r2586_dynamic_mem/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r2636 r2668 232 232 LOGICAL :: mpi_was_called 233 233 ! 234 NAMELIST/nammpp/ cn_mpi_send, nn_buffer 234 NAMELIST/nammpp/ cn_mpi_send, nn_buffer, jpni, jpnj, jpnij 235 235 !!---------------------------------------------------------------------- 236 236 ! … … 240 240 WRITE(ldtxt(ii),*) '~~~~~~ ' ; ii = ii + 1 241 241 ! 242 jpni = -1; jpnj = -1; jpnij = -1 242 243 REWIND( kumnam ) ! Namelist namrun : parameters of the run 243 244 READ ( kumnam, nammpp ) … … 246 247 WRITE(ldtxt(ii),*) ' mpi send type cn_mpi_send = ', cn_mpi_send ; ii = ii + 1 247 248 WRITE(ldtxt(ii),*) ' size in bytes of exported buffer nn_buffer = ', nn_buffer ; ii = ii + 1 249 250 IF(jpnij < 1)THEN 251 ! If jpnij is not specified in namelist then we calculate it - this 252 ! means there will be no land cutting out. 253 jpnij = jpni * jpnj 254 END IF 255 256 IF( (jpni < 1) .OR. (jpnj < 1) )THEN 257 WRITE(ldtxt(ii),*) ' jpni, jpnj and jpnij will be calculated automatically'; ii = ii + 1 258 ELSE 259 WRITE(ldtxt(ii),*) ' processor grid extent in i jpni = ',jpni; ii = ii + 1 260 WRITE(ldtxt(ii),*) ' processor grid extent in j jpnj = ',jpnj; ii = ii + 1 261 WRITE(ldtxt(ii),*) ' number of local domains jpnij = ',jpnij; ii = ii +1 262 END IF 248 263 249 264 CALL mpi_initialized ( mpi_was_called, code ) -
branches/dev_r2586_dynamic_mem/NEMOGCM/NEMO/OPA_SRC/nemogcm.F90
r2664 r2668 184 184 INTEGER :: ji ! dummy loop indices 185 185 INTEGER :: ilocal_comm ! local integer 186 CHARACTER(len=80), DIMENSION(1 0) :: cltxt186 CHARACTER(len=80), DIMENSION(16) :: cltxt 187 187 !! 188 188 NAMELIST/namctl/ ln_ctl , nn_print, nn_ictls, nn_ictle, & … … 223 223 lwp = (narea == 1) .OR. ln_ctl ! control of all listing output print 224 224 225 ! Decide on size of grid now that we have our communicator size 226 225 ! If dimensions of processor grid weren't specified in the namelist file 226 ! then we calculate them here now that we have our communicator size 227 IF( (jpni < 1) .OR. (jpnj < 1) )THEN 227 228 #if defined key_mpp_mpi || defined key_mpp_shmem 228 CALL nemo_partition(mppsize)229 CALL nemo_partition(mppsize) 229 230 #else 230 jpni = 1 231 jpnj = 1 232 jpnij = jpni*jpnj 233 #endif 231 jpni = 1 232 jpnj = 1 233 jpnij = jpni*jpnj 234 #endif 235 END IF 236 234 237 ! Calculate domain dimensions given calculated jpni and jpnj 235 238 ! This used to be done in par_oce.F90 when they were parameters rather … … 537 540 jpni = ifact(imin + 1) 538 541 ENDIF 542 ! 539 543 jpnij = jpni*jpnj 540 541 WRITE(*,*) 'ARPDBG: jpni = ',jpni,'jpnj = ',jpnj,'jpnij = ',jpnij542 544 ! 543 545 END SUBROUTINE nemo_partition
Note: See TracChangeset for help on using the changeset viewer.