- Timestamp: 2019-06-27T12:40:32+02:00
- Location: NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE
- Files: 9 edited
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/CRS/crsdom.F90
r10068 → r11192

@@ lines 296-301
      ENDDO

-     CALL crs_lbc_lnk( p_e1_crs, cd_type, 1.0, pval=1.0 )
-     CALL crs_lbc_lnk( p_e2_crs, cd_type, 1.0, pval=1.0 )
+     CALL crs_lbc_lnk( p_e1_crs, cd_type, 1.0, pfillval=1.0 )
+     CALL crs_lbc_lnk( p_e2_crs, cd_type, 1.0, pfillval=1.0 )

   END SUBROUTINE crs_dom_hgr
…
@@ lines 1748-1753
      ENDDO

-     CALL crs_lbc_lnk( p_e3_crs    , cd_type, 1.0, pval=1.0 )
-     CALL crs_lbc_lnk( p_e3_max_crs, cd_type, 1.0, pval=1.0 )
+     CALL crs_lbc_lnk( p_e3_crs    , cd_type, 1.0, pfillval=1.0 )
+     CALL crs_lbc_lnk( p_e3_max_crs, cd_type, 1.0, pfillval=1.0 )
…
@@ lines 1857-1862
      ENDDO

-     CALL crs_lbc_lnk( p_surf_crs    , cd_type, 1.0, pval=1.0 )
-     CALL crs_lbc_lnk( p_surf_crs_msk, cd_type, 1.0, pval=1.0 )
+     CALL crs_lbc_lnk( p_surf_crs    , cd_type, 1.0, pfillval=1.0 )
+     CALL crs_lbc_lnk( p_surf_crs_msk, cd_type, 1.0, pfillval=1.0 )

   END SUBROUTINE crs_dom_sfc
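For call sites like these the rename is mechanical: the keyword of the optional background-value argument changes from pval to pfillval. A minimal, self-contained sketch of a routine with the new keyword and a caller using it (demo names, not NEMO code):

   MODULE fill_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: wp = KIND(1.d0)
   CONTAINS
      SUBROUTINE demo_lnk( ptab, pfillval )
         REAL(wp),           INTENT(inout) :: ptab(:,:)
         REAL(wp), OPTIONAL, INTENT(in   ) :: pfillval   ! background value at closed boundaries
         REAL(wp) :: zfill
         zfill = 0._wp                         ! default when the keyword is absent
         IF( PRESENT(pfillval) )   zfill = pfillval
         ptab(1,:) = zfill                     ! stand-in for the real halo fill
      END SUBROUTINE demo_lnk
   END MODULE fill_demo

   PROGRAM test_fill
      USE fill_demo
      IMPLICIT NONE
      REAL(wp) :: ze1(4,4)
      ze1 = 1._wp
      CALL demo_lnk( ze1, pfillval=1._wp )     ! keyword matches the new interface
   END PROGRAM test_fill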
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/CRS/crslbclnk.F90
r10425 → r11192

Both coarse-grid wrappers lose the cd_mpp/pval pair, together with the local zval copy and its PRESENT() tests, in favour of the new kfillmode/pfillval optionals, which are simply forwarded to lbc_lnk.

@@ SUBROUTINE crs_lbc_lnk_3d (lines 29-58)
-  SUBROUTINE crs_lbc_lnk_3d( pt3d1, cd_type1, psgn, cd_mpp, pval )
+  SUBROUTINE crs_lbc_lnk_3d( pt3d1, cd_type1, psgn, kfillmode, pfillval )
…
      REAL(wp)                                , INTENT(in   ) :: psgn    ! control of the sign
      REAL(wp), DIMENSION(jpi_crs,jpj_crs,jpk), INTENT(inout) :: pt3d1   ! 3D array on which the lbc is applied
-     REAL(wp)        , OPTIONAL, INTENT(in   ) :: pval       ! value on the halos
-     CHARACTER(len=3), OPTIONAL, INTENT(in   ) :: cd_mpp     ! MPP only (here do nothing)
+     INTEGER         , OPTIONAL, INTENT(in   ) :: kfillmode  ! filling method for halo over land (default = cst)
+     REAL(wp)        , OPTIONAL, INTENT(in   ) :: pfillval   ! background value (used at closed boundaries)
      !
      LOGICAL  :: ll_grid_crs
-     REAL(wp) :: zval               ! value on the halos
      !
      ll_grid_crs = ( jpi == jpi_crs )
      !
-     IF( PRESENT(pval) ) THEN   ;   zval = pval
-     ELSE                       ;   zval = 0._wp
-     ENDIF
-     !
      IF( .NOT.ll_grid_crs )   CALL dom_grid_crs   ! Save the parent grid information & Switch to coarse grid domain
      !
-     IF( PRESENT( cd_mpp ) ) THEN   ;   CALL lbc_lnk( 'crslbclnk', pt3d1, cd_type1, psgn, cd_mpp, pval=zval )
-     ELSE                           ;   CALL lbc_lnk( 'crslbclnk', pt3d1, cd_type1, psgn        , pval=zval )
-     ENDIF
+     CALL lbc_lnk( 'crslbclnk', pt3d1, cd_type1, psgn, kfillmode, pfillval )
      !
      IF( .NOT.ll_grid_crs )   CALL dom_grid_glo   ! Return to parent grid domain

@@ SUBROUTINE crs_lbc_lnk_2d (lines 59-84)
… the identical change, applied to the 2D wrapper and its pt2d/cd_type arguments (with the comment "default = constant").
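The deleted PRESENT() bookkeeping is unnecessary because, in standard Fortran, an absent OPTIONAL dummy argument may itself be passed on to another OPTIONAL dummy and arrives there as "not present". A self-contained sketch of that pass-through behaviour (demo names are made up):

   MODULE passthrough_demo
      IMPLICIT NONE
   CONTAINS
      SUBROUTINE inner( kfillmode )
         INTEGER, OPTIONAL, INTENT(in) :: kfillmode
         IF( PRESENT(kfillmode) ) THEN ; PRINT *, 'fill mode =', kfillmode
         ELSE                          ; PRINT *, 'default fill mode'
         ENDIF
      END SUBROUTINE inner

      SUBROUTINE outer( kfillmode )
         INTEGER, OPTIONAL, INTENT(in) :: kfillmode
         CALL inner( kfillmode )   ! no PRESENT() test needed: absence propagates
      END SUBROUTINE outer
   END MODULE passthrough_demo

   PROGRAM test_passthrough
      USE passthrough_demo
      IMPLICIT NONE
      CALL outer()                  ! prints "default fill mode"
      CALL outer( kfillmode = 2 )   ! prints "fill mode =           2"
   END PROGRAM test_passthrough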
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/IOM/iom.F90
r10817 → r11192

The string flag 'no0' (previously documented as "fill the overlap area only") is replaced by the named fill mode kfillmode = jpfillnothing.

@@ lines 1270-1278
      !--- overlap areas and extra halos (mpp)
      IF(     PRESENT(pv_r2d) .AND. idom /= jpdom_unknown ) THEN
-        CALL lbc_lnk( 'iom', pv_r2d, 'Z', -999., 'no0' )
+        CALL lbc_lnk( 'iom', pv_r2d, 'Z', -999., kfillmode = jpfillnothing )
      ELSEIF( PRESENT(pv_r3d) .AND. idom /= jpdom_unknown ) THEN
         ! this if could be simplified with the new lbc_lnk that works with any size of the 3rd dimension
         IF( icnt(3) == inlev ) THEN
-           CALL lbc_lnk( 'iom', pv_r3d, 'Z', -999., 'no0' )
+           CALL lbc_lnk( 'iom', pv_r3d, 'Z', -999., kfillmode = jpfillnothing )
         ELSE   ! put some arbitrary value (a call to lbc_lnk will be done later...)
            DO jj = nlcj+1, jpj   ;   pv_r3d(1:nlci, jj, :) = pv_r3d(1:nlci, nlej, :)   ;   END DO
…
@@ lines 1299-1310
         CALL xios_recv_field( trim(cdvar), pv_r3d )
         IF( idom /= jpdom_unknown ) THEN
-           CALL lbc_lnk( 'iom', pv_r3d, 'Z', -999., 'no0' )
+           CALL lbc_lnk( 'iom', pv_r3d, 'Z', -999., kfillmode = jpfillnothing )
         ENDIF
      ELSEIF( PRESENT(pv_r2d) ) THEN
         CALL xios_recv_field( trim(cdvar), pv_r2d )
         IF( idom /= jpdom_unknown ) THEN
-           CALL lbc_lnk( 'iom', pv_r2d, 'Z', -999., 'no0' )
+           CALL lbc_lnk( 'iom', pv_r2d, 'Z', -999., kfillmode = jpfillnothing )
         ENDIF
      ELSEIF( PRESENT(pv_r1d) ) THEN
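A hedged sketch of what such integer fill modes look like in use. The constant names (jpfillnothing, jpfillcst) come from the lib_mpp.F90 hunk further below; the dispatch itself is illustrative, not the NEMO implementation:

   MODULE fillmode_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpfillnothing = 1   ! leave the halo as it is (old 'no0' behaviour)
      INTEGER, PARAMETER :: jpfillcst     = 2   ! fill the halo with a constant
   CONTAINS
      SUBROUTINE fill_halo( phalo, kfillmode, pfillval )
         REAL,    INTENT(inout)        :: phalo(:)
         INTEGER, OPTIONAL, INTENT(in) :: kfillmode
         REAL,    OPTIONAL, INTENT(in) :: pfillval
         INTEGER :: ifill
         REAL    :: zfill
         ifill = jpfillcst                      ! default = constant fill
         IF( PRESENT(kfillmode) )   ifill = kfillmode
         zfill = 0.
         IF( PRESENT(pfillval)  )   zfill = pfillval
         SELECT CASE( ifill )
         CASE( jpfillnothing )                  ! do nothing
         CASE( jpfillcst     ) ; phalo(:) = zfill
         END SELECT
      END SUBROUTINE fill_halo
   END MODULE fillmode_demo

   PROGRAM test_fillmode
      USE fillmode_demo
      IMPLICIT NONE
      REAL :: zh(4)
      zh = 5.
      CALL fill_halo( zh, jpfillnothing )                ! zh untouched
      CALL fill_halo( zh, jpfillcst, pfillval=-999. )    ! zh = -999.
      PRINT *, zh
   END PROGRAM test_fillmode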
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/lbc_lnk_generic.h90
r10425 → r11192

@@ lines 46-60 (both the MULTI and the single-array entry points of the template)
 #if defined MULTI
-  SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, cd_mpp, pval )
+  SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, kfillmode, pfillval )
      INTEGER                   , INTENT(in   ) :: kfld        ! number of pt3d arrays
 #else
-  SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn      , cd_mpp, pval )
+  SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn      , kfillmode, pfillval )
 #endif
      CHARACTER(len=*)          , INTENT(in   ) :: cdname      ! name of the calling subroutine
…
      CHARACTER(len=1)          , INTENT(in   ) :: NAT_IN(:)   ! nature of array grid-points
      REAL(wp)                  , INTENT(in   ) :: SGN_IN(:)   ! sign used across the north fold boundary
-     CHARACTER(len=3), OPTIONAL, INTENT(in   ) :: cd_mpp      ! fill the overlap area only
-     REAL(wp)        , OPTIONAL, INTENT(in   ) :: pval        ! background value (used at closed boundaries)
+     INTEGER         , OPTIONAL, INTENT(in   ) :: kfillmode   ! filling method for halo over land (default = constant)
+     REAL(wp)        , OPTIONAL, INTENT(in   ) :: pfillval    ! background value (used at closed boundaries)
      !
      INTEGER  ::   ji, jj, jk, jl, jh, jf   ! dummy loop indices
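The .h90 files are not compiled on their own: each is included several times with different preprocessor tokens, so a single template yields the whole 2d/3d/4d family of routines. A stripped-down sketch of the mechanism, with hypothetical file and routine names (the space before the parenthesis in the ARRAY_SHAPE define matters, so cpp treats it as an object-like macro):

   ! demo_generic.h90 -- the body template, included once per concrete shape
      SUBROUTINE ROUTINE_NAME( ptab )
         REAL, INTENT(inout) :: ptab ARRAY_SHAPE
         ptab = 0.                     ! stand-in for the shared exchange logic
      END SUBROUTINE ROUTINE_NAME

   ! demo.F90 -- expands the template into demo_2d and demo_3d
   MODULE demo
      IMPLICIT NONE
   CONTAINS
   #  define ROUTINE_NAME demo_2d
   #  define ARRAY_SHAPE (:,:)
   #  include "demo_generic.h90"
   #  undef  ROUTINE_NAME
   #  undef  ARRAY_SHAPE
   #  define ROUTINE_NAME demo_3d
   #  define ARRAY_SHAPE (:,:,:)
   #  include "demo_generic.h90"
   #  undef  ROUTINE_NAME
   #  undef  ARRAY_SHAPE
   END MODULE demo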
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/lbc_lnk_multi_generic.h90
r11067 → r11192

@@ lines 20-30 (subroutine signature, both the IS_BDY and the standard variants)
      &                   , pt5, cdna5, psgn5, pt6 , cdna6 , psgn6 , pt7 , cdna7 , psgn7 , pt8, cdna8, psgn8   &
      &                   , pt9, cdna9, psgn9, pt10, cdna10, psgn10, pt11, cdna11, psgn11                      &
-     &                   , cd_mpp, pval )
+     &                   , kfillmode, pfillval )

@@ lines 32-41 (argument declarations, realigned)
      CHARACTER(len=*)   ,                   INTENT(in   ) :: cdname   ! name of the calling subroutine
      ARRAY_TYPE(:,:,:,:),           TARGET, INTENT(inout) :: pt1      ! arrays on which the lbc is applied
      ARRAY_TYPE(:,:,:,:), OPTIONAL, TARGET, INTENT(inout) :: pt2, pt3, pt4, pt5, pt6, pt7, pt8, pt9, pt10, pt11
      CHARACTER(len=1)   ,                   INTENT(in   ) :: cdna1    ! nature of pt2D array grid-points
      CHARACTER(len=1)   , OPTIONAL        , INTENT(in   ) :: cdna2, cdna3, cdna4, cdna5, cdna6, cdna7, cdna8, cdna9, cdna10, cdna11
      REAL(wp)           ,                   INTENT(in   ) :: psgn1    ! sign used across the north fold
      REAL(wp)           , OPTIONAL        , INTENT(in   ) :: psgn2, psgn3, psgn4, psgn5, psgn6, psgn7, psgn8, psgn9, psgn10, psgn11
-     CHARACTER(len=3)   , OPTIONAL        , INTENT(in   ) :: cd_mpp     ! fill the overlap area only
-     REAL(wp)           , OPTIONAL        , INTENT(in   ) :: pval       ! background value (used at closed boundaries)
+     INTEGER            , OPTIONAL        , INTENT(in   ) :: kfillmode  ! filling method for halo over land (default = constant)
+     REAL(wp)           , OPTIONAL        , INTENT(in   ) :: pfillval   ! background value (used at closed boundaries)
      !!
      INTEGER :: kfld   ! number of elements that will be attributed
…
@@ lines 64-70
 #if defined IS_BDY
      CALL lbc_bdy_lnk_ptr( cdname, lsend, lrecv, ptab_ptr, cdna_ptr, psgn_ptr, kfld )
 #else
-     CALL lbc_lnk_ptr    ( cdname, ptab_ptr, cdna_ptr, psgn_ptr, kfld, cd_mpp, pval )
+     CALL lbc_lnk_ptr    ( cdname, ptab_ptr, cdna_ptr, psgn_ptr, kfld, kfillmode, pfillval )
 #endif
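Behind this template, each field/nature/sign triplet present at the call site is packed into an array of pointers so that a single routine can loop over all fields in one exchange. A minimal self-contained sketch of that packing pattern (made-up names, two fields instead of eleven):

   MODULE multi_demo
      IMPLICIT NONE
      TYPE ptr2d
         REAL, POINTER :: pt(:,:) => NULL()
      END TYPE ptr2d
   CONTAINS
      SUBROUTINE lnk_multi( pt1, pt2 )
         REAL, TARGET,           INTENT(inout) :: pt1(:,:)
         REAL, TARGET, OPTIONAL, INTENT(inout) :: pt2(:,:)
         TYPE(ptr2d) :: zptr(2)
         INTEGER     :: ifld, jf
         ifld = 1   ;   zptr(1)%pt => pt1
         IF( PRESENT(pt2) ) THEN
            ifld = 2   ;   zptr(2)%pt => pt2
         ENDIF
         DO jf = 1, ifld              ! one pass over all packed fields:
            zptr(jf)%pt(:,:) = 0.     ! stand-in for the halo exchange
         END DO
      END SUBROUTINE lnk_multi
   END MODULE multi_demo

   PROGRAM test_multi
      USE multi_demo
      IMPLICIT NONE
      REAL :: zu(3,3), zv(3,3)
      CALL lnk_multi( zu )            ! one field
      CALL lnk_multi( zu, zv )        ! two fields, still a single exchange
   END PROGRAM test_multi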
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/lbclnk.F90
r11067 → r11192

Summary: the module no longer carries two parallel implementations. The old non-MPI branch (shared-memory lbc_lnk_* templates, the lbc_bdy_lnk_* pass-through wrappers and the lbc_lnk_2d_icb stub) is deleted, and the generic mpp_lnk_*/mpp_nfd_*/mpp_lnk_bdy_* template includes plus the iceberg-halo routines mpp_lbc_north_icb and mpp_lnk_2d_icb move here from lib_mpp.F90.

@@ module header
-#if defined key_mpp_mpi
-!!----------------------------------------------------------------------
-!!   'key_mpp_mpi'             MPI massively parallel processing library
-!!----------------------------------------------------------------------
 !!   define the generic interfaces of lib_mpp routines
 !!----------------------------------------------------------------------
 !!   lbc_lnk       : generic interface for mpp_lnk_3d and mpp_lnk_2d routines defined in lib_mpp
 !!   lbc_bdy_lnk   : generic interface for mpp_lnk_bdy_2d and mpp_lnk_bdy_3d routines defined in lib_mpp
 !!----------------------------------------------------------------------
-USE par_oce        ! ocean dynamics and tracers
+USE dom_oce        ! ocean space and time domain
 USE lib_mpp        ! distributed memory computing library
 USE lbcnfd         ! north fold
+USE in_out_manager ! I/O manager
+
+IMPLICIT NONE
+PRIVATE

 INTERFACE lbc_lnk
…
 END INTERFACE

+INTERFACE mpp_nfd
+   MODULE PROCEDURE   mpp_nfd_2d    , mpp_nfd_3d    , mpp_nfd_4d
+   MODULE PROCEDURE   mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr
+END INTERFACE
+
 PUBLIC   lbc_lnk            ! ocean/ice lateral boundary conditions
 PUBLIC   lbc_lnk_multi      ! modified ocean/ice lateral boundary conditions
 PUBLIC   lbc_bdy_lnk        ! ocean lateral BDY boundary conditions
 PUBLIC   lbc_bdy_lnk_multi  ! modified ocean lateral BDY boundary conditions
 PUBLIC   lbc_lnk_icb        ! iceberg lateral boundary conditions
+
+#if defined key_mpp_mpi
+!$AGRIF_DO_NOT_TREAT
+INCLUDE 'mpif.h'
+!$AGRIF_END_DO_NOT_TREAT
+#endif

@@ removed (old lines 66-237): the whole former "Default option: shared memory computing" branch, i.e. the duplicate lbc_lnk_(2,3,4)d and lbc_lnk_(2,3,4)d_ptr templates generated from lbc_lnk_generic.h90, the lbc_bdy_lnk_(2,3,4)d wrappers that ignored their ib_bdy argument and simply re-called lbc_lnk_(2,3,4)d, the non-mpp lbc_lnk_2d_icb stub (flagged "!!gm This routine should be removed with an optional halos size added in argument of generic routines"), and the closing #endif. The lbc_lnk_(2,3,4)d[_multi] template includes that follow are kept unchanged.

@@ added (new lines 142-292): the mpp_lnk/mpp_nfd/mpp_bdy generators moved from lib_mpp.F90
+!!----------------------------------------------------------------------
+!!                   ***   routine mpp_lnk_(2,3,4)d   ***
+!!
+!!   * Argument : dummy argument use in mpp_lnk_... routines
+!!                ptab   :   array or pointer of arrays on which the boundary condition is applied
+!!                cd_nat :   nature of array grid-points
+!!                psgn   :   sign used across the north fold boundary
+!!                kfld   :   optional, number of pt3d arrays
+!!                cd_mpp :   optional, fill the overlap area only
+!!                pval   :   optional, background value (used at closed boundaries)
+!!----------------------------------------------------------------------
+#  define DIM_2d
+#  define ROUTINE_LNK           mpp_lnk_2d
+#  include "mpp_lnk_generic.h90"
+#  undef ROUTINE_LNK
+#  define MULTI
+#  define ROUTINE_LNK           mpp_lnk_2d_ptr
+#  include "mpp_lnk_generic.h90"
+#  undef ROUTINE_LNK
+#  undef MULTI
+#  undef DIM_2d
+… the same define/include/undef stanza follows for DIM_3d (mpp_lnk_3d, mpp_lnk_3d_ptr) and DIM_4d (mpp_lnk_4d, mpp_lnk_4d_ptr), then for mpp_nfd_(2,3,4)d and mpp_nfd_(2,3,4)d_ptr from mpp_nfd_generic.h90, and for mpp_lnk_bdy_(2,3,4)d and mpp_lnk_bdy_(2,3,4)d_ptr from mpp_bdy_generic.h90 (whose argument block documents kb_bdy, the BDY boundary set).

@@ added (new lines 298-579): the iceberg-halo routines moved from lib_mpp.F90

   SUBROUTINE mpp_lbc_north_icb( pt2d, cd_type, psgn, kextj )
      !!---------------------------------------------------------------------
      !!                 ***  routine mpp_lbc_north_icb  ***
      !!
      !! ** Purpose :   Ensure proper north fold horizontal boundary condition
      !!                in mpp configuration in case of jpni > 1 and for 2d
      !!                array with outer extra halo
      !!
      !! ** Method  :   North fold condition and mpp with more than one proc
      !!                in i-direction require a specific treatment. We gather
      !!                the 4+kextj northern lines of the global domain on 1
      !!                processor and apply lbc north-fold on this sub array.
      !!                Then we scatter the north fold array back to the processors.
      !!                This routine accounts for an extra halo with icebergs
      !!                and assumes ghost rows and columns have been suppressed.
      !!----------------------------------------------------------------------
      REAL(wp), DIMENSION(:,:), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)        , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points (T, U, V, F or W)
      REAL(wp)                , INTENT(in   ) ::   psgn      ! = -1. if the sign changes across the north fold, = 1. otherwise
      INTEGER                 , INTENT(in   ) ::   kextj     ! extra halo width at north fold
      !
      INTEGER ::   ji, jj, jr
      INTEGER ::   ierr, itaille, ildi, ilei, iilb
      INTEGER ::   ipj, ij, iproc
      !
      REAL(wp), DIMENSION(:,:)  , ALLOCATABLE ::   ztab_e, znorthloc_e
      REAL(wp), DIMENSION(:,:,:), ALLOCATABLE ::   znorthgloio_e
      !!----------------------------------------------------------------------
#if defined key_mpp_mpi
      !
      ipj = 4
      ALLOCATE( ztab_e(jpiglo,1-kextj:ipj+kextj) , znorthloc_e(jpimax,1-kextj:ipj+kextj) ,   &
         &      znorthgloio_e(jpimax,1-kextj:ipj+kextj,jpni) )
      !
      ztab_e(:,:)      = 0._wp
      znorthloc_e(:,:) = 0._wp
      !
      ij = 1 - kextj                ! put the last ipj+2*kextj lines of pt2d into znorthloc_e
      DO jj = jpj - ipj + 1 - kextj , jpj + kextj
         znorthloc_e(1:jpi,ij) = pt2d(1:jpi,jj)
         ij = ij + 1
      END DO
      !
      itaille = jpimax * ( ipj + 2*kextj )
      !
      IF( ln_timing ) CALL tic_tac(.TRUE.)
      CALL MPI_ALLGATHER( znorthloc_e(1,1-kextj)    , itaille, MPI_DOUBLE_PRECISION,   &
         &                znorthgloio_e(1,1-kextj,1), itaille, MPI_DOUBLE_PRECISION,   &
         &                ncomm_north, ierr )
      IF( ln_timing ) CALL tic_tac(.FALSE.)
      !
      DO jr = 1, ndim_rank_north    ! recover the global north array
         iproc = nrank_north(jr) + 1
         ildi  = nldit (iproc)
         ilei  = nleit (iproc)
         iilb  = nimppt(iproc)
         DO jj = 1-kextj, ipj+kextj
            DO ji = ildi, ilei
               ztab_e(ji+iilb-1,jj) = znorthgloio_e(ji,jj,jr)
            END DO
         END DO
      END DO

      ! 2. North-Fold boundary conditions
      ! ----------------------------------
      CALL lbc_nfd( ztab_e(:,1-kextj:ipj+kextj), cd_type, psgn, kextj )

      ij = 1 - kextj                ! scatter back to pt2d
      DO jj = jpj - ipj + 1 - kextj , jpj + kextj
         DO ji = 1, jpi
            pt2d(ji,jj) = ztab_e(ji+nimpp-1,ij)
         END DO
         ij = ij + 1
      END DO
      !
      DEALLOCATE( ztab_e, znorthloc_e, znorthgloio_e )
      !
#endif
   END SUBROUTINE mpp_lbc_north_icb


   SUBROUTINE mpp_lnk_2d_icb( cdname, pt2d, cd_type, psgn, kexti, kextj )
      !!----------------------------------------------------------------------
      !!                  ***  routine mpp_lnk_2d_icb  ***
      !!
      !! ** Purpose :   Message passing management for 2d array (with extra halo for icebergs)
      !!                This routine receives a (1-kexti:jpi+kexti,1-kextj:jpj+kextj)
      !!                array (usually (0:jpi+1, 0:jpj+1)) from lbc_lnk_icb calls.
      !!
      !! ** Method  :   Use mppsend and mpprecv function for passing mask
      !!                between processors following neighboring subdomains.
      !!                domain parameters
      !!                   jpi    : first  dimension of the local subdomain
      !!                   jpj    : second dimension of the local subdomain
      !!                   kexti  : number of columns for extra outer halo
      !!                   kextj  : number of rows    for extra outer halo
      !!                   nbondi : mark for "east-west  local boundary"
      !!                   nbondj : mark for "north-south local boundary"
      !!                   noea   : number for local neighboring processors
      !!                   nowe   : number for local neighboring processors
      !!                   noso   : number for local neighboring processors
      !!                   nono   : number for local neighboring processors
      !!----------------------------------------------------------------------
      CHARACTER(len=*)                                        , INTENT(in   ) ::   cdname    ! name of the calling subroutine
      REAL(wp), DIMENSION(1-kexti:jpi+kexti,1-kextj:jpj+kextj), INTENT(inout) ::   pt2d      ! 2D array with extra halo
      CHARACTER(len=1)                                        , INTENT(in   ) ::   cd_type   ! nature of ptab array grid-points
      REAL(wp)                                                , INTENT(in   ) ::   psgn      ! sign used across the north fold
      INTEGER                                                 , INTENT(in   ) ::   kexti     ! extra i-halo width
      INTEGER                                                 , INTENT(in   ) ::   kextj     ! extra j-halo width
      !
      INTEGER  ::   jl                        ! dummy loop indices
      INTEGER  ::   imigr, iihom, ijhom       ! local integers
      INTEGER  ::   ipreci, iprecj            !   -       -
      INTEGER  ::   ml_req1, ml_req2, ml_err  ! for key_mpi_isend
      INTEGER, DIMENSION(MPI_STATUS_SIZE) ::   ml_stat   ! for key_mpi_isend
      !!
      REAL(wp), DIMENSION(1-kexti:jpi+kexti,nn_hls+kextj,2) ::   r2dns, r2dsn
      REAL(wp), DIMENSION(1-kextj:jpj+kextj,nn_hls+kexti,2) ::   r2dwe, r2dew
      !!----------------------------------------------------------------------

      ipreci = nn_hls + kexti      ! take into account outer extra 2D overlap area
      iprecj = nn_hls + kextj

      IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, 1, 1, 1, ld_lbc = .TRUE. )

      ! 1. standard boundary treatment   (order matters here!)
      ! ------------------------------
      !                            ! East-West boundaries
      IF( l_Iperio ) THEN              !* cyclic east-west
         pt2d(1-kexti:     1    ,:) = pt2d(jpim1-kexti:  jpim1 ,:)   ! east
         pt2d(   jpi  :jpi+kexti,:) = pt2d(     2      :2+kexti,:)   ! west
         !
      ELSE                             !* closed
         IF( .NOT. cd_type == 'F' )   pt2d(  1-kexti   :nn_hls   ,:) = 0._wp   ! east except at F-point
                                      pt2d(jpi-nn_hls+1:jpi+kexti,:) = 0._wp   ! west
      ENDIF
      !                            ! North-South boundaries
      IF( l_Jperio ) THEN              !* cyclic (only with no mpp j-split)
         pt2d(:,1-kextj:     1    ) = pt2d(:,jpjm1-kextj:  jpjm1)    ! north
         pt2d(:,   jpj  :jpj+kextj) = pt2d(:,     2      :2+kextj)   ! south
      ELSE                             !* closed
         IF( .NOT. cd_type == 'F' )   pt2d(:,  1-kextj   :nn_hls   ) = 0._wp   ! north except at F-point
                                      pt2d(:,jpj-nn_hls+1:jpj+kextj) = 0._wp   ! south
      ENDIF

      ! north fold treatment
      ! --------------------
      IF( npolj /= 0 ) THEN
         !
         SELECT CASE ( jpni )
         CASE ( 1 )     ;   CALL lbc_nfd          ( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj )
         CASE DEFAULT   ;   CALL mpp_lbc_north_icb( pt2d(1:jpi,1:jpj+kextj), cd_type, psgn, kextj )
         END SELECT
         !
      ENDIF

      ! 2. East and west directions exchange
      ! ------------------------------------
      ! we play with the neighbours AND the row number because of the periodicity
      !
      SELECT CASE ( nbondi )      ! Read Dirichlet lateral conditions
      CASE ( -1, 0, 1 )                ! all except 2 (i.e. close case)
         iihom = jpi-nreci-kexti
         DO jl = 1, ipreci
            r2dew(:,jl,1) = pt2d(nn_hls+jl,:)
            r2dwe(:,jl,1) = pt2d(iihom +jl,:)
         END DO
      END SELECT
      !
      imigr = ipreci * ( jpj + 2*kextj )      ! Migrations
      !
      IF( ln_timing ) CALL tic_tac(.TRUE.)
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req1 )
         CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 )
         CALL mppsend( 2, r2dwe(1-kextj,1,1), imigr, noea, ml_req2 )
         CALL mpprecv( 1, r2dew(1-kextj,1,2), imigr, noea )
         CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 1, r2dew(1-kextj,1,1), imigr, nowe, ml_req1 )
         CALL mpprecv( 2, r2dwe(1-kextj,1,2), imigr, nowe )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      IF( ln_timing ) CALL tic_tac(.FALSE.)
      !
      iihom = jpi - nn_hls        ! Write Dirichlet lateral conditions
      !
      SELECT CASE ( nbondi )
      CASE ( -1 )
         DO jl = 1, ipreci
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, ipreci
            pt2d(jl-kexti,:) = r2dwe(:,jl,2)
            pt2d(iihom+jl,:) = r2dew(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, ipreci
            pt2d(jl-kexti,:) = r2dwe(:,jl,2)
         END DO
      END SELECT

      ! 3. North and south directions
      ! -----------------------------
      ! always closed : we play only with the neighbours
      !
      IF( nbondj /= 2 ) THEN      ! Read Dirichlet lateral conditions
         ijhom = jpj-nrecj-kextj
         DO jl = 1, iprecj
            r2dsn(:,jl,1) = pt2d(:,ijhom +jl)
            r2dns(:,jl,1) = pt2d(:,nn_hls+jl)
         END DO
      ENDIF
      !
      imigr = iprecj * ( jpi + 2*kexti )      ! Migrations
      !
      IF( ln_timing ) CALL tic_tac(.TRUE.)
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req1 )
         CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      CASE ( 0 )
         CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 )
         CALL mppsend( 4, r2dsn(1-kexti,1,1), imigr, nono, ml_req2 )
         CALL mpprecv( 3, r2dns(1-kexti,1,2), imigr, nono )
         CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
         IF(l_isend)   CALL mpi_wait(ml_req2,ml_stat,ml_err)
      CASE ( 1 )
         CALL mppsend( 3, r2dns(1-kexti,1,1), imigr, noso, ml_req1 )
         CALL mpprecv( 4, r2dsn(1-kexti,1,2), imigr, noso )
         IF(l_isend)   CALL mpi_wait(ml_req1,ml_stat,ml_err)
      END SELECT
      !
      IF( ln_timing ) CALL tic_tac(.FALSE.)
      !
      ijhom = jpj - nn_hls        ! Write Dirichlet lateral conditions
      !
      SELECT CASE ( nbondj )
      CASE ( -1 )
         DO jl = 1, iprecj
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 0 )
         DO jl = 1, iprecj
            pt2d(:,jl-kextj) = r2dsn(:,jl,2)
            pt2d(:,ijhom+jl) = r2dns(:,jl,2)
         END DO
      CASE ( 1 )
         DO jl = 1, iprecj
            pt2d(:,jl-kextj) = r2dsn(:,jl,2)
         END DO
      END SELECT
      !
   END SUBROUTINE mpp_lnk_2d_icb

 END MODULE lbclnk
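The icb routines rely on declaring dummy arrays with explicit lower bounds so the extra outer halo is addressable with zero or negative indices, as in pt2d(1-kexti:jpi+kexti,...) above. A small self-contained illustration of that declaration trick (demo names, not NEMO code):

   MODULE halo_demo
      IMPLICIT NONE
   CONTAINS
      SUBROUTINE touch_halo( pt2d, kexti, kextj, kpi, kpj )
         INTEGER, INTENT(in) :: kexti, kextj, kpi, kpj
         ! adjustable-shape dummy with custom bounds: index 1-kexti addresses
         ! the outermost extra-halo column, as in mpp_lnk_2d_icb
         REAL, INTENT(inout) :: pt2d(1-kexti:kpi+kexti, 1-kextj:kpj+kextj)
         pt2d(1-kexti  , :) = -1.      ! west outer halo
         pt2d(kpi+kexti, :) = -1.      ! east outer halo
      END SUBROUTINE touch_halo
   END MODULE halo_demo

   PROGRAM test_halo
      USE halo_demo
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpi = 4, jpj = 3, kext = 2
      REAL :: z2d(1-kext:jpi+kext, 1-kext:jpj+kext)
      z2d = 0.
      CALL touch_halo( z2d, kext, kext, jpi, jpj )
      PRINT *, 'outer halo touched:', z2d(1-kext,1), z2d(jpi+kext,1)
   END PROGRAM test_halo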
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/lib_mpp.F90
r11067 → r11192

Summary: lib_mpp.F90 is reorganised so that a single module serves both the MPI and the non-MPI builds. The module-wide "#if defined key_mpp_mpi / #else dummy module" split is removed; instead each routine carries its own #if defined key_mpp_mpi guard with a serial fallback. The generic mpp_lnk_*/mpp_nfd_*/mpp_lnk_bdy_* template includes and the iceberg routines mpp_lbc_north_icb / mpp_lnk_2d_icb move out to lbclnk.F90, the named fill-mode constants jpfill* are introduced, and mpp_report / tic_tac become PUBLIC.

@@ module header (lines 34-70)
-#if defined key_mpp_mpi
-!!----------------------------------------------------------------------
-!!   'key_mpp_mpi'             MPI massively parallel processing library
-!!----------------------------------------------------------------------
-!!   lib_mpp_alloc : allocate mpp arrays
+!!----------------------------------------------------------------------
 !!   mynode        : identify the processor unit
 !!   mpp_lnk       : interface (defined in lbclnk) for message passing of 2d or 3d arrays (mpp_lnk_2d, mpp_lnk_3d)
…
 USE dom_oce        ! ocean space and time domain
-USE lbcnfd         ! north fold treatment
 USE in_out_manager ! I/O manager

 IMPLICIT NONE
 PRIVATE
-
-INTERFACE mpp_nfd
-   MODULE PROCEDURE   mpp_nfd_2d    , mpp_nfd_3d    , mpp_nfd_4d
-   MODULE PROCEDURE   mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr
-END INTERFACE
-
-! Interface associated to the mpp_lnk_... routines is defined in lbclnk
-PUBLIC   mpp_lnk_2d    , mpp_lnk_3d    , mpp_lnk_4d
-PUBLIC   mpp_lnk_2d_ptr, mpp_lnk_3d_ptr, mpp_lnk_4d_ptr
-PUBLIC   mpp_lnk_bdy_2d    , mpp_lnk_bdy_3d    , mpp_lnk_bdy_4d
-PUBLIC   mpp_lnk_bdy_2d_ptr, mpp_lnk_bdy_3d_ptr, mpp_lnk_bdy_4d_ptr
-!
-!!gm  this should be useless
-PUBLIC   mpp_nfd_2d    , mpp_nfd_3d    , mpp_nfd_4d
-PUBLIC   mpp_nfd_2d_ptr, mpp_nfd_3d_ptr, mpp_nfd_4d_ptr
-!!gm end
 !
 PUBLIC   ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam
 PUBLIC   mynode, mppstop, mppsync, mpp_comm_free
 PUBLIC   mpp_ini_north
-PUBLIC   mpp_lnk_2d_icb
-PUBLIC   mpp_lbc_north_icb
 PUBLIC   mpp_min, mpp_max, mpp_sum, mpp_minloc, mpp_maxloc
 PUBLIC   mpp_delay_max, mpp_delay_sum, mpp_delay_rcv
…
 PUBLIC   mpp_ini_znl
 PUBLIC   mppsend, mpprecv      ! needed by TAM and ICB routines
+PUBLIC   mpp_report
+PUBLIC   tic_tac

@@ MPI variable definition (lines 93-105): the include is now guarded, with serial stand-ins otherwise
+#if defined key_mpp_mpi
 !$AGRIF_DO_NOT_TREAT
 INCLUDE 'mpif.h'
 !$AGRIF_END_DO_NOT_TREAT
-
 LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .TRUE.    !: mpp flag
+#else
+INTEGER, PUBLIC, PARAMETER ::   MPI_STATUS_SIZE = 1
+INTEGER, PUBLIC, PARAMETER ::   MPI_DOUBLE_PRECISION = 8
+LOGICAL, PUBLIC, PARAMETER ::   lk_mpp = .FALSE.   !: mpp flag
+#endif

@@ new named fill modes (lines 175-180)
+INTEGER, PUBLIC, PARAMETER ::   jpfillnothing = 1
+INTEGER, PUBLIC, PARAMETER ::   jpfillcst     = 2
+INTEGER, PUBLIC, PARAMETER ::   jpfillcopy    = 3
+INTEGER, PUBLIC, PARAMETER ::   jpfillperio   = 4
+INTEGER, PUBLIC, PARAMETER ::   jpfillmpi     = 5

@@ FUNCTION mynode (lines 205-328): the MPI body is wrapped in #if defined key_mpp_mpi (the inner
   "#if defined key_agrif" test becomes "# if defined key_agrif"), and a serial fallback is added:
+#else
+   IF( PRESENT( localComm ) ) mpi_comm_oce = localComm
+   mynode = 0
+   CALL ctl_opn( kumond, TRIM(ldname), 'UNKNOWN', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. , 1 )
+#endif

@@ removed (old lines 334-496): the mpp_lnk_(2,3,4)d[_ptr], mpp_nfd_(2,3,4)d[_ptr] and
   mpp_lnk_bdy_(2,3,4)d[_ptr] define/include/undef blocks, moved verbatim to lbclnk.F90 (shown above),
   together with the stale notes "load_array & mpp_lnk_2d_9 à generaliser a 3D et 4D" ["to be generalised
   to 3D and 4D"] and "mpp_lnk_sum_2d et 3D ====>>>>>> à virer du code !!!!" ["to be removed from the code"].

@@ point-to-point and collective wrappers (lines 345-440): each MPI call is guarded and, where meaningful,
   given a serial fallback:
   mppsend, mpprecv : body wrapped in #if defined key_mpp_mpi ... #endif (no serial action)
   mppgather :
+#else
+   pio(:,:,1) = ptab(:,:)
+#endif
   mppscatter :
+#else
+   ptab(:,:) = pio(:,:,1)
+#endif

@@ mpp_delay_sum / mpp_delay_max (lines 461-578): the reduction body is wrapped in #if defined key_mpp_mpi;
   the inner "#if defined key_mpi2" tests become "# if defined key_mpi2" and the mpi_iallreduce call moves
   into a matching "# else" branch. The serial fallbacks are:
+#else
+   pout(:) = REAL(y_in(:), wp)      ! mpp_delay_sum
+#endif
   and
+#else
+   pout(:) = p_in(:)                ! mpp_delay_max
+#endif

@@ likewise (lines 591-1037): mpp_delay_rcv, mppsync, mppstop, mpp_comm_free, mpp_ini_znl, mpp_ini_north
   and mpi_init_oce keep their code unchanged inside new #if defined key_mpp_mpi ... #endif guards.

@@ removed (old lines 1205-1484): mpp_lbc_north_icb and mpp_lnk_2d_icb, moved verbatim to lbclnk.F90
   (their full text is shown in the lbclnk.F90 section above).

@@ mpp_report (lines 1084-1198) and tic_tac (lines 1206-1225): bodies wrapped in
   #if defined key_mpp_mpi ... #endif.

@@ removed (old lines 1642 onwards): the whole "Default case: Dummy module shared memory computing" branch,
   i.e. the dummy mpp_sum/mpp_max/mpp_min/mpp_minloc/mpp_maxloc interfaces, the dummy lk_mpp, ln_nnogather,
   mpi_comm_oce, nbdelay/c_delaylist/c_delaycpnt/DELAYARR/todelay/ndelayid declarations, the dummy
   lib_mpp_alloc, mynode and mppsync, and the mppmax_*/mppmin_* blocks generated from
   mpp_allreduce_generic.h90 … (the changeset listing is cut off at this point in the source).
*** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 1766 !! 1767 !! Global sum of 1D array or a variable (integer, real or complex) 1768 !!---------------------------------------------------------------------- 1769 !! 1770 # define OPERATION_SUM 1771 # define INTEGER_TYPE 1772 # define DIM_0d 1773 # define ROUTINE_ALLREDUCE mppsum_int 1774 # include "mpp_allreduce_generic.h90" 1775 # undef ROUTINE_ALLREDUCE 1776 # undef DIM_0d 1777 # define DIM_1d 1778 # define ROUTINE_ALLREDUCE mppsum_a_int 1779 # include "mpp_allreduce_generic.h90" 1780 # undef ROUTINE_ALLREDUCE 1781 # undef DIM_1d 1782 # undef INTEGER_TYPE 1783 ! 1784 # define REAL_TYPE 1785 # define DIM_0d 1786 # define ROUTINE_ALLREDUCE mppsum_real 1787 # include "mpp_allreduce_generic.h90" 1788 # undef ROUTINE_ALLREDUCE 1789 # undef DIM_0d 1790 # define DIM_1d 1791 # define ROUTINE_ALLREDUCE mppsum_a_real 1792 # include "mpp_allreduce_generic.h90" 1793 # undef ROUTINE_ALLREDUCE 1794 # undef DIM_1d 1795 # undef REAL_TYPE 1796 # undef OPERATION_SUM 1797 1798 # define OPERATION_SUM_DD 1799 # define COMPLEX_TYPE 1800 # define DIM_0d 1801 # define ROUTINE_ALLREDUCE mppsum_realdd 1802 # include "mpp_allreduce_generic.h90" 1803 # undef ROUTINE_ALLREDUCE 1804 # undef DIM_0d 1805 # define DIM_1d 1806 # define ROUTINE_ALLREDUCE mppsum_a_realdd 1807 # include "mpp_allreduce_generic.h90" 1808 # undef ROUTINE_ALLREDUCE 1809 # undef DIM_1d 1810 # undef COMPLEX_TYPE 1811 # undef OPERATION_SUM_DD 1812 1813 !!---------------------------------------------------------------------- 1814 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 1815 !! 1816 !!---------------------------------------------------------------------- 1817 !! 1818 # define OPERATION_MINLOC 1819 # define DIM_2d 1820 # define ROUTINE_LOC mpp_minloc2d 1821 # include "mpp_loc_generic.h90" 1822 # undef ROUTINE_LOC 1823 # undef DIM_2d 1824 # define DIM_3d 1825 # define ROUTINE_LOC mpp_minloc3d 1826 # include "mpp_loc_generic.h90" 1827 # undef ROUTINE_LOC 1828 # undef DIM_3d 1829 # undef OPERATION_MINLOC 1830 1831 # define OPERATION_MAXLOC 1832 # define DIM_2d 1833 # define ROUTINE_LOC mpp_maxloc2d 1834 # include "mpp_loc_generic.h90" 1835 # undef ROUTINE_LOC 1836 # undef DIM_2d 1837 # define DIM_3d 1838 # define ROUTINE_LOC mpp_maxloc3d 1839 # include "mpp_loc_generic.h90" 1840 # undef ROUTINE_LOC 1841 # undef DIM_3d 1842 # undef OPERATION_MAXLOC 1843 1844 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 1845 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1846 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1847 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 1848 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1849 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1850 INTEGER, INTENT(in ), OPTIONAL :: kcom 1851 ! 1852 pout(:) = REAL(y_in(:), wp) 1853 END SUBROUTINE mpp_delay_sum 1854 1855 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 1856 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1857 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1858 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in 1859 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1860 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1861 INTEGER, INTENT(in ), OPTIONAL :: kcom 1862 ! 
1863 pout(:) = p_in(:) 1864 END SUBROUTINE mpp_delay_max 1865 1866 SUBROUTINE mpp_delay_rcv( kid ) 1867 INTEGER,INTENT(in ) :: kid 1868 WRITE(*,*) 'mpp_delay_rcv: You should not have seen this print! error?', kid 1869 END SUBROUTINE mpp_delay_rcv 1870 1871 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 1872 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 1873 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 1874 STOP ! non MPP case, just stop the run 1875 END SUBROUTINE mppstop 1876 1877 SUBROUTINE mpp_ini_znl( knum ) 1878 INTEGER :: knum 1879 WRITE(*,*) 'mpp_ini_znl: You should not have seen this print! error?', knum 1880 END SUBROUTINE mpp_ini_znl 1881 1882 SUBROUTINE mpp_comm_free( kcom ) 1883 INTEGER :: kcom 1884 WRITE(*,*) 'mpp_comm_free: You should not have seen this print! error?', kcom 1885 END SUBROUTINE mpp_comm_free 1886 1887 #endif 1888 1889 !!---------------------------------------------------------------------- 1890 !! All cases: ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam routines 1228 #if ! defined key_mpp_mpi 1229 SUBROUTINE mpi_wait(request, status, ierror) 1230 INTEGER , INTENT(in ) :: request 1231 INTEGER, DIMENSION(MPI_STATUS_SIZE), INTENT( out) :: status 1232 INTEGER , INTENT( out) :: ierror 1233 END SUBROUTINE mpi_wait 1234 #endif 1235 1236 !!---------------------------------------------------------------------- 1237 !! ctl_stop, ctl_warn, get_unit, ctl_opn, ctl_nam routines 1891 1238 !!---------------------------------------------------------------------- 1892 1239 -
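The net effect of the lib_mpp.F90 change is that the large #else dummy module for shared-memory builds disappears: the real routines are now compiled unconditionally, with #if defined key_mpp_mpi guards inside mpp_report and tic_tac, plus a do-nothing mpi_wait stub supplied when MPI is absent. A minimal sketch of that stub pattern, not NEMO code: the module and parameter value below are illustrative assumptions (in lib_mpp, MPI_STATUS_SIZE comes from the MPI include file).

   MODULE serial_mpi_stub
      IMPLICIT NONE
      INTEGER, PARAMETER :: MPI_STATUS_SIZE = 1   ! assumed size, for illustration only
   CONTAINS
      ! no-op mpi_wait with the same interface as the MPI library routine,
      ! so callers can keep an unconditional CALL mpi_wait(...) in serial builds
      SUBROUTINE mpi_wait( request, status, ierror )
         INTEGER,                             INTENT(in   ) :: request
         INTEGER, DIMENSION(MPI_STATUS_SIZE), INTENT(  out) :: status
         INTEGER,                             INTENT(  out) :: ierror
         status(:) = 0      ! nothing is ever pending in a serial run
         ierror    = 0
      END SUBROUTINE mpi_wait
   END MODULE serial_mpi_stub

The design choice is the usual one for optional dependencies: keep one code path in the callers and push the build-time variation into a tiny stub, instead of maintaining a parallel dummy copy of every routine.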
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/mpp_lnk_generic.h90
r10542 r11192
46 46  
47 47  #if defined MULTI
48        SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, cd_mpp, pval )
49        INTEGER             , INTENT(in   ) ::   kfld        ! number of pt3d arrays
48        SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, kfillmode, pfillval )
49        INTEGER             , INTENT(in   ) ::   kfld        ! number of pt3d arrays
50 50  #else
51        SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn      , cd_mpp, pval )
51        SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn      , kfillmode, pfillval )
52 52  #endif
53 53     ARRAY_TYPE(:,:,:,:,:)                                   ! array or pointer of arrays on which the boundary condition is applied
54        CHARACTER(len=*)           , INTENT(in   ) ::   cdname      ! name of the calling subroutine
55        CHARACTER(len=1)           , INTENT(in   ) ::   NAT_IN(:)   ! nature of array grid-points
56        REAL(wp)                   , INTENT(in   ) ::   SGN_IN(:)   ! sign used across the north fold boundary
57        CHARACTER(len=3), OPTIONAL , INTENT(in   ) ::   cd_mpp      ! fill the overlap area only
58        REAL(wp)        , OPTIONAL , INTENT(in   ) ::   pval        ! background value (used at closed boundaries)
59        !
60        INTEGER  ::   ji, jj, jk, jl, jh, jf      ! dummy loop indices
54        CHARACTER(len=*)           , INTENT(in   ) ::   cdname      ! name of the calling subroutine
55        CHARACTER(len=1)           , INTENT(in   ) ::   NAT_IN(:)   ! nature of array grid-points
56        REAL(wp)                   , INTENT(in   ) ::   SGN_IN(:)   ! sign used across the north fold boundary
57        INTEGER ,        OPTIONAL  , INTENT(in   ) ::   kfillmode   ! filling method for halo over land (default = constant)
58        REAL(wp),        OPTIONAL  , INTENT(in   ) ::   pfillval    ! background value (used at closed boundaries)
59        !
60        INTEGER  ::   ji, jj, jk, jl, jf          ! dummy loop indices
61 61     INTEGER  ::   ipi, ipj, ipk, ipl, ipf     ! dimension of the input array
62        INTEGER  ::   imigr, iihom, ijhom         ! local integers
63        INTEGER  ::   ml_req1, ml_req2, ml_err    ! for key_mpi_isend
62        INTEGER  ::   isize, ishift, ishift2      ! local integers
63        INTEGER  ::   ireq_we, ireq_ea, ireq_so, ireq_no   ! mpi_request id
64 64     INTEGER  ::   ierr
65        INTEGER  ::   ifill_we, ifill_ea, ifill_so, ifill_no
65 66     REAL(wp) ::   zland
66        INTEGER , DIMENSION(MPI_STATUS_SIZE)          ::   ml_stat        ! for key_mpi_isend
67        REAL(wp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE ::   zt3ns, zt3sn   ! north-south & south-north halos
68        REAL(wp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE ::   zt3ew, zt3we   ! east-west & west-east halos
67        INTEGER , DIMENSION(MPI_STATUS_SIZE)          ::   istat          ! for mpi_isend
68        REAL(wp), DIMENSION(:,:,:,:,:)  , ALLOCATABLE ::   zsnd_we, zrcv_we, zsnd_ea, zrcv_ea   ! east-west & west-east halos
69        REAL(wp), DIMENSION(:,:,:,:,:)  , ALLOCATABLE ::   zsnd_so, zrcv_so, zsnd_no, zrcv_no   ! north-south & south-north halos
70        LOGICAL  ::   llcom_we, llcom_ea, llcom_no, llcom_so   ! communication done or not
71        LOGICAL  ::   lldo_nfd                                 ! do north pole folding
69 72     !!----------------------------------------------------------------------
73        !
74        ! ----------------------------------------- !
75        !    0. local variables initialization      !
76        ! ----------------------------------------- !
70 77     !
71 78     ipk = K_SIZE(ptab)   ! 3rd dimension
…
75 82     IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. )
76 83     !
77        IF( PRESENT( pval ) ) THEN   ;   zland = pval      ! set land value
78        ELSE                         ;   zland = 0._wp     ! zero by default
79        ENDIF
80  
81        ! ------------------------------- !
82        !   standard boundary treatment   !   ! CAUTION: semi-colon notation is often impossible
83        ! ------------------------------- !
84        !
85        IF( .NOT. PRESENT( cd_mpp ) ) THEN     !==  standard close or cyclic treatment  ==!
86           !
87           DO jf = 1, ipf                      ! number of arrays to be treated
88              !
89              !                                ! East-West boundaries
90              IF( l_Iperio ) THEN                    !* cyclic
91                 ARRAY_IN( 1 ,:,:,:,jf) = ARRAY_IN(jpim1,:,:,:,jf)
92                 ARRAY_IN(jpi,:,:,:,jf) = ARRAY_IN(  2  ,:,:,:,jf)
93              ELSE                                   !* closed
94                 IF( .NOT. NAT_IN(jf) == 'F' )   ARRAY_IN(      1      :nn_hls,:,:,:,jf) = zland   ! east except F-point
95                                                 ARRAY_IN(nlci-nn_hls+1:jpi   ,:,:,:,jf) = zland   ! west
96              ENDIF
97              !                                ! North-South boundaries
98              IF( l_Jperio ) THEN                    !* cyclic (only with no mpp j-split)
99                 ARRAY_IN(:, 1 ,:,:,jf) = ARRAY_IN(:, jpjm1,:,:,jf)
100                ARRAY_IN(:,jpj,:,:,jf) = ARRAY_IN(:,   2  ,:,:,jf)
101             ELSE                                   !* closed
102                IF( .NOT. NAT_IN(jf) == 'F' )   ARRAY_IN(:,      1      :nn_hls,:,:,jf) = zland   ! south except F-point
103                                                ARRAY_IN(:,nlcj-nn_hls+1:jpj   ,:,:,jf) = zland   ! north
84        llcom_we = nbondi ==  1 .OR. nbondi == 0   ! keep for compatibility, should be defined in mppini
85        llcom_ea = nbondi == -1 .OR. nbondi == 0   ! keep for compatibility, should be defined in mppini
86        llcom_so = nbondj ==  1 .OR. nbondj == 0   ! keep for compatibility, should be defined in mppini
87        llcom_no = nbondj == -1 .OR. nbondj == 0   ! keep for compatibility, should be defined in mppini
88  
89        lldo_nfd = npolj /= 0                      ! keep for compatibility, should be defined in mppini
90  
91        zland = 0._wp                                  ! land filling value: zero by default
92        IF( PRESENT( pfillval ) )   zland = pfillval   ! set land value
93  
94        ! define the method we will use to fill the halos in each direction
95        IF(     llcom_we )           THEN   ;   ifill_we = jpfillmpi
96        ELSEIF( l_Iperio )           THEN   ;   ifill_we = jpfillperio
97        ELSEIF( PRESENT(kfillmode) ) THEN   ;   ifill_we = kfillmode
98        ELSE                                ;   ifill_we = jpfillcst
99        END IF
100       !
101       IF( llcom_ea ) THEN                 ;   ifill_ea = jpfillmpi
102       ELSE                                ;   ifill_ea = ifill_we
103       END IF
104       !
105       IF(     llcom_so )           THEN   ;   ifill_so = jpfillmpi
106       ELSEIF( l_Jperio )           THEN   ;   ifill_so = jpfillperio
107       ELSEIF( PRESENT(kfillmode) ) THEN   ;   ifill_so = kfillmode
108       ELSE                                ;   ifill_so = jpfillcst
109       END IF
110       !
111       IF( llcom_no ) THEN                 ;   ifill_no = jpfillmpi
112       ELSE                                ;   ifill_no = ifill_so   ! warning will be potentially changed if lldo_nfd = T
113       END IF
114       !
115    #if defined PRINT_CAUTION
116       !
117       ! ================================================================================== !
118       ! CAUTION: semi-colon notation is often impossible because of the cpp preprocessing  !
119       ! ================================================================================== !
120       !
121    #endif
122       !
123       ! -------------------------------------------------- !
124       !    1. Do east and west MPI exchange if needed      !
125       ! -------------------------------------------------- !
126       !
127       ! these exchanges are made for jj = nn_hls+1 to jpj-nn_hls
128       isize = nn_hls * ( jpj - 2*nn_hls ) * ipk * ipl * ipf
129  
130       ! Allocate local temporary arrays to be sent/received. Fill arrays to be sent
131       IF( ifill_we == jpfillmpi ) THEN   ! copy western side of the inner mpi domain in local temporary array to be sent by MPI
132          !
133          ALLOCATE( zsnd_we(nn_hls,jpj-2*nn_hls,ipk,ipl,ipf), zrcv_we(nn_hls,jpj-2*nn_hls,ipk,ipl,ipf) )
134          ishift = nn_hls
135          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
136             zsnd_we(ji,jj-nn_hls,jk,jl,jf) = ARRAY_IN(ishift+ji,jj,jk,jl,jf)   ! nn_hls + 1 -> 2*nn_hls
137          END DO ; END DO ; END DO ; END DO ; END DO
138       ENDIF
139       !
140       IF( ifill_ea == jpfillmpi ) THEN   ! copy eastern side of the inner mpi domain in local temporary array to be sent by MPI
141          !
142          ALLOCATE( zsnd_ea(nn_hls,jpj-2*nn_hls,ipk,ipl,ipf), zrcv_ea(nn_hls,jpj-2*nn_hls,ipk,ipl,ipf) )
143          ishift = jpi - 2 * nn_hls
144          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
145             zsnd_ea(ji,jj-nn_hls,jk,jl,jf) = ARRAY_IN(ishift+ji,jj,jk,jl,jf)   ! jpi - 2*nn_hls + 1 -> jpi - nn_hls
146          END DO ; END DO ; END DO ; END DO ; END DO
147       ENDIF
148       !
149       IF( ln_timing ) CALL tic_tac(.TRUE.)
150       !
151       ! non-blocking send of the western/eastern side using local temporary arrays
152       IF( ifill_we == jpfillmpi )   CALL mppsend( 1, zsnd_we(1,1,1,1,1), isize, nowe, ireq_we )
153       IF( ifill_ea == jpfillmpi )   CALL mppsend( 2, zsnd_ea(1,1,1,1,1), isize, noea, ireq_ea )
154       ! blocking receive of the western/eastern halo in local temporary arrays
155       IF( ifill_we == jpfillmpi )   CALL mpprecv( 2, zrcv_we(1,1,1,1,1), isize, nowe )
156       IF( ifill_ea == jpfillmpi )   CALL mpprecv( 1, zrcv_ea(1,1,1,1,1), isize, noea )
157       !
158       IF( ln_timing ) CALL tic_tac(.FALSE.)
159       !
160       !
161       ! ----------------------------------- !
162       !     2. Fill east and west halos     !
163       ! ----------------------------------- !
164       !
165       ! 2.1 fill western halo
166       ! ---------------------
167       ! ishift = 0                      ! fill halo from ji = 1 to nn_hls
168       SELECT CASE ( ifill_we )
169       CASE ( jpfillnothing )            ! no filling
170       CASE ( jpfillmpi )                ! use data received by MPI
171          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
172             ARRAY_IN(ji,jj,jk,jl,jf) = zrcv_we(ji,jj-nn_hls,jk,jl,jf)   ! 1 -> nn_hls
173          END DO ; END DO ; END DO ; END DO ; END DO
174       CASE ( jpfillperio )              ! use east-west periodicity
175          ishift2 = jpi - 2 * nn_hls
176          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
177             ARRAY_IN(ji,jj,jk,jl,jf) = ARRAY_IN(ishift2+ji,jj,jk,jl,jf)
178          END DO ; END DO ; END DO ; END DO ; END DO
179       CASE ( jpfillcopy )               ! filling with inner domain values
180          DO jf = 1, ipf                 ! number of arrays to be treated
181             IF( .NOT. NAT_IN(jf) == 'F' ) THEN   ! do nothing for F point
182                DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
183                   ARRAY_IN(ji,jj,jk,jl,jf) = ARRAY_IN(nn_hls+1,jj,jk,jl,jf)
184                END DO ; END DO ; END DO ; END DO
104 185            ENDIF
105 186         END DO
106       !
107       ENDIF
108  
109       ! ------------------------------- !
110       !     East and west exchange      !
111       ! ------------------------------- !
112       ! we play with the neighbours AND the row number because of the periodicity
113       !
114       IF( ABS(nbondi) == 1 ) ALLOCATE( zt3ew(jpj,nn_hls,ipk,ipl,ipf,1), zt3we(jpj,nn_hls,ipk,ipl,ipf,1) )
115       IF( nbondi == 0 )      ALLOCATE( zt3ew(jpj,nn_hls,ipk,ipl,ipf,2), zt3we(jpj,nn_hls,ipk,ipl,ipf,2) )
116       !
117       SELECT CASE ( nbondi )            ! Read Dirichlet lateral conditions
118       CASE ( -1 )
119          iihom = nlci-nreci
120          DO jf = 1, ipf
121             DO jl = 1, ipl
122                DO jk = 1, ipk
123                   DO jh = 1, nn_hls
124                      zt3we(:,jh,jk,jl,jf,1) = ARRAY_IN(iihom +jh,:,jk,jl,jf)
125                   END DO
126                END DO
127             END DO
128          END DO
129       CASE ( 0 )
130          iihom = nlci-nreci
131          DO jf = 1, ipf
132             DO jl = 1, ipl
133                DO jk = 1, ipk
134                   DO jh = 1, nn_hls
135                      zt3ew(:,jh,jk,jl,jf,1) = ARRAY_IN(nn_hls+jh,:,jk,jl,jf)
136                      zt3we(:,jh,jk,jl,jf,1) = ARRAY_IN(iihom +jh,:,jk,jl,jf)
137                   END DO
138                END DO
139             END DO
140          END DO
141       CASE ( 1 )
142          iihom = nlci-nreci
143          DO jf = 1, ipf
144             DO jl = 1, ipl
145                DO jk = 1, ipk
146                   DO jh = 1, nn_hls
147                      zt3ew(:,jh,jk,jl,jf,1) = ARRAY_IN(nn_hls+jh,:,jk,jl,jf)
148                   END DO
149                END DO
150             END DO
187       CASE ( jpfillcst )                ! filling with constant value
188          DO jf = 1, ipf                 ! number of arrays to be treated
189             IF( .NOT. NAT_IN(jf) == 'F' ) THEN   ! do nothing for F point
190                DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
191                   ARRAY_IN(ji,jj,jk,jl,jf) = zland
192                END DO ; END DO ; END DO ; END DO
193             ENDIF
151 194         END DO
152 195      END SELECT
153       !                                 ! Migrations
154       imigr = nn_hls * jpj * ipk * ipl * ipf
155       !
156       IF( ln_timing ) CALL tic_tac(.TRUE.)
157       !
158       SELECT CASE ( nbondi )
159       CASE ( -1 )
160          CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req1 )
161          CALL mpprecv( 1, zt3ew(1,1,1,1,1,1), imigr, noea )
162          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
163       CASE ( 0 )
164          CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
165          CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req2 )
166          CALL mpprecv( 1, zt3ew(1,1,1,1,1,2), imigr, noea )
167          CALL mpprecv( 2, zt3we(1,1,1,1,1,2), imigr, nowe )
168          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err)
169          IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err)
170       CASE ( 1 )
171          CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 )
172          CALL mpprecv( 2, zt3we(1,1,1,1,1,1), imigr, nowe )
173          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err )
196       !
197       ! 2.2 fill eastern halo
198       ! ---------------------
199       ishift = jpi - nn_hls             ! fill halo from ji = jpi-nn_hls+1 to jpi
200       SELECT CASE ( ifill_ea )
201       CASE ( jpfillnothing )            ! no filling
202       CASE ( jpfillmpi )                ! use data received by MPI
203          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
204             ARRAY_IN(ishift+ji,jj,jk,jl,jf) = zrcv_ea(ji,jj-nn_hls,jk,jl,jf)   ! jpi - nn_hls + 1 -> jpi
205          END DO ; END DO ; END DO ; END DO ; END DO
206       CASE ( jpfillperio )              ! use east-west periodicity
207          ishift2 = nn_hls
208          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
209             ARRAY_IN(ishift+ji,jj,jk,jl,jf) = ARRAY_IN(ishift2+ji,jj,jk,jl,jf)
210          END DO ; END DO ; END DO ; END DO ; END DO
211       CASE ( jpfillcopy )               ! filling with inner domain values
212          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
213             ARRAY_IN(ishift+ji,jj,jk,jl,jf) = ARRAY_IN(ishift,jj,jk,jl,jf)
214          END DO ; END DO ; END DO ; END DO ; END DO
215       CASE ( jpfillcst )                ! filling with constant value
216          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = nn_hls+1, jpj-nn_hls ; DO ji = 1, nn_hls
217             ARRAY_IN(ishift+ji,jj,jk,jl,jf) = zland
218          END DO ; END DO ; END DO ; END DO ; END DO
219       END SELECT
174       END SELECT
175       !
176       IF( ln_timing ) CALL tic_tac(.FALSE.)
177       !
178       !                                 ! Write Dirichlet lateral conditions
179       iihom = nlci-nn_hls
180       !
181       SELECT CASE ( nbondi )
182       CASE ( -1 )
183          DO jf = 1, ipf
184             DO jl = 1, ipl
185                DO jk = 1, ipk
186                   DO jh = 1, nn_hls
187                      ARRAY_IN(iihom+jh,:,jk,jl,jf) = zt3ew(:,jh,jk,jl,jf,1)
188                   END DO
189                END DO
190             END DO
191          END DO
192       CASE ( 0 )
193          DO jf = 1, ipf
194             DO jl = 1, ipl
195                DO jk = 1, ipk
196                   DO jh = 1, nn_hls
197                      ARRAY_IN(jh      ,:,jk,jl,jf) = zt3we(:,jh,jk,jl,jf,2)
198                      ARRAY_IN(iihom+jh,:,jk,jl,jf) = zt3ew(:,jh,jk,jl,jf,2)
199                   END DO
200                END DO
201             END DO
202          END DO
203       CASE ( 1 )
204          DO jf = 1, ipf
205             DO jl = 1, ipl
206                DO jk = 1, ipk
207                   DO jh = 1, nn_hls
208                      ARRAY_IN(jh      ,:,jk,jl,jf) = zt3we(:,jh,jk,jl,jf,1)
209                   END DO
210                END DO
211             END DO
212          END DO
213       END SELECT
214       !
215       IF( nbondi /= 2 ) DEALLOCATE( zt3ew, zt3we )
216 220    !
217 221    ! ------------------------------- !
218 222    !     3. north fold treatment     !
219 223    ! ------------------------------- !
224       !
220 225    ! do it before south directions so concerned processes can do it without waiting for the comm with the southern neighbour
221       IF( npolj /= 0 .AND. .NOT. PRESENT(cd_mpp) ) THEN
226       !
227       IF( lldo_nfd .AND. ifill_no /= jpfillnothing ) THEN
222 228       !
223 229       SELECT CASE ( jpni )
…
226 232       END SELECT
227 233       !
228       ENDIF
234          ifill_no = jpfillnothing   ! force to do nothing for the northern halo as we just done the north pole folding
235  
236       ENDIF
229  
230       ! ------------------------------- !
231       !  4. North and south directions  !
232       ! ------------------------------- !
233       ! always closed : we play only with the neighbours
234       !
235       IF( ABS(nbondj) == 1 ) ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,1), zt3sn(jpi,nn_hls,ipk,ipl,ipf,1) )
236       IF( nbondj == 0 )      ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,2), zt3sn(jpi,nn_hls,ipk,ipl,ipf,2) )
237       !
238       SELECT CASE ( nbondj )
239       CASE ( -1 )
240          ijhom = nlcj-nrecj
241          DO jf = 1, ipf
242             DO jl = 1, ipl
243                DO jk = 1, ipk
244                   DO jh = 1, nn_hls
245                      zt3sn(:,jh,jk,jl,jf,1) = ARRAY_IN(:,ijhom +jh,jk,jl,jf)
246                   END DO
247                END DO
248             END DO
238       !
239       ! ---------------------------------------------------- !
240       !     4. Do north and south MPI exchange if needed     !
241       ! ---------------------------------------------------- !
242       !
243       isize = jpi * nn_hls * ipk * ipl * ipf
244  
245       ! allocate local temporary arrays to be sent/received. Fill arrays to be sent
246       IF( ifill_so == jpfillmpi ) THEN   ! copy southern side of the inner mpi domain in local temporary array to be sent by MPI
247          !
248          ALLOCATE( zsnd_so(jpi,nn_hls,ipk,ipl,ipf), zrcv_so(jpi,nn_hls,ipk,ipl,ipf) )
249          ishift = nn_hls
250          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
251             zsnd_so(ji,jj,jk,jl,jf) = ARRAY_IN(ji,ishift+jj,jk,jl,jf)   ! nn_hls+1 -> 2*nn_hls
252          END DO ; END DO ; END DO ; END DO ; END DO
253       ENDIF
254       !
255       IF( ifill_no == jpfillmpi ) THEN   ! copy northern side of the inner mpi domain in local temporary array to be sent by MPI
256          !
257          ALLOCATE( zsnd_no(jpi,nn_hls,ipk,ipl,ipf), zrcv_no(jpi,nn_hls,ipk,ipl,ipf) )
258          ishift = jpj - 2 * nn_hls
259          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
260             zsnd_no(ji,jj,jk,jl,jf) = ARRAY_IN(ji,ishift+jj,jk,jl,jf)   ! jpj-2*nn_hls+1 -> jpj-nn_hls
261          END DO ; END DO ; END DO ; END DO ; END DO
262       ENDIF
263       !
264       IF( ln_timing ) CALL tic_tac(.TRUE.)
265       !
266       ! non-blocking send of the southern/northern side
267       IF( ifill_so == jpfillmpi )   CALL mppsend( 3, zsnd_so(1,1,1,1,1), isize, noso, ireq_so )
268       IF( ifill_no == jpfillmpi )   CALL mppsend( 4, zsnd_no(1,1,1,1,1), isize, nono, ireq_no )
269       ! blocking receive of the southern/northern halo
270       IF( ifill_so == jpfillmpi )   CALL mpprecv( 4, zrcv_so(1,1,1,1,1), isize, noso )
271       IF( ifill_no == jpfillmpi )   CALL mpprecv( 3, zrcv_no(1,1,1,1,1), isize, nono )
272       !
273       IF( ln_timing ) CALL tic_tac(.FALSE.)
274       !
275       ! ------------------------------------- !
276       !     5. Fill south and north halos     !
277       ! ------------------------------------- !
278       !
279       ! 5.1 fill southern halo
280       ! ----------------------
281       ! ishift = 0                      ! fill halo from jj = 1 to nn_hls
282       SELECT CASE ( ifill_so )
283       CASE ( jpfillnothing )            ! no filling
284       CASE ( jpfillmpi )                ! use data received by MPI
285          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
286             ARRAY_IN(ji,jj,jk,jl,jf) = zrcv_so(ji,jj,jk,jl,jf)   ! 1 -> nn_hls
287          END DO ; END DO ; END DO ; END DO ; END DO
288       CASE ( jpfillperio )              ! use north-south periodicity
289          ishift2 = jpj - 2 * nn_hls
290          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
291             ARRAY_IN(ji,jj,jk,jl,jf) = ARRAY_IN(ji,ishift2+jj,jk,jl,jf)
292          END DO ; END DO ; END DO ; END DO ; END DO
293       CASE ( jpfillcopy )               ! filling with inner domain values
294          DO jf = 1, ipf                 ! number of arrays to be treated
295             IF( .NOT. NAT_IN(jf) == 'F' ) THEN   ! do nothing for F point
296                DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
297                   ARRAY_IN(ji,jj,jk,jl,jf) = ARRAY_IN(ji,nn_hls+1,jk,jl,jf)
298                END DO ; END DO ; END DO ; END DO
299             ENDIF
249 300         END DO
250       CASE ( 0 )
251          ijhom = nlcj-nrecj
252          DO jf = 1, ipf
253             DO jl = 1, ipl
254                DO jk = 1, ipk
255                   DO jh = 1, nn_hls
256                      zt3sn(:,jh,jk,jl,jf,1) = ARRAY_IN(:,ijhom +jh,jk,jl,jf)
257                      zt3ns(:,jh,jk,jl,jf,1) = ARRAY_IN(:,nn_hls+jh,jk,jl,jf)
258                   END DO
259                END DO
260             END DO
261          END DO
262       CASE ( 1 )
263          ijhom = nlcj-nrecj
264          DO jf = 1, ipf
265             DO jl = 1, ipl
266                DO jk = 1, ipk
267                   DO jh = 1, nn_hls
268                      zt3ns(:,jh,jk,jl,jf,1) = ARRAY_IN(:,nn_hls+jh,jk,jl,jf)
269                   END DO
270                END DO
271             END DO
301       CASE ( jpfillcst )                ! filling with constant value
302          DO jf = 1, ipf                 ! number of arrays to be treated
303             IF( .NOT. NAT_IN(jf) == 'F' ) THEN   ! do nothing for F point
304                DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
305                   ARRAY_IN(ji,jj,jk,jl,jf) = zland
306                END DO ; END DO ; END DO ; END DO
307             ENDIF
272 308         END DO
273 309      END SELECT
274 310      !
275       !                                 ! Migrations
276       imigr = nn_hls * jpi * ipk * ipl * ipf
277       !
278       IF( ln_timing ) CALL tic_tac(.TRUE.)
279       !
280       SELECT CASE ( nbondj )
281       CASE ( -1 )
282          CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req1 )
283          CALL mpprecv( 3, zt3ns(1,1,1,1,1,1), imigr, nono )
284          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err )
285       CASE ( 0 )
286          CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
287          CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req2 )
288          CALL mpprecv( 3, zt3ns(1,1,1,1,1,2), imigr, nono )
289          CALL mpprecv( 4, zt3sn(1,1,1,1,1,2), imigr, noso )
290          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err )
291          IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err )
292       CASE ( 1 )
293          CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 )
294          CALL mpprecv( 4, zt3sn(1,1,1,1,1,1), imigr, noso )
295          IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err )
311       ! 5.2 fill northern halo
312       ! ----------------------
313       ishift = jpj - nn_hls             ! fill halo from jj = jpj-nn_hls+1 to jpj
314       SELECT CASE ( ifill_no )
315       CASE ( jpfillnothing )            ! no filling
316       CASE ( jpfillmpi )                ! use data received by MPI
317          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
318             ARRAY_IN(ji,ishift+jj,jk,jl,jf) = zrcv_no(ji,jj,jk,jl,jf)   ! jpj-nn_hls+1 -> jpj
319          END DO ; END DO ; END DO ; END DO ; END DO
320       CASE ( jpfillperio )              ! use north-south periodicity
321          ishift2 = nn_hls
322          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
323             ARRAY_IN(ji,ishift+jj,jk,jl,jf) = ARRAY_IN(ji,ishift2+jj,jk,jl,jf)
324          END DO ; END DO ; END DO ; END DO ; END DO
325       CASE ( jpfillcopy )               ! filling with inner domain values
326          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
327             ARRAY_IN(ji,ishift+jj,jk,jl,jf) = ARRAY_IN(ji,ishift,jk,jl,jf)
328          END DO ; END DO ; END DO ; END DO ; END DO
329       CASE ( jpfillcst )                ! filling with constant value
330          DO jf = 1, ipf ; DO jl = 1, ipl ; DO jk = 1, ipk ; DO jj = 1, nn_hls ; DO ji = 1, jpi
331             ARRAY_IN(ji,ishift+jj,jk,jl,jf) = zland
332          END DO ; END DO ; END DO ; END DO ; END DO
333       END SELECT
296       END SELECT
297       !
298       IF( ln_timing ) CALL tic_tac(.FALSE.)
299       !                                 ! Write Dirichlet lateral conditions
300       ijhom = nlcj-nn_hls
301       !
302       SELECT CASE ( nbondj )
303       CASE ( -1 )
304          DO jf = 1, ipf
305             DO jl = 1, ipl
306                DO jk = 1, ipk
307                   DO jh = 1, nn_hls
308                      ARRAY_IN(:,ijhom+jh,jk,jl,jf) = zt3ns(:,jh,jk,jl,jf,1)
309                   END DO
310                END DO
311             END DO
312          END DO
313       CASE ( 0 )
314          DO jf = 1, ipf
315             DO jl = 1, ipl
316                DO jk = 1, ipk
317                   DO jh = 1, nn_hls
318                      ARRAY_IN(:,      jh,jk,jl,jf) = zt3sn(:,jh,jk,jl,jf,2)
319                      ARRAY_IN(:,ijhom+jh,jk,jl,jf) = zt3ns(:,jh,jk,jl,jf,2)
320                   END DO
321                END DO
322             END DO
323          END DO
324       CASE ( 1 )
325          DO jf = 1, ipf
326             DO jl = 1, ipl
327                DO jk = 1, ipk
328                   DO jh = 1, nn_hls
329                      ARRAY_IN(:,jh,jk,jl,jf) = zt3sn(:,jh,jk,jl,jf,1)
330                   END DO
331                END DO
332             END DO
333          END DO
334       END SELECT
335       !
336       IF( nbondj /= 2 ) DEALLOCATE( zt3ns, zt3sn )
334       ! -------------------------------------------- !
335       !     6. deallocate local temporary arrays     !
336       ! -------------------------------------------- !
337       !
338       IF( ifill_we == jpfillmpi ) THEN
339          CALL mpi_wait(ireq_we, istat, ierr )
340          DEALLOCATE( zsnd_we, zrcv_we )
341       ENDIF
342       IF( ifill_ea == jpfillmpi ) THEN
343          CALL mpi_wait(ireq_ea, istat, ierr )
344          DEALLOCATE( zsnd_ea, zrcv_ea )
345       ENDIF
346       IF( ifill_so == jpfillmpi ) THEN
347          CALL mpi_wait(ireq_so, istat, ierr )
348          DEALLOCATE( zsnd_so, zrcv_so )
349       ENDIF
350       IF( ifill_no == jpfillmpi ) THEN
351          CALL mpi_wait(ireq_no, istat, ierr )
352          DEALLOCATE( zsnd_no, zrcv_no )
353       ENDIF
337 354    !
338 355 END SUBROUTINE ROUTINE_LNK
-
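The rewritten ROUTINE_LNK replaces the nbondi/nbondj SELECT blocks of r10542 with one fill method per boundary (ifill_we, ifill_ea, ifill_so, ifill_no), each dispatched over the new jpfillmpi / jpfillperio / jpfillcopy / jpfillcst / jpfillnothing modes, and overlaps the non-blocking sends with the north-fold work before the halos are written back. A minimal, self-contained sketch of that dispatch on a 1-D array; the jpfill* values, the sizes and the fill_west name below are illustrative assumptions, not the constants defined in NEMO's headers:

   PROGRAM halo_fill_sketch
      IMPLICIT NONE
      ! assumed flag values, for illustration only
      INTEGER, PARAMETER :: jpfillcst = 0, jpfillperio = 1, jpfillcopy = 2, jpfillnothing = 3
      INTEGER, PARAMETER :: nn_hls = 2, jpi = 10      ! halo width, total i-size
      REAL    :: pt(jpi)
      INTEGER :: ji
      pt = -1.
      DO ji = nn_hls+1, jpi-nn_hls                    ! inner-domain values
         pt(ji) = REAL(ji)
      END DO
      CALL fill_west( pt, jpfillperio, 99. )          ! try jpfillcst / jpfillcopy too
      PRINT '(10f6.1)', pt
   CONTAINS
      SUBROUTINE fill_west( pt, kfillmode, pfillval )
         REAL   , INTENT(inout) :: pt(:)
         INTEGER, INTENT(in   ) :: kfillmode
         REAL   , INTENT(in   ) :: pfillval
         SELECT CASE( kfillmode )
         CASE( jpfillperio )                          ! east-west periodicity
            pt(1:nn_hls) = pt(jpi-2*nn_hls+1:jpi-nn_hls)
         CASE( jpfillcopy )                           ! duplicate the first inner point
            pt(1:nn_hls) = pt(nn_hls+1)
         CASE( jpfillcst )                            ! constant (land) value
            pt(1:nn_hls) = pfillval
         CASE( jpfillnothing )                        ! leave the halo untouched
         END SELECT
      END SUBROUTINE fill_west
   END PROGRAM halo_fill_sketch

In the jpfillmpi mode the same halo slot is filled from the zrcv_we buffer instead, which is why the mppsend/mpprecv pair in section 1 can run before any halo point is touched: no fill method ever reads the halo it overwrites.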
NEMO/branches/2019/dev_r10984_HPC-13_IRRMANN_BDY_optimization/src/OCE/LBC/mppini.F90
r10615 r11192
84 84     nbondj = 2
85 85     nidom  = FLIO_DOM_NONE
86        npolj = jperio
86        npolj = 0
87        IF( jperio == 3 .OR. jperio == 4 )   npolj = 3
88        IF( jperio == 5 .OR. jperio == 6 )   npolj = 5
87 89     l_Iperio = jpni == 1 .AND. (jperio == 1 .OR. jperio == 4 .OR. jperio == 6 .OR. jperio == 7)
88 90     l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7)
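In mppini.F90 the north-fold indicator npolj is no longer a raw copy of jperio: it is derived, so that only the genuinely folded configurations (jperio = 3 to 6) activate the fold. A runnable restatement of that mapping; the T-/F-point pivot reading of the codes is the usual NEMO convention, stated here as an assumption:

   PROGRAM npolj_sketch
      IMPLICIT NONE
      INTEGER :: jperio, npolj
      DO jperio = 0, 7
         npolj = 0                                        ! default: no north fold
         IF( jperio == 3 .OR. jperio == 4 )   npolj = 3   ! fold, T-point pivot (assumed reading)
         IF( jperio == 5 .OR. jperio == 6 )   npolj = 5   ! fold, F-point pivot (assumed reading)
         PRINT '(a,i1,a,i1)', 'jperio = ', jperio, '  ->  npolj = ', npolj
      END DO
   END PROGRAM npolj_sketch

This matters for the new lbc_lnk logic shown above, where lldo_nfd = npolj /= 0 gates the folding: under the old raw copy, purely cyclic settings such as jperio = 1 or 2 would have left npolj non-zero and made that test misfire.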
Note: See TracChangeset for help on using the changeset viewer.