Changeset 10425 for NEMO/trunk/src/OCE/LBC
- Timestamp: 2018-12-19T22:54:16+01:00
- Location: NEMO/trunk/src/OCE/LBC
- Files: 12 edited, 2 copied
NEMO/trunk/src/OCE/LBC/lbc_lnk_generic.h90
(changes from r10068 to r10425)

The generic link routine now receives the name of the calling subroutine as its first argument, and the empty "halos filled with inner values" branch is collapsed into a single test on cd_mpp:

    #if defined MULTI
   -   SUBROUTINE ROUTINE_LNK( ptab, cd_nat, psgn, kfld, cd_mpp, pval )
   +   SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, cd_mpp, pval )
          INTEGER                     , INTENT(in   ) ::   kfld        ! number of pt3d arrays
    #else
   -   SUBROUTINE ROUTINE_LNK( ptab, cd_nat, psgn          , cd_mpp, pval )
   +   SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn          , cd_mpp, pval )
    #endif
   +      CHARACTER(len=*)            , INTENT(in   ) ::   cdname      ! name of the calling subroutine
          ARRAY_TYPE(:,:,:,:,:)                                        ! array or pointer of arrays on which the boundary condition is applied
          CHARACTER(len=1)            , INTENT(in   ) ::   NAT_IN(:)   ! nature of array grid-points
    [...]
          !                                 ! ------------------------------- !
          !
   -      IF( PRESENT( cd_mpp ) ) THEN          !== halos filled with inner values ==!
   -         !
   -         ! only fill the overlap area and extra allows
   -         ! this is in mpp case. In this module, just do nothing
   -         !
   -      ELSE                                  !== standard close or cyclic treatment ==!
   +      IF( .NOT. PRESENT( cd_mpp ) ) THEN    !== standard close or cyclic treatment ==!
          !
             DO jf = 1, ipf                     ! number of arrays to be treated
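In practice the new string is simply passed down to the MPI layer, where it feeds the communication report and error messages added in lib_mpp.F90 (see mpp_report below). A minimal before/after sketch of a call site, with a hypothetical caller name and work array (not part of this changeset):

      REAL(wp), DIMENSION(jpi,jpj,jpk) ::   zfld          ! some field defined on T-points
      ! r10068-style call
      CALL lbc_lnk( zfld, 'T', 1._wp )
      ! r10425-style call: the first argument names the calling routine
      CALL lbc_lnk( 'my_routine', zfld, 'T', 1._wp )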
NEMO/trunk/src/OCE/LBC/lbc_lnk_multi_generic.h90
(changes from r10068 to r10425)

    #  define PTR_ptab   pt4d
    #endif
   -   SUBROUTINE ROUTINE_MULTI( pt1, cdna1, psgn1, pt2, cdna2, psgn2, pt3, cdna3, psgn3   &
   +   SUBROUTINE ROUTINE_MULTI( cdname                                                    &
   +      &                    , pt1, cdna1, psgn1, pt2, cdna2, psgn2, pt3, cdna3, psgn3   &
          &                    , pt4, cdna4, psgn4, pt5, cdna5, psgn5, pt6, cdna6, psgn6   &
          &                    , pt7, cdna7, psgn7, pt8, cdna8, psgn8, pt9, cdna9, psgn9, cd_mpp, pval)
          !!---------------------------------------------------------------------
   +      CHARACTER(len=*)   ,                   INTENT(in   ) ::   cdname  ! name of the calling subroutine
          ARRAY_TYPE(:,:,:,:)          , TARGET, INTENT(inout) ::   pt1     ! arrays on which the lbc is applied
          ARRAY_TYPE(:,:,:,:), OPTIONAL, TARGET, INTENT(inout) ::   pt2, pt3, pt4, pt5, pt6, pt7, pt8, pt9
    [...]
          IF( PRESENT(psgn9) )   CALL ROUTINE_LOAD( pt9, cdna9, psgn9, ptab_ptr, cdna_ptr, psgn_ptr, kfld )
          !
   -      CALL lbc_lnk_ptr( ptab_ptr, cdna_ptr, psgn_ptr, kfld, cd_mpp, pval )
   +      CALL lbc_lnk_ptr( cdname, ptab_ptr, cdna_ptr, psgn_ptr, kfld, cd_mpp, pval )
          !
       END SUBROUTINE ROUTINE_MULTI
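The multi-field wrapper still packs up to nine (field, grid-nature, sign) triplets into pointer arrays through ROUTINE_LOAD before performing a single exchange; only the leading name argument is new. A hedged sketch of a two-field call site (caller name and fields are hypothetical):

      CALL lbc_lnk_multi( 'my_routine', zu, 'U', -1._wp, zv, 'V', -1._wp )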
NEMO/trunk/src/OCE/LBC/lbc_nfd_ext_generic.h90
(changes from r10068 to r10425)

             ARRAY_IN(ji,ipj+jh,:,:,jf) = SGN_IN(jf) * ARRAY_IN(iju,ipj-1-jh,:,:,jf)
          END DO
   -      ARRAY_IN(jpiglo,ipj+jh,:,:,jf) = SGN_IN(jf) * ARRAY_IN( 1,ipj-1-jh,:,:,jf)
   +      ARRAY_IN(jpiglo,ipj+jh,:,:,jf) = SGN_IN(jf) * ARRAY_IN(jpiglo-2,ipj-1-jh,:,:,jf)
       END DO
    CASE ( 'V' )                               ! V-point
    [...]
             ARRAY_IN(ji,ipj+jh ,:,:,jf) = SGN_IN(jf) * ARRAY_IN(iju,ipj-2-jh,:,:,jf)
          END DO
   -      ARRAY_IN(jpiglo,ipj+jh,:,:,jf) = SGN_IN(jf) * ARRAY_IN( 1,ipj-2-jh,:,:,jf)
   +      ARRAY_IN(jpiglo,ipj+jh,:,:,jf) = SGN_IN(jf) * ARRAY_IN(jpiglo-2,ipj-2-jh,:,:,jf)
       END DO
       DO ji = jpiglo/2+1, jpiglo-1
NEMO/trunk/src/OCE/LBC/lbc_nfd_generic.h90
(changes from r10068 to r10425)

             ARRAY_IN(ji,ipj,:,:,jf) = SGN_IN(jf) * ARRAY_IN(iju,ipj-1,:,:,jf)
          END DO
   -      ARRAY_IN(jpiglo,ipj,:,:,jf) = SGN_IN(jf) * ARRAY_IN(1,ipj-1,:,:,jf)
   +      ARRAY_IN(jpiglo,ipj,:,:,jf) = SGN_IN(jf) * ARRAY_IN(jpiglo-2,ipj-1,:,:,jf)
    CASE ( 'V' )                               ! V-point
       DO ji = 1, jpiglo
    [...]
             ARRAY_IN(ji,ipj ,:,:,jf) = SGN_IN(jf) * ARRAY_IN(iju,ipj-2,:,:,jf)
          END DO
   -      ARRAY_IN(jpiglo,ipj,:,:,jf) = SGN_IN(jf) * ARRAY_IN(1,ipj-2,:,:,jf)
   +      ARRAY_IN(jpiglo,ipj,:,:,jf) = SGN_IN(jf) * ARRAY_IN(jpiglo-2,ipj-2,:,:,jf)
          DO ji = jpiglo/2+1, jpiglo-1
             iju = jpiglo-ji
NEMO/trunk/src/OCE/LBC/lbc_nfd_nogather_generic.h90
(changes from r10068 to r10425)

Summary of the changes in this include file:
- The per-dimension ARRAY2_TYPE / ARRAY2_IN macros are removed; the received north-fold buffer ptab2 is now always handled as a 4-D array, and a new J_SIZE(ptab2) macro returns its second dimension.
- A guard is added: the "multi" case (ipf > 1) is not supported and stops the run.
- The buffer rows are addressed with fixed indices ijpj = 1 and ijpjp1 = 2 (instead of ijpj = nlcj or 4 and ijpjm1), while the destination rows of the local array are addressed as nlcj and nlcj-1.
- A new logical l_fast_exchanges is set from the buffer's second dimension (ipj == 1); when true, only the last line (nlcj) of the local array is updated and all nlcj-1 updates are skipped.
- All implicit-section assignments (:,:) are replaced by explicit DO jl / DO jk loops.
- In the npolj = 5,6 U- and F-point cases, the end point at ji = nlci is now folded onto interior points of the local array (nlci-2) instead of column 1 of the exchange buffer.
- The 'I' (ice U-V point) cases are removed.
- The default "closed" branch no longer zeroes the first and last rows; it only prints a warning, since the code should never reach it.

Representative hunks:

[macro section, pointer case]
    #  if defined DIM_4d
    [...]
    #     define L_SIZE(ptab)             SIZE(ptab(1)%pt4d,4)
   -#     define ARRAY2_TYPE(i,j,k,l,f)   TYPE(PTR_4D),INTENT(inout)::ptab2(f)
   -#     define ARRAY2_IN(i,j,k,l,f)     ptab2(f)%pt4d(i,j,k,l)
    #  endif
   +#  define ARRAY2_TYPE(i,j,k,l,f)   TYPE(PTR_4D),INTENT(inout)::ptab2(f)
   +#  define J_SIZE(ptab2)            SIZE(ptab2(1)%pt4d,2)
   +#  define ARRAY2_IN(i,j,k,l,f)     ptab2(f)%pt4d(i,j,k,l)

[declarations and setup]
   -      INTEGER  ::   ijt, iju, ijpj, ijpjm1, ijta, ijua, jia, startloop, endloop
   +      INTEGER  ::   ijt, iju, ijpj, ijpjp1, ijta, ijua, jia, startloop, endloop
   +      LOGICAL  ::   l_fast_exchanges
          !!----------------------------------------------------------------------
   -      ipk = K_SIZE(ptab)   ! 3rd dimension
   +      ipj = J_SIZE(ptab2)  ! 2nd dimension of input array
   +      ipk = K_SIZE(ptab)   ! 3rd dimension of output array
          ipl = L_SIZE(ptab)   ! 4th    -
          ipf = F_SIZE(ptab)   ! 5th    -      use in "multi" case (array of pointers)
          !
   +      ! Security check for further developments
   +      IF ( ipf > 1 ) THEN
   +         write(6,*) 'lbc_nfd_nogather: multiple fields not allowed. Revise implementation'
   +         write(6,*) 'You should not be there...'
   +         STOP
   +      ENDIF
          !
   -      SELECT CASE ( jpni )
   -      CASE ( 1 )     ;   ijpj = nlcj      ! 1 proc only  along the i-direction
   -      CASE DEFAULT   ;   ijpj = 4         ! several proc along the i-direction
   -      END SELECT
   -      ijpjm1 = ijpj-1
   +      ijpj   = 1    ! index of first modified line
   +      ijpjp1 = 2    ! index + 1
   +
   +      ! 2nd dimension determines exchange speed
   +      IF (ipj == 1 ) THEN
   +        l_fast_exchanges = .TRUE.
   +      ELSE
   +        l_fast_exchanges = .FALSE.
   +      ENDIF

[typical loop rewrite, here the T-/W-point case for npolj = 3,4]
   -            DO ji = startloop, nlci
   -               ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4
   -               ARRAY_IN(ji,ijpj,:,:,jf) = SGN_IN(jf) * ARRAY2_IN(ijt,ijpj-2,:,:,jf)
   -            END DO
   +            DO jl = 1, ipl;   DO jk = 1, ipk
   +               DO ji = startloop, nlci
   +                  ijt = jpiglo - ji - nimpp - nfiimpp(isendto(1),jpnj) + 4
   +                  ARRAY_IN(ji,nlcj,jk,jl,jf) = SGN_IN(jf) * ARRAY2_IN(ijt,ijpj,jk,jl,jf)
   +               END DO
   +            END DO;   END DO

[end of file]
    #undef NAT_IN
    #undef SGN_IN
   +#undef J_SIZE
    #undef K_SIZE
    #undef L_SIZE
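Outside of NEMO, the equivalence between the two loop styles used above can be checked in a few lines. This is only a self-contained illustration of the rewrite (array names and sizes are made up), not code from the changeset:

    PROGRAM section_vs_loops
       IMPLICIT NONE
       INTEGER, PARAMETER :: ni = 4, nj = 3, nk = 2, nl = 2
       REAL    :: b(ni,nj,nk,nl), a1(ni,nj,nk,nl), a2(ni,nj,nk,nl)
       INTEGER :: ji, jk, jl
       CALL RANDOM_NUMBER( b )
       a1 = 0.   ;   a2 = 0.
       ! implicit array sections, as in the r10068 version of the include file
       a1(:,nj,:,:) = -1. * b(:,1,:,:)
       ! explicit jk/jl loops, as in the r10425 version
       DO jl = 1, nl   ;   DO jk = 1, nk
          DO ji = 1, ni
             a2(ji,nj,jk,jl) = -1. * b(ji,1,jk,jl)
          END DO
       END DO   ;   END DO
       PRINT *, 'max difference between the two forms:', MAXVAL( ABS( a1 - a2 ) )   ! expected: 0.
    END PROGRAM section_vs_loops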
NEMO/trunk/src/OCE/LBC/lbclnk.F90
(changes from r10233 to r10425)

All the BDY and iceberg wrappers gain the same cdname argument and pass it through to the generic lbc_lnk routines:

   -   SUBROUTINE lbc_bdy_lnk_4d( pt4d, cd_type, psgn, ib_bdy )
   +   SUBROUTINE lbc_bdy_lnk_4d( cdname, pt4d, cd_type, psgn, ib_bdy )
          !!----------------------------------------------------------------------
   +      CHARACTER(len=*)            , INTENT(in   ) ::   cdname    ! name of the calling subroutine
          REAL(wp), DIMENSION(:,:,:,:), INTENT(inout) ::   pt4d      ! 4D array on which the lbc is applied
          CHARACTER(len=1)            , INTENT(in   ) ::   cd_type   ! nature of pt4d grid-points
          REAL(wp)                    , INTENT(in   ) ::   psgn      ! sign used across north fold
          INTEGER                     , INTENT(in   ) ::   ib_bdy    ! BDY boundary set
          !!----------------------------------------------------------------------
   -      CALL lbc_lnk_4d( pt4d, cd_type, psgn)
   +      CALL lbc_lnk_4d( cdname, pt4d, cd_type, psgn)
       END SUBROUTINE lbc_bdy_lnk_4d

   -   SUBROUTINE lbc_bdy_lnk_3d( pt3d, cd_type, psgn, ib_bdy )
   +   SUBROUTINE lbc_bdy_lnk_3d( cdname, pt3d, cd_type, psgn, ib_bdy )
          [...]
   -      CALL lbc_lnk_3d( pt3d, cd_type, psgn)
   +      CALL lbc_lnk_3d( cdname, pt3d, cd_type, psgn)
       END SUBROUTINE lbc_bdy_lnk_3d

   -   SUBROUTINE lbc_bdy_lnk_2d( pt2d, cd_type, psgn, ib_bdy )
   +   SUBROUTINE lbc_bdy_lnk_2d( cdname, pt2d, cd_type, psgn, ib_bdy )
          [...]
   -      CALL lbc_lnk_2d( pt2d, cd_type, psgn)
   +      CALL lbc_lnk_2d( cdname, pt2d, cd_type, psgn)
       END SUBROUTINE lbc_bdy_lnk_2d

    !!gm  This routine should be removed with an optional halos size added in argument of generic routines

   -   SUBROUTINE lbc_lnk_2d_icb( pt2d, cd_type, psgn, ki, kj )
   +   SUBROUTINE lbc_lnk_2d_icb( cdname, pt2d, cd_type, psgn, ki, kj )
          !!----------------------------------------------------------------------
   +      CHARACTER(len=*)        , INTENT(in   ) ::   cdname    ! name of the calling subroutine
          REAL(wp), DIMENSION(:,:), INTENT(inout) ::   pt2d      ! 2D array on which the lbc is applied
          CHARACTER(len=1)        , INTENT(in   ) ::   cd_type   ! nature of pt2d grid-points
          REAL(wp)                , INTENT(in   ) ::   psgn      ! sign used across north fold
          INTEGER                 , INTENT(in   ) ::   ki, kj    ! sizes of extra halo (not needed in non-mpp)
          !!----------------------------------------------------------------------
   -      CALL lbc_lnk_2d( pt2d, cd_type, psgn )
   +      CALL lbc_lnk_2d( cdname, pt2d, cd_type, psgn )
       END SUBROUTINE lbc_lnk_2d_icb
    !!gm end
NEMO/trunk/src/OCE/LBC/lbcnfd.F90
(changes from r10068 to r10425)

The 2-D and 3-D versions of lbc_nfd_nogather (array and array-of-pointer forms) are activated: they are added to the generic interface and their preprocessor-generated bodies are uncommented.

       INTERFACE lbc_nfd_nogather
    !                        ! Currently only 4d array version is needed
   -   !   MODULE PROCEDURE   lbc_nfd_nogather_2d    , lbc_nfd_nogather_3d
   -   !   MODULE PROCEDURE   lbc_nfd_nogather_2d_ptr, lbc_nfd_nogather_3d_ptr
   +      MODULE PROCEDURE   lbc_nfd_nogather_2d    , lbc_nfd_nogather_3d
   +      MODULE PROCEDURE   lbc_nfd_nogather_4d
   +      MODULE PROCEDURE   lbc_nfd_nogather_2d_ptr, lbc_nfd_nogather_3d_ptr
    !      MODULE PROCEDURE   lbc_nfd_nogather_4d_ptr
       END INTERFACE
    [...]
       !                       !==  2D array and array of 2D pointer  ==!
       !
   -!#  define DIM_2d
   -!#     define ROUTINE_NFD           lbc_nfd_nogather_2d
   -!#     include "lbc_nfd_nogather_generic.h90"
   -!#     undef ROUTINE_NFD
   -!#  define MULTI
   -!#     define ROUTINE_NFD           lbc_nfd_nogather_2d_ptr
   -!#     include "lbc_nfd_nogather_generic.h90"
   -!#     undef ROUTINE_NFD
   -!#  undef MULTI
   -!#  undef DIM_2d
   +#  define DIM_2d
   +#     define ROUTINE_NFD           lbc_nfd_nogather_2d
   +#     include "lbc_nfd_nogather_generic.h90"
   +#     undef ROUTINE_NFD
   +#  define MULTI
   +#     define ROUTINE_NFD           lbc_nfd_nogather_2d_ptr
   +#     include "lbc_nfd_nogather_generic.h90"
   +#     undef ROUTINE_NFD
   +#  undef MULTI
   +#  undef DIM_2d
       !
       !                       !==  3D array and array of 3D pointer  ==!
       !
    [the equivalent DIM_3d block for lbc_nfd_nogather_3d / lbc_nfd_nogather_3d_ptr is uncommented in the same way]
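The mechanism relied on here (and in lib_mpp.F90 below) is plain C-preprocessor token substitution on the included .h90 template: each #define / #include pair stamps out one specific routine from the generic source. A minimal sketch of the pattern, with hypothetical file and routine names (my_op_generic.h90, op_plus, op_minus), shown as two files:

    ! --- file my_op_generic.h90 : the template; ROUTINE_OP and OP_SIGN are set by the includer
       SUBROUTINE ROUTINE_OP( pval )
          REAL, INTENT(inout) ::   pval
          pval = OP_SIGN * pval      ! OP_SIGN expands to +1. or -1.
       END SUBROUTINE ROUTINE_OP

    ! --- file my_ops.F90 : generates two specific routines from the single template
    MODULE my_ops
    CONTAINS
    #  define ROUTINE_OP   op_plus
    #  define OP_SIGN      +1.
    #  include "my_op_generic.h90"
    #  undef  ROUTINE_OP
    #  undef  OP_SIGN
    #  define ROUTINE_OP   op_minus
    #  define OP_SIGN      -1.
    #  include "my_op_generic.h90"
    #  undef  ROUTINE_OP
    #  undef  OP_SIGN
    END MODULE my_ops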
NEMO/trunk/src/OCE/LBC/lib_mpp.F90
(changes from r10068 to r10425)

Main changes visible in this file:

- The public interface is updated: mpp_max_multiple (and its mppmax_real_multiple implementation and interface block) is removed; mpp_delay_max, mpp_delay_sum and mpp_delay_rcv are exported instead; mpp_ini_ice disappears while mpp_ini_znl is kept; mppsize and mpprank become PUBLIC module variables rather than separate PUBLIC statements; the mpp_nfd / mpp_lnk interface lists only see cosmetic realignment.

   -   PUBLIC   mpp_max_multiple
   +   PUBLIC   mpp_delay_max, mpp_delay_sum, mpp_delay_rcv
       PUBLIC   mppscatter, mppgather
   -   PUBLIC   mpp_ini_ice, mpp_ini_znl
   -   PUBLIC   mppsize
   +   PUBLIC   mpp_ini_znl
       PUBLIC   mppsend, mpprecv         ! needed by TAM and ICB routines
       PUBLIC   mpp_lnk_bdy_2d, mpp_lnk_bdy_3d, mpp_lnk_bdy_4d
   -   PUBLIC   mpprank

- The sea-ice communicator machinery (ncomm_ice, ngrp_iworld, ngrp_ice, ndim_rank_ice, n_ice_root, nrank_ice) and the whole mpp_ini_ice subroutine are deleted.

- New module variables support a communications summary report: crname_lbc, crname_glb, ncom_stp, ncom_fsbc, ncom_dttrc, ncom_freq, ncomm_sequence, ncom_rec_max (= 2000), n_sequence_lbc, n_sequence_glb, numcom, l_full_nf_update, plus the timing counters waiting_time, compute_time and elapsed_time.

- New module variables support delayed global reductions:

   +   INTEGER, PARAMETER, PUBLIC ::   nbdelay = 2       !: number of delayed operations
   +   !: name (used as id) of allreduce-delayed operations
   +   CHARACTER(len=32), DIMENSION(nbdelay), PUBLIC ::   c_delaylist = (/ 'cflice', 'fwb' /)
   +   !: component name where the allreduce-delayed operation is performed
   +   CHARACTER(len=3),  DIMENSION(nbdelay), PUBLIC ::   c_delaycpnt = (/ 'ICE' , 'OCE' /)
   +   TYPE, PUBLIC ::   DELAYARR
   +      REAL(   wp), POINTER, DIMENSION(:) ::  z1d => NULL()
   +      COMPLEX(wp), POINTER, DIMENSION(:) ::  y1d => NULL()
   +   END TYPE DELAYARR
   +   TYPE( DELAYARR ), DIMENSION(nbdelay), PUBLIC ::   todelay
   +   INTEGER,          DIMENSION(nbdelay), PUBLIC ::   ndelayid = -1   !: mpi request id of the delayed operations

- In mynode, the AGRIF override of jpni/jpnj/jpnij, the automatic jpnij = jpni*jpnj default and the related jpnij print are removed, and the error message issued when a local communicator is supplied without a prior MPI_Init gains its cform_err header line.

- Three routines are added. mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) and mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) return in pout the result of the reduction launched at the previous call (a blocking allreduce is performed on the very first call, or the value is read back from the restart file) and then launch a new non-blocking mpi_iallreduce on the current input; the operation is identified by cdelay, which must match an entry of c_delaylist. mpp_delay_rcv( kid ) issues the mpi_wait that completes the pending request and converts the complex accumulator y1d back to z1d when needed.

- The hand-written reduction wrappers (mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real, the mppmin_* and mppsum_* equivalents, mppsum_realdd, mppsum_a_realdd and mppmax_real_multiple) are deleted and regenerated from a generic include file, e.g. for the maximum over integers:

   +# define OPERATION_MAX
   +# define INTEGER_TYPE
   +# define DIM_0d
   +#    define ROUTINE_ALLREDUCE           mppmax_int
   +#    include "mpp_allreduce_generic.h90"
   +#    undef ROUTINE_ALLREDUCE
   +# undef DIM_0d
   +# define DIM_1d
   +#    define ROUTINE_ALLREDUCE           mppmax_a_int
   +#    include "mpp_allreduce_generic.h90"
   +#    undef ROUTINE_ALLREDUCE
   +# undef DIM_1d
   +# undef INTEGER_TYPE
    [the REAL_TYPE block follows, and the same pattern is repeated for OPERATION_MIN, OPERATION_SUM and OPERATION_SUM_DD (COMPLEX_TYPE, mppsum_realdd / mppsum_a_realdd)]

- Likewise, mpp_minloc2d/3d and mpp_maxloc2d/3d are regenerated from "mpp_loc_generic.h90" with the OPERATION_MINLOC / OPERATION_MAXLOC and DIM_2d / DIM_3d macros.

- mppstop becomes mppstop( ldfinal, ld_force_abort ): with ld_force_abort it calls mpi_abort( MPI_COMM_WORLD ); otherwise it keeps the mppsync then mpi_finalize sequence, and it ends with STOP 123456 unless ldfinal is true.

- mpp_ini_znl now calls mpp_min with 'lib_mpp' as the new name argument, and mpp_lbc_north_icb brackets its MPI_ALLGATHER with tic_tac timing calls.

- mpp_lnk_2d_icb( cdname, pt2d, cd_type, psgn, kexti, kextj ) gains the cdname argument, registers itself through mpp_report, and brackets its i- and j-direction migrations with tic_tac timing calls.

- A new subroutine mpp_report( cdname, kpk, kpl, kpf, ld_lbc, ld_glb ) records, during time step nit000+ncom_freq, the name of every routine performing an lbc exchange (with its kpk*kpl halo size and kpf field count) or a global communication, and at nit000+2*ncom_freq opens communication_report.txt and writes the "Communication pattern report (second oce+sbc+top time step)"; only the beginning of that listing appears in this excerpt.
1 ) jf = jf + 1 1506 IF ( ncomm_sequence(ji,1) .GT. 1 .AND. ncomm_sequence(ji,2) .GT. 1 ) jj = jj + 1 1507 jh = MAX (jh, ncomm_sequence(ji,1)*ncomm_sequence(ji,2)) 1508 END DO 1509 WRITE(numcom,'(A,I3)') ' 3D Exchanged halos : ', jk 1510 WRITE(numcom,'(A,I3)') ' Multi arrays exchanged halos : ', jf 1511 WRITE(numcom,'(A,I3)') ' from which 3D : ', jj 1512 WRITE(numcom,'(A,I10)') ' Array max size : ', jh*jpi*jpj 1513 WRITE(numcom,*) ' ' 1514 WRITE(numcom,*) ' lbc_lnk called' 1515 jj = 1 1516 DO ji = 2, n_sequence_lbc 1517 IF( crname_lbc(ji-1) /= crname_lbc(ji) ) THEN 1518 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(ji-1)) 1519 jj = 0 1520 END IF 1521 jj = jj + 1 1522 END DO 1523 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc)) 1524 WRITE(numcom,*) ' ' 1525 IF ( n_sequence_glb > 0 ) THEN 1526 WRITE(numcom,'(A,I4)') ' Global communications : ', n_sequence_glb 1527 jj = 1 1528 DO ji = 2, n_sequence_glb 1529 IF( crname_glb(ji-1) /= crname_glb(ji) ) THEN 1530 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(ji-1)) 1531 jj = 0 1532 END IF 1533 jj = jj + 1 1534 END DO 1535 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb)) 1536 DEALLOCATE(crname_glb) 1537 ELSE 1538 WRITE(numcom,*) ' No MPI global communication ' 1539 ENDIF 1540 WRITE(numcom,*) ' ' 1541 WRITE(numcom,*) ' -----------------------------------------------' 1542 WRITE(numcom,*) ' ' 1543 DEALLOCATE(ncomm_sequence) 1544 DEALLOCATE(crname_lbc) 1545 ENDIF 1546 END SUBROUTINE mpp_report 1547 1548 1549 SUBROUTINE tic_tac (ld_tic, ld_global) 1550 1551 LOGICAL, INTENT(IN) :: ld_tic 1552 LOGICAL, OPTIONAL, INTENT(IN) :: ld_global 1553 REAL(wp), DIMENSION(2), SAVE :: tic_wt 1554 REAL(wp), SAVE :: tic_ct = 0._wp 1555 INTEGER :: ii 1556 1557 IF( ncom_stp <= nit000 ) RETURN 1558 IF( ncom_stp == nitend ) RETURN 1559 ii = 1 1560 IF( PRESENT( ld_global ) ) THEN 1561 IF( ld_global ) ii = 2 1562 END IF 1563 1564 IF ( ld_tic ) THEN 1565 tic_wt(ii) = MPI_Wtime() ! start count tic->tac (waiting time) 1566 IF ( tic_ct > 0.0_wp ) compute_time = compute_time + MPI_Wtime() - tic_ct ! cumulate count tac->tic 1567 ELSE 1568 waiting_time(ii) = waiting_time(ii) + MPI_Wtime() - tic_wt(ii) ! cumulate count tic->tac 1569 tic_ct = MPI_Wtime() ! start count tac->tic (waiting time) 1570 ENDIF 1571 1572 END SUBROUTINE tic_tac 1573 1604 1574 1605 1575 #else … … 1610 1580 1611 1581 INTERFACE mpp_sum 1612 MODULE PROCEDURE mpp _sum_a2s, mpp_sum_as, mpp_sum_ai, mpp_sum_s, mpp_sum_i, mppsum_realdd, mppsum_a_realdd1582 MODULE PROCEDURE mppsum_int, mppsum_a_int, mppsum_real, mppsum_a_real, mppsum_realdd, mppsum_a_realdd 1613 1583 END INTERFACE 1614 1584 INTERFACE mpp_max … … 1624 1594 MODULE PROCEDURE mpp_maxloc2d ,mpp_maxloc3d 1625 1595 END INTERFACE 1626 INTERFACE mpp_max_multiple1627 MODULE PROCEDURE mppmax_real_multiple1628 END INTERFACE1629 1596 1630 1597 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .FALSE. !: mpp flag 1631 1598 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms (needed here in case "key_mpp_mpi" is not used) 1632 INTEGER :: ncomm_ice1633 1599 INTEGER, PUBLIC :: mpi_comm_oce ! opa local communicator 1600 1601 INTEGER, PARAMETER, PUBLIC :: nbdelay = 0 ! 
make sure we don't enter loops: DO ji = 1, nbdelay 1602 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaylist = 'empty' 1603 CHARACTER(len=32), DIMENSION(1), PUBLIC :: c_delaycpnt = 'empty' 1604 TYPE :: DELAYARR 1605 REAL( wp), POINTER, DIMENSION(:) :: z1d => NULL() 1606 COMPLEX(wp), POINTER, DIMENSION(:) :: y1d => NULL() 1607 END TYPE DELAYARR 1608 TYPE( DELAYARR ), DIMENSION(1), PUBLIC :: todelay 1609 INTEGER, PUBLIC, DIMENSION(1) :: ndelayid = -1 1634 1610 !!---------------------------------------------------------------------- 1635 1611 CONTAINS … … 1654 1630 END SUBROUTINE mppsync 1655 1631 1656 SUBROUTINE mpp_sum_as( parr, kdim, kcom ) ! Dummy routine 1657 REAL , DIMENSION(:) :: parr 1658 INTEGER :: kdim 1659 INTEGER, OPTIONAL :: kcom 1660 WRITE(*,*) 'mpp_sum_as: You should not have seen this print! error?', kdim, parr(1), kcom 1661 END SUBROUTINE mpp_sum_as 1662 1663 SUBROUTINE mpp_sum_a2s( parr, kdim, kcom ) ! Dummy routine 1664 REAL , DIMENSION(:,:) :: parr 1665 INTEGER :: kdim 1666 INTEGER, OPTIONAL :: kcom 1667 WRITE(*,*) 'mpp_sum_a2s: You should not have seen this print! error?', kdim, parr(1,1), kcom 1668 END SUBROUTINE mpp_sum_a2s 1669 1670 SUBROUTINE mpp_sum_ai( karr, kdim, kcom ) ! Dummy routine 1671 INTEGER, DIMENSION(:) :: karr 1672 INTEGER :: kdim 1673 INTEGER, OPTIONAL :: kcom 1674 WRITE(*,*) 'mpp_sum_ai: You should not have seen this print! error?', kdim, karr(1), kcom 1675 END SUBROUTINE mpp_sum_ai 1676 1677 SUBROUTINE mpp_sum_s( psca, kcom ) ! Dummy routine 1678 REAL :: psca 1679 INTEGER, OPTIONAL :: kcom 1680 WRITE(*,*) 'mpp_sum_s: You should not have seen this print! error?', psca, kcom 1681 END SUBROUTINE mpp_sum_s 1682 1683 SUBROUTINE mpp_sum_i( kint, kcom ) ! Dummy routine 1684 integer :: kint 1685 INTEGER, OPTIONAL :: kcom 1686 WRITE(*,*) 'mpp_sum_i: You should not have seen this print! error?', kint, kcom 1687 END SUBROUTINE mpp_sum_i 1688 1689 SUBROUTINE mppsum_realdd( ytab, kcom ) 1690 COMPLEX(wp), INTENT(inout) :: ytab ! input scalar 1691 INTEGER , INTENT( in ), OPTIONAL :: kcom 1692 WRITE(*,*) 'mppsum_realdd: You should not have seen this print! error?', ytab 1693 END SUBROUTINE mppsum_realdd 1694 1695 SUBROUTINE mppsum_a_realdd( ytab, kdim, kcom ) 1696 INTEGER , INTENT( in ) :: kdim ! size of ytab 1697 COMPLEX(wp), DIMENSION(kdim), INTENT( inout ) :: ytab ! input array 1698 INTEGER , INTENT( in ), OPTIONAL :: kcom 1699 WRITE(*,*) 'mppsum_a_realdd: You should not have seen this print! error?', kdim, ytab(1), kcom 1700 END SUBROUTINE mppsum_a_realdd 1701 1702 SUBROUTINE mppmax_a_real( parr, kdim, kcom ) 1703 REAL , DIMENSION(:) :: parr 1704 INTEGER :: kdim 1705 INTEGER, OPTIONAL :: kcom 1706 WRITE(*,*) 'mppmax_a_real: You should not have seen this print! error?', kdim, parr(1), kcom 1707 END SUBROUTINE mppmax_a_real 1708 1709 SUBROUTINE mppmax_real( psca, kcom ) 1710 REAL :: psca 1711 INTEGER, OPTIONAL :: kcom 1712 WRITE(*,*) 'mppmax_real: You should not have seen this print! error?', psca, kcom 1713 END SUBROUTINE mppmax_real 1714 1715 SUBROUTINE mppmin_a_real( parr, kdim, kcom ) 1716 REAL , DIMENSION(:) :: parr 1717 INTEGER :: kdim 1718 INTEGER, OPTIONAL :: kcom 1719 WRITE(*,*) 'mppmin_a_real: You should not have seen this print! error?', kdim, parr(1), kcom 1720 END SUBROUTINE mppmin_a_real 1721 1722 SUBROUTINE mppmin_real( psca, kcom ) 1723 REAL :: psca 1724 INTEGER, OPTIONAL :: kcom 1725 WRITE(*,*) 'mppmin_real: You should not have seen this print! 
error?', psca, kcom 1726 END SUBROUTINE mppmin_real 1727 1728 SUBROUTINE mppmax_a_int( karr, kdim ,kcom) 1729 INTEGER, DIMENSION(:) :: karr 1730 INTEGER :: kdim 1731 INTEGER, OPTIONAL :: kcom 1732 WRITE(*,*) 'mppmax_a_int: You should not have seen this print! error?', kdim, karr(1), kcom 1733 END SUBROUTINE mppmax_a_int 1734 1735 SUBROUTINE mppmax_int( kint, kcom) 1736 INTEGER :: kint 1737 INTEGER, OPTIONAL :: kcom 1738 WRITE(*,*) 'mppmax_int: You should not have seen this print! error?', kint, kcom 1739 END SUBROUTINE mppmax_int 1740 1741 SUBROUTINE mppmin_a_int( karr, kdim, kcom ) 1742 INTEGER, DIMENSION(:) :: karr 1743 INTEGER :: kdim 1744 INTEGER, OPTIONAL :: kcom 1745 WRITE(*,*) 'mppmin_a_int: You should not have seen this print! error?', kdim, karr(1), kcom 1746 END SUBROUTINE mppmin_a_int 1747 1748 SUBROUTINE mppmin_int( kint, kcom ) 1749 INTEGER :: kint 1750 INTEGER, OPTIONAL :: kcom 1751 WRITE(*,*) 'mppmin_int: You should not have seen this print! error?', kint, kcom 1752 END SUBROUTINE mppmin_int 1753 1754 SUBROUTINE mpp_minloc2d( ptab, pmask, pmin, ki, kj ) 1755 REAL :: pmin 1756 REAL , DIMENSION (:,:) :: ptab, pmask 1757 INTEGER :: ki, kj 1758 WRITE(*,*) 'mpp_minloc2d: You should not have seen this print! error?', pmin, ki, kj, ptab(1,1), pmask(1,1) 1759 END SUBROUTINE mpp_minloc2d 1760 1761 SUBROUTINE mpp_minloc3d( ptab, pmask, pmin, ki, kj, kk ) 1762 REAL :: pmin 1763 REAL , DIMENSION (:,:,:) :: ptab, pmask 1764 INTEGER :: ki, kj, kk 1765 WRITE(*,*) 'mpp_minloc3d: You should not have seen this print! error?', pmin, ki, kj, kk, ptab(1,1,1), pmask(1,1,1) 1766 END SUBROUTINE mpp_minloc3d 1767 1768 SUBROUTINE mpp_maxloc2d( ptab, pmask, pmax, ki, kj ) 1769 REAL :: pmax 1770 REAL , DIMENSION (:,:) :: ptab, pmask 1771 INTEGER :: ki, kj 1772 WRITE(*,*) 'mpp_maxloc2d: You should not have seen this print! error?', pmax, ki, kj, ptab(1,1), pmask(1,1) 1773 END SUBROUTINE mpp_maxloc2d 1774 1775 SUBROUTINE mpp_maxloc3d( ptab, pmask, pmax, ki, kj, kk ) 1776 REAL :: pmax 1777 REAL , DIMENSION (:,:,:) :: ptab, pmask 1778 INTEGER :: ki, kj, kk 1779 WRITE(*,*) 'mpp_maxloc3d: You should not have seen this print! error?', pmax, ki, kj, kk, ptab(1,1,1), pmask(1,1,1) 1780 END SUBROUTINE mpp_maxloc3d 1781 1782 SUBROUTINE mppstop 1632 !!---------------------------------------------------------------------- 1633 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 1634 !! 1635 !!---------------------------------------------------------------------- 1636 !! 1637 # define OPERATION_MAX 1638 # define INTEGER_TYPE 1639 # define DIM_0d 1640 # define ROUTINE_ALLREDUCE mppmax_int 1641 # include "mpp_allreduce_generic.h90" 1642 # undef ROUTINE_ALLREDUCE 1643 # undef DIM_0d 1644 # define DIM_1d 1645 # define ROUTINE_ALLREDUCE mppmax_a_int 1646 # include "mpp_allreduce_generic.h90" 1647 # undef ROUTINE_ALLREDUCE 1648 # undef DIM_1d 1649 # undef INTEGER_TYPE 1650 ! 1651 # define REAL_TYPE 1652 # define DIM_0d 1653 # define ROUTINE_ALLREDUCE mppmax_real 1654 # include "mpp_allreduce_generic.h90" 1655 # undef ROUTINE_ALLREDUCE 1656 # undef DIM_0d 1657 # define DIM_1d 1658 # define ROUTINE_ALLREDUCE mppmax_a_real 1659 # include "mpp_allreduce_generic.h90" 1660 # undef ROUTINE_ALLREDUCE 1661 # undef DIM_1d 1662 # undef REAL_TYPE 1663 # undef OPERATION_MAX 1664 !!---------------------------------------------------------------------- 1665 !! *** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 1666 !! 1667 !!---------------------------------------------------------------------- 1668 !! 
1669 # define OPERATION_MIN 1670 # define INTEGER_TYPE 1671 # define DIM_0d 1672 # define ROUTINE_ALLREDUCE mppmin_int 1673 # include "mpp_allreduce_generic.h90" 1674 # undef ROUTINE_ALLREDUCE 1675 # undef DIM_0d 1676 # define DIM_1d 1677 # define ROUTINE_ALLREDUCE mppmin_a_int 1678 # include "mpp_allreduce_generic.h90" 1679 # undef ROUTINE_ALLREDUCE 1680 # undef DIM_1d 1681 # undef INTEGER_TYPE 1682 ! 1683 # define REAL_TYPE 1684 # define DIM_0d 1685 # define ROUTINE_ALLREDUCE mppmin_real 1686 # include "mpp_allreduce_generic.h90" 1687 # undef ROUTINE_ALLREDUCE 1688 # undef DIM_0d 1689 # define DIM_1d 1690 # define ROUTINE_ALLREDUCE mppmin_a_real 1691 # include "mpp_allreduce_generic.h90" 1692 # undef ROUTINE_ALLREDUCE 1693 # undef DIM_1d 1694 # undef REAL_TYPE 1695 # undef OPERATION_MIN 1696 1697 !!---------------------------------------------------------------------- 1698 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 1699 !! 1700 !! Global sum of 1D array or a variable (integer, real or complex) 1701 !!---------------------------------------------------------------------- 1702 !! 1703 # define OPERATION_SUM 1704 # define INTEGER_TYPE 1705 # define DIM_0d 1706 # define ROUTINE_ALLREDUCE mppsum_int 1707 # include "mpp_allreduce_generic.h90" 1708 # undef ROUTINE_ALLREDUCE 1709 # undef DIM_0d 1710 # define DIM_1d 1711 # define ROUTINE_ALLREDUCE mppsum_a_int 1712 # include "mpp_allreduce_generic.h90" 1713 # undef ROUTINE_ALLREDUCE 1714 # undef DIM_1d 1715 # undef INTEGER_TYPE 1716 ! 1717 # define REAL_TYPE 1718 # define DIM_0d 1719 # define ROUTINE_ALLREDUCE mppsum_real 1720 # include "mpp_allreduce_generic.h90" 1721 # undef ROUTINE_ALLREDUCE 1722 # undef DIM_0d 1723 # define DIM_1d 1724 # define ROUTINE_ALLREDUCE mppsum_a_real 1725 # include "mpp_allreduce_generic.h90" 1726 # undef ROUTINE_ALLREDUCE 1727 # undef DIM_1d 1728 # undef REAL_TYPE 1729 # undef OPERATION_SUM 1730 1731 # define OPERATION_SUM_DD 1732 # define COMPLEX_TYPE 1733 # define DIM_0d 1734 # define ROUTINE_ALLREDUCE mppsum_realdd 1735 # include "mpp_allreduce_generic.h90" 1736 # undef ROUTINE_ALLREDUCE 1737 # undef DIM_0d 1738 # define DIM_1d 1739 # define ROUTINE_ALLREDUCE mppsum_a_realdd 1740 # include "mpp_allreduce_generic.h90" 1741 # undef ROUTINE_ALLREDUCE 1742 # undef DIM_1d 1743 # undef COMPLEX_TYPE 1744 # undef OPERATION_SUM_DD 1745 1746 !!---------------------------------------------------------------------- 1747 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 1748 !! 1749 !!---------------------------------------------------------------------- 1750 !! 1751 # define OPERATION_MINLOC 1752 # define DIM_2d 1753 # define ROUTINE_LOC mpp_minloc2d 1754 # include "mpp_loc_generic.h90" 1755 # undef ROUTINE_LOC 1756 # undef DIM_2d 1757 # define DIM_3d 1758 # define ROUTINE_LOC mpp_minloc3d 1759 # include "mpp_loc_generic.h90" 1760 # undef ROUTINE_LOC 1761 # undef DIM_3d 1762 # undef OPERATION_MINLOC 1763 1764 # define OPERATION_MAXLOC 1765 # define DIM_2d 1766 # define ROUTINE_LOC mpp_maxloc2d 1767 # include "mpp_loc_generic.h90" 1768 # undef ROUTINE_LOC 1769 # undef DIM_2d 1770 # define DIM_3d 1771 # define ROUTINE_LOC mpp_maxloc3d 1772 # include "mpp_loc_generic.h90" 1773 # undef ROUTINE_LOC 1774 # undef DIM_3d 1775 # undef OPERATION_MAXLOC 1776 1777 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 1778 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1779 CHARACTER(len=*), INTENT(in ) :: cdelay ! 
name (used as id) of the delayed operation 1780 COMPLEX(wp), INTENT(in ), DIMENSION(:) :: y_in 1781 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1782 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1783 INTEGER, INTENT(in ), OPTIONAL :: kcom 1784 ! 1785 pout(:) = REAL(y_in(:), wp) 1786 END SUBROUTINE mpp_delay_sum 1787 1788 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 1789 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 1790 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 1791 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in 1792 REAL(wp), INTENT( out), DIMENSION(:) :: pout 1793 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 1794 INTEGER, INTENT(in ), OPTIONAL :: kcom 1795 ! 1796 pout(:) = p_in(:) 1797 END SUBROUTINE mpp_delay_max 1798 1799 SUBROUTINE mpp_delay_rcv( kid ) 1800 INTEGER,INTENT(in ) :: kid 1801 WRITE(*,*) 'mpp_delay_rcv: You should not have seen this print! error?', kid 1802 END SUBROUTINE mpp_delay_rcv 1803 1804 SUBROUTINE mppstop( ldfinal, ld_force_abort ) 1805 LOGICAL, OPTIONAL, INTENT(in) :: ldfinal ! source process number 1806 LOGICAL, OPTIONAL, INTENT(in) :: ld_force_abort ! source process number 1783 1807 STOP ! non MPP case, just stop the run 1784 1808 END SUBROUTINE mppstop 1785 1786 SUBROUTINE mpp_ini_ice( kcom, knum )1787 INTEGER :: kcom, knum1788 WRITE(*,*) 'mpp_ini_ice: You should not have seen this print! error?', kcom, knum1789 END SUBROUTINE mpp_ini_ice1790 1809 1791 1810 SUBROUTINE mpp_ini_znl( knum ) … … 1799 1818 END SUBROUTINE mpp_comm_free 1800 1819 1801 SUBROUTINE mppmax_real_multiple( ptab, kdim , kcom )1802 REAL, DIMENSION(:) :: ptab !1803 INTEGER :: kdim !1804 INTEGER, OPTIONAL :: kcom !1805 WRITE(*,*) 'mppmax_real_multiple: You should not have seen this print! error?', ptab(1), kdim1806 END SUBROUTINE mppmax_real_multiple1807 1808 1820 #endif 1809 1821 … … 1825 1837 ! 1826 1838 nstop = nstop + 1 1827 IF(lwp) THEN 1828 WRITE(numout,cform_err) 1829 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd1 1830 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd2 1831 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd3 1832 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd4 1833 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd5 1834 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd6 1835 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd7 1836 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd8 1837 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd9 1838 IF( PRESENT(cd10) ) WRITE(numout,*) cd10 1839 ENDIF 1839 1840 ! force to open ocean.output file 1841 IF( numout == 6 ) CALL ctl_opn( numout, 'ocean.output', 'APPEND', 'FORMATTED', 'SEQUENTIAL', -1, 6, .FALSE. ) 1842 1843 WRITE(numout,cform_err) 1844 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1845 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1846 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1847 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1848 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1849 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1850 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1851 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1852 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1853 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 1854 1840 1855 CALL FLUSH(numout ) 1841 1856 IF( numstp /= -1 ) CALL FLUSH(numstp ) … … 1844 1859 ! 1845 1860 IF( cd1 == 'STOP' ) THEN 1846 IF(lwp)WRITE(numout,*) 'huge E-R-R-O-R : immediate stop'1847 CALL mppstop( )1861 WRITE(numout,*) 'huge E-R-R-O-R : immediate stop' 1862 CALL mppstop(ld_force_abort = .true.) 
1848 1863 ENDIF 1849 1864 ! … … 1866 1881 IF(lwp) THEN 1867 1882 WRITE(numout,cform_war) 1868 IF( PRESENT(cd1 ) ) WRITE(numout,*) cd11869 IF( PRESENT(cd2 ) ) WRITE(numout,*) cd21870 IF( PRESENT(cd3 ) ) WRITE(numout,*) cd31871 IF( PRESENT(cd4 ) ) WRITE(numout,*) cd41872 IF( PRESENT(cd5 ) ) WRITE(numout,*) cd51873 IF( PRESENT(cd6 ) ) WRITE(numout,*) cd61874 IF( PRESENT(cd7 ) ) WRITE(numout,*) cd71875 IF( PRESENT(cd8 ) ) WRITE(numout,*) cd81876 IF( PRESENT(cd9 ) ) WRITE(numout,*) cd91877 IF( PRESENT(cd10) ) WRITE(numout,*) cd101883 IF( PRESENT(cd1 ) ) WRITE(numout,*) TRIM(cd1) 1884 IF( PRESENT(cd2 ) ) WRITE(numout,*) TRIM(cd2) 1885 IF( PRESENT(cd3 ) ) WRITE(numout,*) TRIM(cd3) 1886 IF( PRESENT(cd4 ) ) WRITE(numout,*) TRIM(cd4) 1887 IF( PRESENT(cd5 ) ) WRITE(numout,*) TRIM(cd5) 1888 IF( PRESENT(cd6 ) ) WRITE(numout,*) TRIM(cd6) 1889 IF( PRESENT(cd7 ) ) WRITE(numout,*) TRIM(cd7) 1890 IF( PRESENT(cd8 ) ) WRITE(numout,*) TRIM(cd8) 1891 IF( PRESENT(cd9 ) ) WRITE(numout,*) TRIM(cd9) 1892 IF( PRESENT(cd10) ) WRITE(numout,*) TRIM(cd10) 1878 1893 ENDIF 1879 1894 CALL FLUSH(numout) … … 1916 1931 knum=get_unit() 1917 1932 #endif 1933 IF( TRIM(cdfile) == '/dev/null' ) clfile = TRIM(cdfile) ! force the use of /dev/null 1918 1934 ! 1919 1935 iost=0 1920 IF( cdacce(1:6) == 'DIRECT' ) THEN 1921 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh, ERR=100, IOSTAT=iost ) 1936 IF( cdacce(1:6) == 'DIRECT' ) THEN ! cdacce has always more than 6 characters 1937 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat, RECL=klengh , ERR=100, IOSTAT=iost ) 1938 ELSE IF( TRIM(cdstat) == 'APPEND' ) THEN ! cdstat can have less than 6 characters 1939 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS='UNKNOWN', POSITION='APPEND', ERR=100, IOSTAT=iost ) 1922 1940 ELSE 1923 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1924 ENDIF 1941 OPEN( UNIT=knum, FILE=clfile, FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1942 ENDIF 1943 IF( iost /= 0 .AND. TRIM(clfile) == '/dev/null' ) & ! for windows 1944 & OPEN(UNIT=knum,FILE='NUL', FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1925 1945 IF( iost == 0 ) THEN 1926 1946 IF(ldwp) THEN 1927 WRITE(kout,*) ' file : ', clfile,' open ok'1947 WRITE(kout,*) ' file : ', TRIM(clfile),' open ok' 1928 1948 WRITE(kout,*) ' unit = ', knum 1929 1949 WRITE(kout,*) ' status = ', cdstat … … 1937 1957 IF(ldwp) THEN 1938 1958 WRITE(kout,*) 1939 WRITE(kout,*) ' ===>>>> : bad opening file: ', clfile1959 WRITE(kout,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 1940 1960 WRITE(kout,*) ' ======= === ' 1941 1961 WRITE(kout,*) ' unit = ', knum … … 1948 1968 ELSE !!! Force writing to make sure we get the information - at least once - in this violent STOP!! 1949 1969 WRITE(*,*) 1950 WRITE(*,*) ' ===>>>> : bad opening file: ', clfile1970 WRITE(*,*) ' ===>>>> : bad opening file: ', TRIM(clfile) 1951 1971 WRITE(*,*) ' ======= === ' 1952 1972 WRITE(*,*) ' unit = ', knum -
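A note on the mpp_minloc/mpp_maxloc routines above (now generated from mpp_loc_generic.h90): they pack the local extremum and its global (i,j,k) indices into a single pair of doubles, so that one MPI_ALLREDUCE with MPI_MINLOC or MPI_MAXLOC returns both the global extremum and its location. The minimal standalone sketch below illustrates that packing and unpacking only; it is not part of the changeset, uses MPI_COMM_WORLD instead of NEMO's mpi_comm_oce, and fills placeholder values where the model would use MAXVAL/MAXLOC on the masked field. As the decode shows, the encoding assumes global i and j indices stay below 10000.

PROGRAM maxloc_pack_sketch
   USE mpi
   IMPLICIT NONE
   INTEGER                 :: ierror, ki, kj, kk
   REAL(8)                 :: pmax
   REAL(8), DIMENSION(2,1) :: zain, zaout
   !
   CALL MPI_INIT( ierror )
   !
   ! each rank packs its local maximum and the encoded global indices (placeholder values here)
   zain(1,1) = 42.0d0                                   ! local masked maximum
   ki = 10 ; kj = 20 ; kk = 3                           ! its global i, j, k indices
   zain(2,1) = ki + 10000.d0*kj + 100000000.d0*kk       ! pack i + 1e4*j + 1e8*k into one double
   !
   ! MPI_MAXLOC on MPI_2DOUBLE_PRECISION compares the first slot of each pair and carries
   ! the second slot of the winning pair, so the packed location travels with the maximum
   CALL MPI_ALLREDUCE( zain, zaout, 1, MPI_2DOUBLE_PRECISION, MPI_MAXLOC, MPI_COMM_WORLD, ierror )
   !
   pmax = zaout(1,1)
   kk   = INT( zaout(2,1) / 100000000.d0 )                    ! unpack k ...
   kj   = INT( zaout(2,1) - kk*100000000.d0 ) / 10000         ! ... then j ...
   ki   = INT( zaout(2,1) - kk*100000000.d0 - kj*10000.d0 )   ! ... then i
   !
   PRINT *, 'global max', pmax, 'at', ki, kj, kk
   CALL MPI_FINALIZE( ierror )
END PROGRAM maxloc_pack_sketch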
NEMO/trunk/src/OCE/LBC/mpp_bdy_generic.h90
r10068 r10425 21 21 # endif 22 22 23 SUBROUTINE ROUTINE_BDY( ptab, cd_nat, psgn , kb_bdy )23 SUBROUTINE ROUTINE_BDY( cdname, ptab, cd_nat, psgn , kb_bdy ) 24 24 !!---------------------------------------------------------------------- 25 25 !! *** routine mpp_lnk_bdy_3d *** … … 42 42 !! 43 43 !!---------------------------------------------------------------------- 44 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 44 45 ARRAY_TYPE(:,:,:,:,:) ! array or pointer of arrays on which the boundary condition is applied 45 46 CHARACTER(len=1) , INTENT(in ) :: NAT_IN(:) ! nature of array grid-points … … 61 62 ipl = L_SIZE(ptab) ! 4th - 62 63 ipf = F_SIZE(ptab) ! 5th - use in "multi" case (array of pointers) 64 ! 65 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. ) 63 66 ! 64 67 ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,2), zt3sn(jpi,nn_hls,ipk,ipl,ipf,2), & … … 132 135 imigr = nn_hls * jpj * ipk * ipl 133 136 ! 137 IF( ln_timing ) CALL tic_tac(.TRUE.) 138 ! 134 139 SELECT CASE ( nbondi_bdy(IBD_IN(jf)) ) 135 140 CASE ( -1 ) … … 150 155 END SELECT 151 156 ! 157 IF( ln_timing ) CALL tic_tac(.FALSE.) 158 ! 152 159 ! ! Write Dirichlet lateral conditions 153 160 iihom = nlci-nn_hls … … 205 212 imigr = nn_hls * jpi * ipk * ipl 206 213 ! 214 IF( ln_timing ) CALL tic_tac(.TRUE.) 215 ! 207 216 SELECT CASE ( nbondj_bdy(IBD_IN(jf)) ) 208 217 CASE ( -1 ) … … 222 231 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 223 232 END SELECT 233 ! 234 IF( ln_timing ) CALL tic_tac(.FALSE.) 224 235 ! 225 236 ! ! Write Dirichlet lateral conditions -
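The ln_timing instrumentation added in this file follows the tic_tac routine introduced in lib_mpp.F90 above: every communication phase is bracketed by tic_tac(.TRUE.) and tic_tac(.FALSE.), so waiting time inside MPI calls and compute time between brackets accumulate in separate counters via MPI_Wtime. The stripped-down sketch below is illustrative only: scalar module counters stand in for NEMO's waiting_time/compute_time arrays, and it omits the optional ld_global argument and the nit000/nitend guards of the real routine.

MODULE tictac_sketch
   USE mpi
   IMPLICIT NONE
   REAL(8), SAVE :: waiting_time = 0.d0        ! time spent inside bracketed MPI calls
   REAL(8), SAVE :: compute_time = 0.d0        ! time spent between two communication phases
   REAL(8), SAVE :: t_tic = 0.d0, t_tac = 0.d0
CONTAINS
   SUBROUTINE tic_tac( ld_tic )
      LOGICAL, INTENT(in) :: ld_tic
      IF( ld_tic ) THEN                        ! entering a communication phase
         t_tic = MPI_Wtime()
         IF( t_tac > 0.d0 ) compute_time = compute_time + MPI_Wtime() - t_tac
      ELSE                                     ! leaving the communication phase
         waiting_time = waiting_time + MPI_Wtime() - t_tic
         t_tac = MPI_Wtime()
      ENDIF
   END SUBROUTINE tic_tac
END MODULE tictac_sketch

! typical use, inside an MPI-initialised program:
!    CALL tic_tac(.TRUE.)
!    ... mppsend / mpprecv / MPI_ALLGATHER ...
!    CALL tic_tac(.FALSE.)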
NEMO/trunk/src/OCE/LBC/mpp_lnk_generic.h90
r10068 r10425 46 46 47 47 #if defined MULTI 48 SUBROUTINE ROUTINE_LNK( ptab, cd_nat, psgn, kfld, cd_mpp, pval )48 SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn, kfld, cd_mpp, pval ) 49 49 INTEGER , INTENT(in ) :: kfld ! number of pt3d arrays 50 50 #else 51 SUBROUTINE ROUTINE_LNK( ptab, cd_nat, psgn , cd_mpp, pval )51 SUBROUTINE ROUTINE_LNK( cdname, ptab, cd_nat, psgn , cd_mpp, pval ) 52 52 #endif 53 53 ARRAY_TYPE(:,:,:,:,:) ! array or pointer of arrays on which the boundary condition is applied 54 CHARACTER(len=*) , INTENT(in ) :: cdname ! name of the calling subroutine 54 55 CHARACTER(len=1) , INTENT(in ) :: NAT_IN(:) ! nature of array grid-points 55 56 REAL(wp) , INTENT(in ) :: SGN_IN(:) ! sign used across the north fold boundary … … 61 62 INTEGER :: imigr, iihom, ijhom ! local integers 62 63 INTEGER :: ml_req1, ml_req2, ml_err ! for key_mpi_isend 64 INTEGER :: ierr 63 65 REAL(wp) :: zland 64 66 INTEGER , DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for key_mpi_isend … … 71 73 ipf = F_SIZE(ptab) ! 5th - use in "multi" case (array of pointers) 72 74 ! 73 ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,2), zt3sn(jpi,nn_hls,ipk,ipl,ipf,2), & 74 & zt3ew(jpj,nn_hls,ipk,ipl,ipf,2), zt3we(jpj,nn_hls,ipk,ipl,ipf,2) ) 75 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ipk, ipl, ipf, ld_lbc = .TRUE. ) 75 76 ! 76 77 IF( PRESENT( pval ) ) THEN ; zland = pval ! set land value … … 82 83 ! ------------------------------- ! 83 84 ! 84 IF( PRESENT( cd_mpp ) ) THEN !== halos filled with inner values ==! 85 ! 86 DO jf = 1, ipf ! number of arrays to be treated 87 ! 88 DO jl = 1, ipl ! CAUTION: ptab is defined only between nld and nle 89 DO jk = 1, ipk 90 DO jj = nlcj+1, jpj ! added line(s) (inner only) 91 ARRAY_IN(nldi :nlei ,jj,jk,jl,jf) = ARRAY_IN(nldi:nlei,nlej,jk,jl,jf) 92 ARRAY_IN(1 :nldi-1,jj,jk,jl,jf) = ARRAY_IN(nldi ,nlej,jk,jl,jf) 93 ARRAY_IN(nlei+1:nlci ,jj,jk,jl,jf) = ARRAY_IN( nlei,nlej,jk,jl,jf) 94 END DO 95 DO ji = nlci+1, jpi ! added column(s) (full) 96 ARRAY_IN(ji,nldj :nlej ,jk,jl,jf) = ARRAY_IN(nlei,nldj:nlej,jk,jl,jf) 97 ARRAY_IN(ji,1 :nldj-1,jk,jl,jf) = ARRAY_IN(nlei,nldj ,jk,jl,jf) 98 ARRAY_IN(ji,nlej+1:jpj ,jk,jl,jf) = ARRAY_IN(nlei, nlej,jk,jl,jf) 99 END DO 100 END DO 101 END DO 102 ! 103 END DO 104 ! 105 ELSE !== standard close or cyclic treatment ==! 85 IF( .NOT. PRESENT( cd_mpp ) ) THEN !== standard close or cyclic treatment ==! 106 86 ! 107 87 DO jf = 1, ipf ! number of arrays to be treated … … 132 112 ! we play with the neigbours AND the row number because of the periodicity 133 113 ! 114 IF( ABS(nbondi) == 1 ) ALLOCATE( zt3ew(jpj,nn_hls,ipk,ipl,ipf,1), zt3we(jpj,nn_hls,ipk,ipl,ipf,1) ) 115 IF( nbondi == 0 ) ALLOCATE( zt3ew(jpj,nn_hls,ipk,ipl,ipf,2), zt3we(jpj,nn_hls,ipk,ipl,ipf,2) ) 116 ! 134 117 SELECT CASE ( nbondi ) ! Read Dirichlet lateral conditions 135 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. close case) 118 CASE ( -1 ) 119 iihom = nlci-nreci 120 DO jf = 1, ipf 121 DO jl = 1, ipl 122 DO jk = 1, ipk 123 DO jh = 1, nn_hls 124 zt3we(:,jh,jk,jl,jf,1) = ARRAY_IN(iihom +jh,:,jk,jl,jf) 125 END DO 126 END DO 127 END DO 128 END DO 129 CASE ( 0 ) 136 130 iihom = nlci-nreci 137 131 DO jf = 1, ipf … … 145 139 END DO 146 140 END DO 147 END SELECT 148 ! 141 CASE ( 1 ) 142 iihom = nlci-nreci 143 DO jf = 1, ipf 144 DO jl = 1, ipl 145 DO jk = 1, ipk 146 DO jh = 1, nn_hls 147 zt3ew(:,jh,jk,jl,jf,1) = ARRAY_IN(nn_hls+jh,:,jk,jl,jf) 148 END DO 149 END DO 150 END DO 151 END DO 152 END SELECT 149 153 ! ! 
Migrations 150 imigr = nn_hls * jpj * ipk * ipl * ipf 154 imigr = nn_hls * jpj * ipk * ipl * ipf 155 ! 156 IF( ln_timing ) CALL tic_tac(.TRUE.) 151 157 ! 152 158 SELECT CASE ( nbondi ) 153 159 CASE ( -1 ) 154 160 CALL mppsend( 2, zt3we(1,1,1,1,1,1), imigr, noea, ml_req1 ) 155 CALL mpprecv( 1, zt3ew(1,1,1,1,1, 2), imigr, noea )161 CALL mpprecv( 1, zt3ew(1,1,1,1,1,1), imigr, noea ) 156 162 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 157 163 CASE ( 0 ) … … 164 170 CASE ( 1 ) 165 171 CALL mppsend( 1, zt3ew(1,1,1,1,1,1), imigr, nowe, ml_req1 ) 166 CALL mpprecv( 2, zt3we(1,1,1,1,1, 2), imigr, nowe )172 CALL mpprecv( 2, zt3we(1,1,1,1,1,1), imigr, nowe ) 167 173 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err ) 168 174 END SELECT 175 ! 176 IF( ln_timing ) CALL tic_tac(.FALSE.) 169 177 ! 170 178 ! ! Write Dirichlet lateral conditions … … 177 185 DO jk = 1, ipk 178 186 DO jh = 1, nn_hls 179 ARRAY_IN(iihom+jh,:,jk,jl,jf) = zt3ew(:,jh,jk,jl,jf, 2)187 ARRAY_IN(iihom+jh,:,jk,jl,jf) = zt3ew(:,jh,jk,jl,jf,1) 180 188 END DO 181 189 END DO … … 198 206 DO jk = 1, ipk 199 207 DO jh = 1, nn_hls 200 ARRAY_IN(jh ,:,jk,jl,jf) = zt3we(:,jh,jk,jl,jf,2) 201 END DO 202 END DO 203 END DO 204 END DO 205 END SELECT 208 ARRAY_IN(jh ,:,jk,jl,jf) = zt3we(:,jh,jk,jl,jf,1) 209 END DO 210 END DO 211 END DO 212 END DO 213 END SELECT 214 ! 215 IF( nbondi /= 2 ) DEALLOCATE( zt3ew, zt3we ) 206 216 207 217 ! 3. North and south directions … … 209 219 ! always closed : we play only with the neigbours 210 220 ! 211 IF( nbondj /= 2 ) THEN ! Read Dirichlet lateral conditions 221 IF( ABS(nbondj) == 1 ) ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,1), zt3sn(jpi,nn_hls,ipk,ipl,ipf,1) ) 222 IF( nbondj == 0 ) ALLOCATE( zt3ns(jpi,nn_hls,ipk,ipl,ipf,2), zt3sn(jpi,nn_hls,ipk,ipl,ipf,2) ) 223 ! 224 SELECT CASE ( nbondj ) 225 CASE ( -1 ) 226 ijhom = nlcj-nrecj 227 DO jf = 1, ipf 228 DO jl = 1, ipl 229 DO jk = 1, ipk 230 DO jh = 1, nn_hls 231 zt3sn(:,jh,jk,jl,jf,1) = ARRAY_IN(:,ijhom +jh,jk,jl,jf) 232 END DO 233 END DO 234 END DO 235 END DO 236 CASE ( 0 ) 212 237 ijhom = nlcj-nrecj 213 238 DO jf = 1, ipf … … 221 246 END DO 222 247 END DO 223 ENDIF 248 CASE ( 1 ) 249 ijhom = nlcj-nrecj 250 DO jf = 1, ipf 251 DO jl = 1, ipl 252 DO jk = 1, ipk 253 DO jh = 1, nn_hls 254 zt3ns(:,jh,jk,jl,jf,1) = ARRAY_IN(:,nn_hls+jh,jk,jl,jf) 255 END DO 256 END DO 257 END DO 258 END DO 259 END SELECT 224 260 ! 225 261 ! ! Migrations 226 262 imigr = nn_hls * jpi * ipk * ipl * ipf 227 263 ! 264 IF( ln_timing ) CALL tic_tac(.TRUE.) 265 ! 228 266 SELECT CASE ( nbondj ) 229 267 CASE ( -1 ) 230 268 CALL mppsend( 4, zt3sn(1,1,1,1,1,1), imigr, nono, ml_req1 ) 231 CALL mpprecv( 3, zt3ns(1,1,1,1,1, 2), imigr, nono )269 CALL mpprecv( 3, zt3ns(1,1,1,1,1,1), imigr, nono ) 232 270 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err ) 233 271 CASE ( 0 ) … … 240 278 CASE ( 1 ) 241 279 CALL mppsend( 3, zt3ns(1,1,1,1,1,1), imigr, noso, ml_req1 ) 242 CALL mpprecv( 4, zt3sn(1,1,1,1,1, 2), imigr, noso )280 CALL mpprecv( 4, zt3sn(1,1,1,1,1,1), imigr, noso ) 243 281 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err ) 244 282 END SELECT 245 283 ! 284 IF( ln_timing ) CALL tic_tac(.FALSE.) 246 285 ! ! 
Write Dirichlet lateral conditions 247 286 ijhom = nlcj-nn_hls … … 253 292 DO jk = 1, ipk 254 293 DO jh = 1, nn_hls 255 ARRAY_IN(:,ijhom+jh,jk,jl,jf) = zt3ns(:,jh,jk,jl,jf, 2)294 ARRAY_IN(:,ijhom+jh,jk,jl,jf) = zt3ns(:,jh,jk,jl,jf,1) 256 295 END DO 257 296 END DO … … 274 313 DO jk = 1, ipk 275 314 DO jh = 1, nn_hls 276 ARRAY_IN(:,jh,jk,jl,jf) = zt3sn(:,jh,jk,jl,jf,2) 277 END DO 278 END DO 279 END DO 280 END DO 281 END SELECT 315 ARRAY_IN(:,jh,jk,jl,jf) = zt3sn(:,jh,jk,jl,jf,1) 316 END DO 317 END DO 318 END DO 319 END DO 320 END SELECT 321 ! 322 IF( nbondj /= 2 ) DEALLOCATE( zt3ns, zt3sn ) 282 323 283 324 ! 4. north fold treatment … … 293 334 ENDIF 294 335 ! 295 DEALLOCATE( zt3ns, zt3sn, zt3ew, zt3we )296 !297 336 END SUBROUTINE ROUTINE_LNK 298 337 -
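The allocation rework above sizes the zt3ew/zt3we (and zt3ns/zt3sn) work arrays to a single slot when a subdomain has only one east-west (or north-south) neighbour, instead of always allocating both receive slots, and frees them as soon as each direction is done. The halo exchange itself is unchanged; the sketch below shows the east-west leg of such an exchange on a plain 2D array, using MPI_SENDRECV in place of NEMO's mppsend/mpprecv wrappers. Array names, the one-point halo width and the argument list are assumptions of this illustration, not NEMO code.

SUBROUTINE ew_halo_sketch( pt2d, kpi, kpj, kwest, keast, kcomm )
   USE mpi
   IMPLICIT NONE
   INTEGER , INTENT(in   ) :: kpi, kpj           ! local array size, including 1-point halos
   INTEGER , INTENT(in   ) :: kwest, keast       ! ranks of west/east neighbours (MPI_PROC_NULL if none)
   INTEGER , INTENT(in   ) :: kcomm              ! communicator
   REAL(8) , INTENT(inout) :: pt2d(kpi,kpj)
   REAL(8) :: zsend_e(kpj), zsend_w(kpj), zrecv_e(kpj), zrecv_w(kpj)
   INTEGER :: istat(MPI_STATUS_SIZE), ierr
   !
   zsend_e(:) = pt2d(kpi-1,:)         ! last inner column goes to the east neighbour
   zsend_w(:) = pt2d(2    ,:)         ! first inner column goes to the west neighbour
   !
   ! send east / receive from west, then send west / receive from east
   CALL MPI_SENDRECV( zsend_e, kpj, MPI_DOUBLE_PRECISION, keast, 1,   &
      &               zrecv_w, kpj, MPI_DOUBLE_PRECISION, kwest, 1, kcomm, istat, ierr )
   CALL MPI_SENDRECV( zsend_w, kpj, MPI_DOUBLE_PRECISION, kwest, 2,   &
      &               zrecv_e, kpj, MPI_DOUBLE_PRECISION, keast, 2, kcomm, istat, ierr )
   !
   IF( kwest /= MPI_PROC_NULL )   pt2d(1  ,:) = zrecv_w(:)   ! fill west halo
   IF( keast /= MPI_PROC_NULL )   pt2d(kpi,:) = zrecv_e(:)   ! fill east halo
END SUBROUTINE ew_halo_sketch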
NEMO/trunk/src/OCE/LBC/mpp_nfd_generic.h90
r10068 r10425 56 56 INTEGER :: ipi, ipj, ipk, ipl, ipf ! dimension of the input array 57 57 INTEGER :: imigr, iihom, ijhom ! local integers 58 INTEGER :: ierr, i taille, ilci, ildi, ilei, iilb58 INTEGER :: ierr, ibuffsize, ilci, ildi, ilei, iilb 59 59 INTEGER :: ij, iproc 60 60 INTEGER, DIMENSION (jpmaxngh) :: ml_req_nf ! for mpi_isend when avoiding mpi_allgather … … 62 62 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather 63 63 ! ! Workspace for message transfers avoiding mpi_allgather 64 REAL(wp), DIMENSION(:,:,:,:,:) , ALLOCATABLE :: ztab, ztabl, ztabr 64 INTEGER :: ipf_j ! sum of lines for all multi fields 65 INTEGER :: js ! counter 66 INTEGER, DIMENSION(:,:), ALLOCATABLE :: jj_s ! position of sent lines 67 INTEGER, DIMENSION(:), ALLOCATABLE :: ipj_s ! number of sent lines 68 REAL(wp), DIMENSION(:,:,:) , ALLOCATABLE :: ztabl 69 REAL(wp), DIMENSION(:,:,:,:,:) , ALLOCATABLE :: ztab, ztabr 65 70 REAL(wp), DIMENSION(:,:,:,:,:) , ALLOCATABLE :: znorthloc, zfoldwk 66 71 REAL(wp), DIMENSION(:,:,:,:,:,:), ALLOCATABLE :: znorthgloio … … 71 76 ipf = F_SIZE(ptab) ! 5th - use in "multi" case (array of pointers) 72 77 ! 73 ipj = 4 ! 2nd dimension of message transfers (last j-lines) 74 ! 75 ALLOCATE( znorthloc(jpimax,4,ipk,ipl,ipf) ) 76 ! 77 znorthloc(:,:,:,:,:) = 0._wp 78 ! 79 DO jf = 1, ipf ! put in znorthloc the last ipj j-lines of ptab 80 DO jl = 1, ipl 81 DO jk = 1, ipk 82 DO jj = nlcj - ipj +1, nlcj 83 ij = jj - nlcj + ipj 84 znorthloc(1:jpi,ij,jk,jl,jf) = ARRAY_IN(1:jpi,jj,jk,jl,jf) 78 IF( l_north_nogather ) THEN !== ???? ==! 79 80 ALLOCATE(ipj_s(ipf)) 81 82 ipj = 2 ! Max 2nd dimension of message transfers (last two j-line only) 83 ipj_s(:) = 1 ! Real 2nd dimension of message transfers (depending on perf requirement) 84 ! by default, only one line is exchanged 85 86 ALLOCATE( jj_s(ipf,2) ) 87 88 ! re-define number of exchanged lines : 89 ! must be two during the first two time steps 90 ! to correct possible incoherent values on North fold lines from restart 91 92 !!!!!!!!! temporary switch off this optimisation ==> force TRUE !!!!!!!! 93 l_full_nf_update = .TRUE. 94 95 ! Two lines update (slower but necessary to avoid different values ion identical grid points 96 IF ( l_full_nf_update .OR. & ! if coupling fields 97 ( ncom_stp == nit000 .AND. .NOT. ln_rstart ) ) & ! at first time step, if not restart 98 ipj_s(:) = 2 99 100 ! Index of modifying lines in input 101 DO jf = 1, ipf ! Loop over the number of arrays to be processed 102 ! 103 SELECT CASE ( npolj ) 104 ! 105 CASE ( 3, 4 ) ! * North fold T-point pivot 106 ! 107 SELECT CASE ( NAT_IN(jf) ) 108 ! 109 CASE ( 'T' , 'W' ,'U' ) ! T-, U-, W-point 110 jj_s(jf,1) = nlcj - 2 ; jj_s(jf,2) = nlcj - 1 111 CASE ( 'V' , 'F' ) ! V-, F-point 112 jj_s(jf,1) = nlcj - 3 ; jj_s(jf,2) = nlcj - 2 113 END SELECT 114 ! 115 CASE ( 5, 6 ) ! * North fold F-point pivot 116 SELECT CASE ( NAT_IN(jf) ) 117 ! 118 CASE ( 'T' , 'W' ,'U' ) ! T-, U-, W-point 119 jj_s(jf,1) = nlcj - 1 120 ipj_s(jf) = 1 ! need only one line anyway 121 CASE ( 'V' , 'F' ) ! V-, F-point 122 jj_s(jf,1) = nlcj - 2 ; jj_s(jf,2) = nlcj - 1 123 END SELECT 124 ! 125 END SELECT 126 ! 127 ENDDO 128 ! 129 ipf_j = sum (ipj_s(:)) ! Total number of lines to be exchanged 130 ! 131 ALLOCATE( znorthloc(jpimax,ipf_j,ipk,ipl,1) ) 132 ! 133 js = 0 134 DO jf = 1, ipf ! 
Loop over the number of arrays to be processed 135 DO jj = 1, ipj_s(jf) 136 js = js + 1 137 DO jl = 1, ipl 138 DO jk = 1, ipk 139 znorthloc(1:jpi,js,jk,jl,1) = ARRAY_IN(1:jpi,jj_s(jf,jj),jk,jl,jf) 140 END DO 85 141 END DO 86 142 END DO 87 143 END DO 88 END DO 89 ! 90 ! 91 itaille = jpimax * ipj * ipk * ipl * ipf 92 ! 93 IF( l_north_nogather ) THEN !== ???? ==! 94 ALLOCATE( zfoldwk(jpimax,4,ipk,ipl,ipf) ) 95 ALLOCATE( ztabl(jpimax ,4,ipk,ipl,ipf) , ztabr(jpimax*jpmaxngh,4,ipk,ipl,ipf) ) 96 ! 144 ! 145 ibuffsize = jpimax * ipf_j * ipk * ipl 146 ! 147 ALLOCATE( zfoldwk(jpimax,ipf_j,ipk,ipl,1) ) 148 ALLOCATE( ztabr(jpimax*jpmaxngh,ipj,ipk,ipl,ipf) ) 97 149 ! when some processors of the north fold are suppressed, 98 150 ! values of ztab* arrays corresponding to these suppressed domain won't be defined 99 151 ! and we need a default definition to 0. 100 152 ! a better test should be: a testing if "suppressed land-processors" belongs to the north-pole folding 101 IF ( jpni*jpnj /= jpnij ) THEN 102 ztabr(:,:,:,:,:) = 0._wp 103 ztabl(:,:,:,:,:) = 0._wp 104 END IF 105 ! 106 DO jf = 1, ipf 107 DO jl = 1, ipl 108 DO jk = 1, ipk 109 DO jj = nlcj-ipj+1, nlcj ! First put local values into the global array 110 ij = jj - nlcj + ipj 111 DO ji = nfsloop, nfeloop 112 ztabl(ji,ij,jk,jl,jf) = ARRAY_IN(ji,jj,jk,jl,jf) 113 END DO 114 END DO 115 END DO 116 END DO 117 END DO 153 IF ( jpni*jpnj /= jpnij ) ztabr(:,:,:,:,:) = 0._wp 154 ! 155 ! start waiting time measurement 156 IF( ln_timing ) CALL tic_tac(.TRUE.) 118 157 ! 119 158 DO jr = 1, nsndto 120 159 IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN 121 CALL mppsend( 5, znorthloc, itaille, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) )160 CALL mppsend( 5, znorthloc, ibuffsize, nfipproc(isendto(jr),jpnj), ml_req_nf(jr) ) 122 161 ENDIF 123 162 END DO 163 ! 124 164 DO jr = 1,nsndto 125 165 iproc = nfipproc(isendto(jr),jpnj) … … 134 174 ENDIF 135 175 IF( iproc /= narea-1 .AND. iproc /= -1 ) THEN 136 CALL mpprecv(5, zfoldwk, itaille, iproc) 137 DO jf = 1, ipf 176 CALL mpprecv(5, zfoldwk, ibuffsize, iproc) 177 js = 0 178 DO jf = 1, ipf ; DO jj = 1, ipj_s(jf) 179 js = js + 1 138 180 DO jl = 1, ipl 139 181 DO jk = 1, ipk 140 DO jj = 1, ipj 141 DO ji = ildi, ilei 142 ztabr(iilb+ji,jj,jk,jl,jf) = zfoldwk(ji,jj,jk,jl,jf) 143 END DO 182 DO ji = ildi, ilei 183 ztabr(iilb+ji,jj,jk,jl,jf) = zfoldwk(ji,js,jk,jl,1) 144 184 END DO 145 185 END DO 146 186 END DO 147 END DO 187 END DO; END DO 148 188 ELSE IF( iproc == narea-1 ) THEN 149 DO jf = 1, ipf 189 DO jf = 1, ipf ; DO jj = 1, ipj_s(jf) 150 190 DO jl = 1, ipl 151 191 DO jk = 1, ipk 152 DO jj = 1, ipj 153 DO ji = ildi, ilei 154 ztabr(iilb+ji,jj,jk,jl,jf) = ARRAY_IN(ji,nlcj-ipj+jj,jk,jl,jf) 155 END DO 192 DO ji = ildi, ilei 193 ztabr(iilb+ji,jj,jk,jl,jf) = ARRAY_IN(ji,jj_s(jf,jj),jk,jl,jf) 156 194 END DO 157 195 END DO 158 196 END DO 159 END DO 197 END DO; END DO 160 198 ENDIF 161 199 END DO … … 164 202 IF( nfipproc(isendto(jr),jpnj) /= narea-1 .AND. nfipproc(isendto(jr),jpnj) /= -1 ) THEN 165 203 CALL mpi_wait( ml_req_nf(jr), ml_stat, ml_err ) 166 ENDIF 204 ENDIF 167 205 END DO 168 206 ENDIF 207 ! 208 IF( ln_timing ) CALL tic_tac(.FALSE.) 209 ! 210 ! North fold boundary condition 211 ! 169 212 DO jf = 1, ipf 170 CALL lbc_nfd_nogather( ztabl(:,:,:,:,jf), ztabr(:,:,:,:,jf), cd_nat LBC_ARG, psgn LBC_ARG ) ! North fold boundary condition 171 END DO 172 DO jf = 1, ipf 213 CALL lbc_nfd_nogather(ARRAY_IN(:,:,:,:,jf), ztabr(:,1:ipj_s(jf),:,:,jf), cd_nat LBC_ARG, psgn LBC_ARG ) 214 END DO 215 ! 
216 DEALLOCATE( zfoldwk ) 217 DEALLOCATE( ztabr ) 218 DEALLOCATE( jj_s ) 219 DEALLOCATE( ipj_s ) 220 ELSE !== ???? ==! 221 ! 222 ipj = 4 ! 2nd dimension of message transfers (last j-lines) 223 ! 224 ALLOCATE( znorthloc(jpimax,ipj,ipk,ipl,ipf) ) 225 ! 226 DO jf = 1, ipf ! put in znorthloc the last ipj j-lines of ptab 173 227 DO jl = 1, ipl 174 228 DO jk = 1, ipk 175 DO jj = nlcj -ipj+1, nlcj ! Scatter back to ARRAY_IN229 DO jj = nlcj - ipj +1, nlcj 176 230 ij = jj - nlcj + ipj 177 DO ji= 1, nlci 178 ARRAY_IN(ji,jj,jk,jl,jf) = ztabl(ji,ij,jk,jl,jf) 179 END DO 231 znorthloc(1:jpi,ij,jk,jl,jf) = ARRAY_IN(1:jpi,jj,jk,jl,jf) 180 232 END DO 181 233 END DO … … 183 235 END DO 184 236 ! 185 DEALLOCATE( zfoldwk ) 186 DEALLOCATE( ztabl, ztabr ) 187 ELSE !== ???? ==! 188 ALLOCATE( ztab (jpiglo,4,ipk,ipl,ipf ) ) 189 ALLOCATE( znorthgloio(jpimax,4,ipk,ipl,ipf,jpni) ) 237 ibuffsize = jpimax * ipj * ipk * ipl * ipf 238 ! 239 ALLOCATE( ztab (jpiglo,ipj,ipk,ipl,ipf ) ) 240 ALLOCATE( znorthgloio(jpimax,ipj,ipk,ipl,ipf,jpni) ) 190 241 ! 191 242 ! when some processors of the north fold are suppressed, … … 193 244 ! and we need a default definition to 0. 194 245 ! a better test should be: a testing if "suppressed land-processors" belongs to the north-pole folding 195 IF ( jpni*jpnj /= jpnij ) ztab(:,:,:,:,:)=0._wp 196 ! 197 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 198 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 246 IF ( jpni*jpnj /= jpnij ) ztab(:,:,:,:,:) = 0._wp 247 ! 248 ! start waiting time measurement 249 IF( ln_timing ) CALL tic_tac(.TRUE.) 250 CALL MPI_ALLGATHER( znorthloc , ibuffsize, MPI_DOUBLE_PRECISION, & 251 & znorthgloio, ibuffsize, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 252 ! 253 ! stop waiting time measurement 254 IF( ln_timing ) CALL tic_tac(.FALSE.) 199 255 ! 200 256 DO jr = 1, ndim_rank_north ! recover the global north array -
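In the nogather branch above, the north-fold exchange no longer ships the last four j-lines of every field: ipj_s(jf) records how many lines field jf actually needs (one or two, depending on the pivot and grid-point type), jj_s(jf,:) records which lines, and all fields share one flat buffer of ipf_j = SUM(ipj_s) lines walked with the running counter js. The small self-contained sketch below shows only that packing bookkeeping on dummy 2D fields; sizes, field types and names are illustrative, not taken from NEMO.

PROGRAM nf_pack_sketch
   IMPLICIT NONE
   INTEGER, PARAMETER :: jpi = 8, jpj = 6, ipf = 2     ! local size and number of fields
   REAL(8)              :: ptab(jpi,jpj,ipf)           ! fields whose northern lines are folded
   INTEGER              :: ipj_s(ipf), jj_s(ipf,2)     ! number of sent lines / their j indices
   REAL(8), ALLOCATABLE :: zbuf(:,:)                   ! flat send buffer shared by all fields
   INTEGER              :: jf, jj, js, ipf_j
   !
   ptab(:,:,:) = 1.d0
   ipj_s(:) = (/ 1, 2 /)                               ! e.g. one line for a T-like field, two for a V-like field
   jj_s(1,1) = jpj - 1                                 ! which j-lines each field contributes
   jj_s(2,1) = jpj - 2   ;   jj_s(2,2) = jpj - 1
   !
   ipf_j = SUM( ipj_s(:) )                             ! total number of lines in the message
   ALLOCATE( zbuf(jpi,ipf_j) )
   !
   js = 0                                              ! pack: one running line counter for all fields
   DO jf = 1, ipf
      DO jj = 1, ipj_s(jf)
         js = js + 1
         zbuf(:,js) = ptab(:,jj_s(jf,jj),jf)
      END DO
   END DO
   !
   ! zbuf is what a single point-to-point message would carry; the receiver walks the same
   ! (jf,jj) loops with its own js counter to route each line back to the right field
   PRINT *, 'packed', ipf_j, 'lines of', jpi, 'points'
   DEALLOCATE( zbuf )
END PROGRAM nf_pack_sketch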
NEMO/trunk/src/OCE/LBC/mppini.F90
r10068 r10425 36 36 PUBLIC mpp_init ! called by opa.F90 37 37 38 INTEGER :: numbot = -1 ! 'bottom_level' local logical unit 39 INTEGER :: numbdy = -1 ! 'bdy_msk' local logical unit 40 38 41 !!---------------------------------------------------------------------- 39 42 !! NEMO/OCE 4.0 , NEMO Consortium (2018) … … 136 139 !!---------------------------------------------------------------------- 137 140 INTEGER :: ji, jj, jn, jproc, jarea ! dummy loop indices 141 INTEGER :: inijmin 142 INTEGER :: i2add 138 143 INTEGER :: inum ! local logical unit 139 INTEGER :: idir, ifreq, icont , isurf! local integers144 INTEGER :: idir, ifreq, icont ! local integers 140 145 INTEGER :: ii, il1, ili, imil ! - - 141 146 INTEGER :: ij, il2, ilj, ijm1 ! - - 142 147 INTEGER :: iino, ijno, iiso, ijso ! - - 143 148 INTEGER :: iiea, ijea, iiwe, ijwe ! - - 144 INTEGER :: iresti, irestj, iarea0 ! - - 145 INTEGER :: ierr ! local logical unit 146 REAL(wp):: zidom, zjdom ! local scalars 149 INTEGER :: iarea0 ! - - 150 INTEGER :: ierr, ios ! 151 INTEGER :: inbi, inbj, iimax, ijmax, icnt1, icnt2 152 LOGICAL :: llbest 147 153 INTEGER, ALLOCATABLE, DIMENSION(:) :: iin, ii_nono, ii_noea ! 1D workspace 148 154 INTEGER, ALLOCATABLE, DIMENSION(:) :: ijn, ii_noso, ii_nowe ! - - … … 151 157 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ilei, ildi, iono, ioea ! - - 152 158 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ilej, ildj, ioso, iowe ! - - 153 INTEGER, DIMENSION(jpiglo,jpjglo) :: imask ! 2D global domain workspace 154 !!---------------------------------------------------------------------- 155 159 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: llisoce ! - - 160 NAMELIST/nambdy/ ln_bdy, nb_bdy, ln_coords_file, cn_coords_file, & 161 & ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta, & 162 & cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta, & 163 & ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out, & 164 & cn_ice, nn_ice_dta, & 165 & rn_ice_tem, rn_ice_sal, rn_ice_age, & 166 & ln_vol, nn_volctl, nn_rimwidth, nb_jpk_bdy 167 !!---------------------------------------------------------------------- 168 169 ! do we need to take into account bdy_msk? 170 REWIND( numnam_ref ) ! Namelist nambdy in reference namelist : BDY 171 READ ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903) 172 903 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in reference namelist (mppini)', lwp ) 173 REWIND( numnam_cfg ) ! Namelist nambdy in configuration namelist : BDY 174 READ ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 ) 175 904 IF( ios > 0 ) CALL ctl_nam ( ios , 'nambdy in configuration namelist (mppini)', lwp ) 176 ! 177 IF( ln_read_cfg ) CALL iom_open( cn_domcfg, numbot ) 178 IF( ln_bdy .AND. ln_mask_file ) CALL iom_open( cn_mask_file, numbdy ) 179 ! 180 ! 1. Dimension arrays for subdomains 181 ! ----------------------------------- 182 ! 156 183 ! If dimensions of processor grid weren't specified in the namelist file 157 184 ! then we calculate them here now that we have our communicator size 158 IF( jpni < 1 .OR. jpnj < 1 ) CALL mpp_init_partition( mppsize ) 159 ! 160 #if defined key_agrif 161 IF( jpnij /= jpni*jpnj ) CALL ctl_stop( 'STOP', 'Cannot remove land proc with AGRIF' ) 162 #endif 163 ! 185 IF( jpni < 1 .OR. jpnj < 1 ) THEN 186 CALL mpp_init_bestpartition( mppsize, jpni, jpnj ) 187 llbest = .TRUE. 188 ELSE 189 CALL mpp_init_bestpartition( mppsize, inbi, inbj, icnt2 ) 190 CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax ) 191 CALL mpp_basic_decomposition( inbi, inbj, iimax, ijmax ) 192 IF( iimax*ijmax < jpimax*jpjmax ) THEN 193 llbest = .FALSE. 
194 icnt1 = jpni*jpnj - mppsize 195 WRITE(ctmp1,9000) ' The chosen domain decomposition ', jpni, ' x ', jpnj, ' with ', icnt1, ' land sub-domains' 196 WRITE(ctmp2,9000) ' has larger MPI subdomains (jpi = ', jpimax, ', jpj = ', jpjmax, ', jpi*jpj = ', jpimax*jpjmax, ')' 197 WRITE(ctmp3,9000) ' than the following domain decompostion ', inbi, ' x ', inbj, ' with ', icnt2, ' land sub-domains' 198 WRITE(ctmp4,9000) ' which MPI subdomains size is jpi = ', iimax, ', jpj = ', ijmax, ', jpi*jpj = ', iimax*ijmax, ' ' 199 CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' ) 200 ELSE 201 llbest = .TRUE. 202 ENDIF 203 ENDIF 204 205 ! look for land mpi subdomains... 206 ALLOCATE( llisoce(jpni,jpnj) ) 207 CALL mpp_init_isoce( jpni, jpnj, llisoce ) 208 inijmin = COUNT( llisoce ) ! number of oce subdomains 209 210 IF( mppsize < inijmin ) THEN 211 WRITE(ctmp1,9001) ' With this specified domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj 212 WRITE(ctmp2,9002) ' we can eliminate only ', jpni*jpnj - inijmin, ' land mpi subdomains therefore ' 213 WRITE(ctmp3,9001) ' the number of ocean mpi subdomains (', inijmin,') exceed the number of MPI processes:', mppsize 214 WRITE(ctmp4,*) ' ==>>> There is the list of best domain decompositions you should use: ' 215 CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 ) 216 CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. ) ! must be done by all core 217 CALL ctl_stop( 'STOP' ) 218 ENDIF 219 220 IF( mppsize > jpni*jpnj ) THEN 221 WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize 222 WRITE(ctmp2,9003) ' exceeds the maximum number of subdomains (ocean+land) = ', jpni*jpnj 223 WRITE(ctmp3,9001) ' defined by the following domain decomposition: jpni = ', jpni, ' jpnj = ', jpnj 224 WRITE(ctmp4,*) ' ==>>> There is the list of best domain decompositions you should use: ' 225 CALL ctl_stop( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4 ) 226 CALL mpp_init_bestpartition( mppsize, ldlist = .TRUE. ) ! must be done by all core 227 CALL ctl_stop( 'STOP' ) 228 ENDIF 229 230 jpnij = mppsize ! force jpnij definition <-- remove as much land subdomains as needed to reach this condition 231 IF( mppsize > inijmin ) THEN 232 WRITE(ctmp1,9003) ' The number of mpi processes: ', mppsize 233 WRITE(ctmp2,9003) ' exceeds the maximum number of ocean subdomains = ', inijmin 234 WRITE(ctmp3,9002) ' we suppressed ', jpni*jpnj - mppsize, ' land subdomains ' 235 WRITE(ctmp4,9002) ' BUT we had to keep ', mppsize - inijmin, ' land subdomains that are useless...' 236 CALL ctl_warn( 'mpp_init:', '~~~~~~~~ ', ctmp1, ctmp2, ctmp3, ctmp4, ' ', ' --- YOU ARE WASTING CPU... ---', ' ' ) 237 ELSE ! 
mppsize = inijmin 238 IF(lwp) THEN 239 IF(llbest) WRITE(numout,*) 'mpp_init: You use an optimal domain decomposition' 240 WRITE(numout,*) '~~~~~~~~ ' 241 WRITE(numout,9003) ' Number of mpi processes: ', mppsize 242 WRITE(numout,9003) ' Number of ocean subdomains = ', inijmin 243 WRITE(numout,9003) ' Number of suppressed land subdomains = ', jpni*jpnj - inijmin 244 WRITE(numout,*) 245 ENDIF 246 ENDIF 247 9000 FORMAT (a, i4, a, i4, a, i7, a) 248 9001 FORMAT (a, i4, a, i4) 249 9002 FORMAT (a, i4, a) 250 9003 FORMAT (a, i5) 251 252 IF( numbot /= -1 ) CALL iom_close( numbot ) 253 IF( numbdy /= -1 ) CALL iom_close( numbdy ) 254 164 255 ALLOCATE( nfiimpp(jpni,jpnj), nfipproc(jpni,jpnj), nfilcit(jpni,jpnj) , & 165 256 & nimppt(jpnij) , ibonit(jpnij) , nlcit(jpnij) , nlcjt(jpnij) , & … … 173 264 & ilej(jpni,jpnj), ildj(jpni,jpnj), ioso(jpni,jpnj), iowe(jpni,jpnj), & 174 265 & STAT=ierr ) 175 CALL mpp_sum( ierr )266 CALL mpp_sum( 'mppini', ierr ) 176 267 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'mpp_init: unable to allocate standard ocean arrays' ) 177 268 178 !179 269 #if defined key_agrif 180 270 IF( .NOT. Agrif_Root() ) THEN ! AGRIF children: specific setting (cf. agrif_user.F90) … … 186 276 ENDIF 187 277 #endif 188 189 #if defined key_nemocice_decomp 190 jpimax = ( nx_global+2-2*nn_hls + (jpni-1) ) / jpni + 2*nn_hls ! first dim. 191 jpjmax = ( ny_global+2-2*nn_hls + (jpnj-1) ) / jpnj + 2*nn_hls ! second dim. 192 #else 193 jpimax = ( jpiglo - 2*nn_hls + (jpni-1) ) / jpni + 2*nn_hls ! first dim. 194 jpjmax = ( jpjglo - 2*nn_hls + (jpnj-1) ) / jpnj + 2*nn_hls ! second dim. 195 #endif 196 197 ! 198 IF ( jpni * jpnj == jpnij ) THEN ! regular domain lay out over processors 199 imask(:,:) = 1 200 ELSEIF ( jpni*jpnj > jpnij ) THEN ! remove land-only processor (i.e. where imask(:,:)=0) 201 CALL mpp_init_mask( imask ) 202 ELSE ! error 203 CALL ctl_stop( 'mpp_init: jpnij > jpni x jpnj. Check namelist setting!' ) 204 ENDIF 205 ! 206 ! 1. Dimension arrays for subdomains 278 ! 279 ! 2. Index arrays for subdomains 207 280 ! ----------------------------------- 208 ! Computation of local domain sizes ilci() ilcj()209 ! These dimensions depend on global sizes jpni,jpnj and jpiglo,jpjglo210 ! The subdomains are squares lesser than or equal to the global211 ! dimensions divided by the number of processors minus the overlap array.212 281 ! 213 282 nreci = 2 * nn_hls 214 283 nrecj = 2 * nn_hls 215 iresti = 1 + MOD( jpiglo - nreci -1 , jpni ) 216 irestj = 1 + MOD( jpjglo - nrecj -1 , jpnj ) 217 ! 218 ! Need to use jpimax and jpjmax here since jpi and jpj not yet defined 219 #if defined key_nemocice_decomp 220 ! Change padding to be consistent with CICE 221 ilci(1:jpni-1 ,:) = jpimax 222 ilci(jpni ,:) = jpiglo - (jpni - 1) * (jpimax - nreci) 223 ! 224 ilcj(:, 1:jpnj-1) = jpjmax 225 ilcj(:, jpnj) = jpjglo - (jpnj - 1) * (jpjmax - nrecj) 226 #else 227 ilci(1:iresti ,:) = jpimax 228 ilci(iresti+1:jpni ,:) = jpimax-1 229 230 ilcj(:, 1:irestj) = jpjmax 231 ilcj(:, irestj+1:jpnj) = jpjmax-1 232 #endif 233 ! 234 zidom = nreci + sum(ilci(:,1) - nreci ) 235 zjdom = nrecj + sum(ilcj(1,:) - nrecj ) 284 CALL mpp_basic_decomposition( jpni, jpnj, jpimax, jpjmax, iimppt, ijmppt, ilci, ilcj ) 285 nfiimpp(:,:) = iimppt(:,:) 286 nfilcit(:,:) = ilci(:,:) 236 287 ! 
237 288 IF(lwp) THEN 238 289 WRITE(numout,*) 239 WRITE(numout,*) ' mpp_init :MPI Message Passing MPI - domain lay out over processors'240 WRITE(numout,*) '~~~~~~~~ '290 WRITE(numout,*) 'MPI Message Passing MPI - domain lay out over processors' 291 WRITE(numout,*) 241 292 WRITE(numout,*) ' defines mpp subdomains' 242 WRITE(numout,*) ' iresti = ', iresti, 'jpni = ', jpni243 WRITE(numout,*) ' irestj = ', irestj, 'jpnj = ', jpnj293 WRITE(numout,*) ' jpni = ', jpni 294 WRITE(numout,*) ' jpnj = ', jpnj 244 295 WRITE(numout,*) 245 WRITE(numout,*) ' sum ilci(i,1) = ', zidom, ' jpiglo = ', jpiglo 246 WRITE(numout,*) ' sum ilcj(1,j) = ', zjdom, ' jpjglo = ', jpjglo 247 ENDIF 248 249 ! 2. Index arrays for subdomains 250 ! ------------------------------- 251 iimppt(:,:) = 1 252 ijmppt(:,:) = 1 253 ipproc(:,:) = -1 254 ! 255 IF( jpni > 1 ) THEN 256 DO jj = 1, jpnj 257 DO ji = 2, jpni 258 iimppt(ji,jj) = iimppt(ji-1,jj) + ilci(ji-1,jj) - nreci 259 END DO 260 END DO 261 ENDIF 262 nfiimpp(:,:) = iimppt(:,:) 263 ! 264 IF( jpnj > 1 )THEN 265 DO jj = 2, jpnj 266 DO ji = 1, jpni 267 ijmppt(ji,jj) = ijmppt(ji,jj-1) + ilcj(ji,jj-1) - nrecj 268 END DO 269 END DO 270 ENDIF 271 296 WRITE(numout,*) ' sum ilci(i,1) = ', sum(ilci(:,1)), ' jpiglo = ', jpiglo 297 WRITE(numout,*) ' sum ilcj(1,j) = ', sum(ilcj(1,:)), ' jpjglo = ', jpjglo 298 ENDIF 299 272 300 ! 3. Subdomain description in the Regular Case 273 301 ! -------------------------------------------- … … 277 305 l_Jperio = jpnj == 1 .AND. (jperio == 2 .OR. jperio == 7) 278 306 279 icont = -1280 307 DO jarea = 1, jpni*jpnj 308 ! 281 309 iarea0 = jarea - 1 282 310 ii = 1 + MOD(iarea0,jpni) … … 334 362 ENDIF 335 363 ! 336 ! Check wet points over the entire domain to preserve the MPI communication stencil 337 isurf = 0 338 DO jj = 1, ilj 339 DO ji = 1, ili 340 IF( imask(ji+iimppt(ii,ij)-1, jj+ijmppt(ii,ij)-1) == 1) isurf = isurf+1 341 END DO 342 END DO 343 ! 344 IF( isurf /= 0 ) THEN 364 END DO 365 366 ! 4. deal with land subdomains 367 ! ---------------------------- 368 ! 369 ! specify which subdomains are oce subdomains; other are land subdomains 370 ipproc(:,:) = -1 371 icont = -1 372 DO jarea = 1, jpni*jpnj 373 iarea0 = jarea - 1 374 ii = 1 + MOD(iarea0,jpni) 375 ij = 1 + iarea0/jpni 376 IF( llisoce(ii,ij) ) THEN 345 377 icont = icont + 1 346 378 ipproc(ii,ij) = icont … … 349 381 ENDIF 350 382 END DO 351 ! 383 ! if needed add some land subdomains to reach jpnij active subdomains 384 i2add = jpnij - inijmin 385 DO jarea = 1, jpni*jpnj 386 iarea0 = jarea - 1 387 ii = 1 + MOD(iarea0,jpni) 388 ij = 1 + iarea0/jpni 389 IF( .NOT. llisoce(ii,ij) .AND. i2add > 0 ) THEN 390 icont = icont + 1 391 ipproc(ii,ij) = icont 392 iin(icont+1) = ii 393 ijn(icont+1) = ij 394 i2add = i2add - 1 395 ENDIF 396 END DO 352 397 nfipproc(:,:) = ipproc(:,:) 353 398 354 ! Check potential error 355 IF( icont+1 /= jpnij ) THEN 356 WRITE(ctmp1,*) ' jpni =',jpni,' jpnj =',jpnj 357 WRITE(ctmp2,*) ' jpnij =',jpnij, '< jpni x jpnj' 358 WRITE(ctmp3,*) ' ***********, mpp_init2 finds jpnij=',icont+1 359 CALL ctl_stop( 'STOP', 'mpp_init: Eliminate land processors algorithm', '', ctmp1, ctmp2, '', ctmp3 ) 360 ENDIF 361 362 ! 4. Subdomain print 399 ! neighbour treatment: change ibondi, ibondj if next to a land zone 400 DO jarea = 1, jpni*jpnj 401 ii = 1 + MOD( jarea-1 , jpni ) 402 ij = 1 + (jarea-1) / jpni 403 ! land-only area with an active n neigbour 404 IF ( ipproc(ii,ij) == -1 .AND. 0 <= iono(ii,ij) .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN 405 iino = 1 + MOD( iono(ii,ij) , jpni ) ! 
ii index of this n neigbour 406 ijno = 1 + iono(ii,ij) / jpni ! ij index of this n neigbour 407 ! In case of north fold exchange: I am the n neigbour of my n neigbour!! (#1057) 408 ! --> for northern neighbours of northern row processors (in case of north-fold) 409 ! need to reverse the LOGICAL direction of communication 410 idir = 1 ! we are indeed the s neigbour of this n neigbour 411 IF( ij == jpnj .AND. ijno == jpnj ) idir = -1 ! both are on the last row, we are in fact the n neigbour 412 IF( ibondj(iino,ijno) == idir ) ibondj(iino,ijno) = 2 ! this n neigbour had only a s/n neigbour -> no more 413 IF( ibondj(iino,ijno) == 0 ) ibondj(iino,ijno) = -idir ! this n neigbour had both, n-s neighbours -> keep 1 414 ENDIF 415 ! land-only area with an active s neigbour 416 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN 417 iiso = 1 + MOD( ioso(ii,ij) , jpni ) ! ii index of this s neigbour 418 ijso = 1 + ioso(ii,ij) / jpni ! ij index of this s neigbour 419 IF( ibondj(iiso,ijso) == -1 ) ibondj(iiso,ijso) = 2 ! this s neigbour had only a n neigbour -> no more neigbour 420 IF( ibondj(iiso,ijso) == 0 ) ibondj(iiso,ijso) = 1 ! this s neigbour had both, n-s neighbours -> keep s neigbour 421 ENDIF 422 ! land-only area with an active e neigbour 423 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= jpni*jpnj-1 ) THEN 424 iiea = 1 + MOD( ioea(ii,ij) , jpni ) ! ii index of this e neigbour 425 ijea = 1 + ioea(ii,ij) / jpni ! ij index of this e neigbour 426 IF( ibondi(iiea,ijea) == 1 ) ibondi(iiea,ijea) = 2 ! this e neigbour had only a w neigbour -> no more neigbour 427 IF( ibondi(iiea,ijea) == 0 ) ibondi(iiea,ijea) = -1 ! this e neigbour had both, e-w neighbours -> keep e neigbour 428 ENDIF 429 ! land-only area with an active w neigbour 430 IF( ipproc(ii,ij) == -1 .AND. 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN 431 iiwe = 1 + MOD( iowe(ii,ij) , jpni ) ! ii index of this w neigbour 432 ijwe = 1 + iowe(ii,ij) / jpni ! ij index of this w neigbour 433 IF( ibondi(iiwe,ijwe) == -1 ) ibondi(iiwe,ijwe) = 2 ! this w neigbour had only a e neigbour -> no more neigbour 434 IF( ibondi(iiwe,ijwe) == 0 ) ibondi(iiwe,ijwe) = 1 ! this w neigbour had both, e-w neighbours -> keep w neigbour 435 ENDIF 436 END DO 437 438 ! Update il[de][ij] according to modified ibond[ij] 439 ! ---------------------- 440 DO jproc = 1, jpnij 441 ii = iin(jproc) 442 ij = ijn(jproc) 443 IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 ) ildi(ii,ij) = 1 444 IF( ibondi(ii,ij) == 1 .OR. ibondi(ii,ij) == 2 ) ilei(ii,ij) = ilci(ii,ij) 445 IF( ibondj(ii,ij) == -1 .OR. ibondj(ii,ij) == 2 ) ildj(ii,ij) = 1 446 IF( ibondj(ii,ij) == 1 .OR. ibondj(ii,ij) == 2 ) ilej(ii,ij) = ilcj(ii,ij) 447 END DO 448 449 ! 5. Subdomain print 363 450 ! ------------------ 364 451 IF(lwp) THEN … … 385 472 9404 FORMAT(' * ' ,20(' ',i3,' * ') ) 386 473 ENDIF 387 388 ! 5. neighbour treatment: change ibondi, ibondj if next to a land zone389 ! ----------------------390 DO jarea = 1, jpni*jpnj391 ii = 1 + MOD( jarea-1 , jpni )392 ij = 1 + (jarea-1) / jpni393 ! land-only area with an active n neigbour394 IF ( ipproc(ii,ij) == -1 .AND. 0 <= iono(ii,ij) .AND. iono(ii,ij) <= jpni*jpnj-1 ) THEN395 iino = 1 + MOD( iono(ii,ij) , jpni ) ! ii index of this n neigbour396 ijno = 1 + iono(ii,ij) / jpni ! ij index of this n neigbour397 ! In case of north fold exchange: I am the n neigbour of my n neigbour!! (#1057)398 ! --> for northern neighbours of northern row processors (in case of north-fold)399 ! 
need to reverse the LOGICAL direction of communication400 idir = 1 ! we are indeed the s neigbour of this n neigbour401 IF( ij == jpnj .AND. ijno == jpnj ) idir = -1 ! both are on the last row, we are in fact the n neigbour402 IF( ibondj(iino,ijno) == idir ) ibondj(iino,ijno) = 2 ! this n neigbour had only a s/n neigbour -> no more403 IF( ibondj(iino,ijno) == 0 ) ibondj(iino,ijno) = -idir ! this n neigbour had both, n-s neighbours -> keep 1404 ENDIF405 ! land-only area with an active s neigbour406 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioso(ii,ij) .AND. ioso(ii,ij) <= jpni*jpnj-1 ) THEN407 iiso = 1 + MOD( ioso(ii,ij) , jpni ) ! ii index of this s neigbour408 ijso = 1 + ioso(ii,ij) / jpni ! ij index of this s neigbour409 IF( ibondj(iiso,ijso) == -1 ) ibondj(iiso,ijso) = 2 ! this s neigbour had only a n neigbour -> no more neigbour410 IF( ibondj(iiso,ijso) == 0 ) ibondj(iiso,ijso) = 1 ! this s neigbour had both, n-s neighbours -> keep s neigbour411 ENDIF412 ! land-only area with an active e neigbour413 IF( ipproc(ii,ij) == -1 .AND. 0 <= ioea(ii,ij) .AND. ioea(ii,ij) <= jpni*jpnj-1 ) THEN414 iiea = 1 + MOD( ioea(ii,ij) , jpni ) ! ii index of this e neigbour415 ijea = 1 + ioea(ii,ij) / jpni ! ij index of this e neigbour416 IF( ibondi(iiea,ijea) == 1 ) ibondi(iiea,ijea) = 2 ! this e neigbour had only a w neigbour -> no more neigbour417 IF( ibondi(iiea,ijea) == 0 ) ibondi(iiea,ijea) = -1 ! this e neigbour had both, e-w neighbours -> keep e neigbour418 ENDIF419 ! land-only area with an active w neigbour420 IF( ipproc(ii,ij) == -1 .AND. 0 <= iowe(ii,ij) .AND. iowe(ii,ij) <= jpni*jpnj-1) THEN421 iiwe = 1 + MOD( iowe(ii,ij) , jpni ) ! ii index of this w neigbour422 ijwe = 1 + iowe(ii,ij) / jpni ! ij index of this w neigbour423 IF( ibondi(iiwe,ijwe) == -1 ) ibondi(iiwe,ijwe) = 2 ! this w neigbour had only a e neigbour -> no more neigbour424 IF( ibondi(iiwe,ijwe) == 0 ) ibondi(iiwe,ijwe) = 1 ! this w neigbour had both, e-w neighbours -> keep w neigbour425 ENDIF426 END DO427 428 ! Update il[de][ij] according to modified ibond[ij]429 ! ----------------------430 DO jproc = 1, jpnij431 ii = iin(jproc)432 ij = ijn(jproc)433 IF( ibondi(ii,ij) == -1 .OR. ibondi(ii,ij) == 2 ) ildi(ii,ij) = 1434 IF( ibondi(ii,ij) == 1 .OR. ibondi(ii,ij) == 2 ) ilei(ii,ij) = ilci(ii,ij)435 IF( ibondj(ii,ij) == -1 .OR. ibondj(ii,ij) == 2 ) ildj(ii,ij) = 1436 IF( ibondj(ii,ij) == 1 .OR. ibondj(ii,ij) == 2 ) ilej(ii,ij) = ilcj(ii,ij)437 END DO438 474 439 475 ! just to save nono etc for all proc … … 516 552 njmppt(jproc) = ijmppt(ii,ij) 517 553 END DO 518 nfilcit(:,:) = ilci(:,:)519 554 520 555 ! Save processor layout in ascii file … … 610 645 & iimppt, ijmppt, ibondi, ibondj, ipproc, ipolj, & 611 646 & ilci, ilcj, ilei, ilej, ildi, ildj, & 612 & iono, ioea, ioso, iowe )647 & iono, ioea, ioso, iowe, llisoce) 613 648 ! 614 649 END SUBROUTINE mpp_init 615 650 616 651 617 SUBROUTINE mpp_init_mask( kmask ) 618 !!---------------------------------------------------------------------- 619 !! *** ROUTINE mpp_init_mask *** 620 !! 621 !! ** Purpose : Read relevant bathymetric information in a global array 622 !! in order to provide a land/sea mask used for the elimination 652 SUBROUTINE mpp_basic_decomposition( knbi, knbj, kimax, kjmax, kimppt, kjmppt, klci, klcj) 653 !!---------------------------------------------------------------------- 654 !! *** ROUTINE mpp_basic_decomposition *** 655 !! 656 !! ** Purpose : Lay out the global domain over processors. 657 !! 658 !! ** Method : Global domain is distributed in smaller local domains. 659 !! 
660 !! ** Action : - set for all knbi*knbj domains: 661 !! kimppt : longitudinal index 662 !! kjmppt : latitudinal index 663 !! klci : first dimension 664 !! klcj : second dimension 665 !!---------------------------------------------------------------------- 666 INTEGER, INTENT(in ) :: knbi, knbj 667 INTEGER, INTENT( out) :: kimax, kjmax 668 INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT( out) :: kimppt, kjmppt 669 INTEGER, DIMENSION(knbi,knbj), OPTIONAL, INTENT( out) :: klci, klcj 670 ! 671 INTEGER :: ji, jj 672 INTEGER :: iresti, irestj 673 INTEGER :: ireci, irecj 674 !!---------------------------------------------------------------------- 675 ! 676 #if defined key_nemocice_decomp 677 kimax = ( nx_global+2-2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls ! first dim. 678 kjmax = ( ny_global+2-2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls ! second dim. 679 #else 680 kimax = ( jpiglo - 2*nn_hls + (knbi-1) ) / knbi + 2*nn_hls ! first dim. 681 kjmax = ( jpjglo - 2*nn_hls + (knbj-1) ) / knbj + 2*nn_hls ! second dim. 682 #endif 683 IF( .NOT. PRESENT(kimppt) ) RETURN 684 ! 685 ! 1. Dimension arrays for subdomains 686 ! ----------------------------------- 687 ! Computation of local domain sizes klci() klcj() 688 ! These dimensions depend on global sizes knbi,knbj and jpiglo,jpjglo 689 ! The subdomains are squares lesser than or equal to the global 690 ! dimensions divided by the number of processors minus the overlap array. 691 ! 692 ireci = 2 * nn_hls 693 irecj = 2 * nn_hls 694 iresti = 1 + MOD( jpiglo - ireci -1 , knbi ) 695 irestj = 1 + MOD( jpjglo - irecj -1 , knbj ) 696 ! 697 ! Need to use kimax and kjmax here since jpi and jpj not yet defined 698 #if defined key_nemocice_decomp 699 ! Change padding to be consistent with CICE 700 klci(1:knbi-1 ,:) = kimax 701 klci(knbi ,:) = jpiglo - (knbi - 1) * (kimax - nreci) 702 klcj(:, 1:knbj-1) = kjmax 703 klcj(:, knbj) = jpjglo - (knbj - 1) * (kjmax - nrecj) 704 #else 705 klci(1:iresti ,:) = kimax 706 klci(iresti+1:knbi ,:) = kimax-1 707 klcj(:, 1:irestj) = kjmax 708 klcj(:, irestj+1:knbj) = kjmax-1 709 #endif 710 711 ! 2. Index arrays for subdomains 712 ! ------------------------------- 713 kimppt(:,:) = 1 714 kjmppt(:,:) = 1 715 ! 716 IF( knbi > 1 ) THEN 717 DO jj = 1, knbj 718 DO ji = 2, knbi 719 kimppt(ji,jj) = kimppt(ji-1,jj) + klci(ji-1,jj) - ireci 720 END DO 721 END DO 722 ENDIF 723 ! 724 IF( knbj > 1 )THEN 725 DO jj = 2, knbj 726 DO ji = 1, knbi 727 kjmppt(ji,jj) = kjmppt(ji,jj-1) + klcj(ji,jj-1) - irecj 728 END DO 729 END DO 730 ENDIF 731 732 END SUBROUTINE mpp_basic_decomposition 733 734 735 SUBROUTINE mpp_init_bestpartition( knbij, knbi, knbj, knbcnt, ldlist ) 736 !!---------------------------------------------------------------------- 737 !! *** ROUTINE mpp_init_bestpartition *** 738 !! 739 !! ** Purpose : 740 !! 741 !! ** Method : 742 !!---------------------------------------------------------------------- 743 INTEGER, INTENT(in ) :: knbij ! total number if subdomains (knbi*knbj) 744 INTEGER, OPTIONAL, INTENT( out) :: knbi, knbj ! number if subdomains along i and j (knbi and knbj) 745 INTEGER, OPTIONAL, INTENT( out) :: knbcnt ! number of land subdomains 746 LOGICAL, OPTIONAL, INTENT(in ) :: ldlist ! .true.: print the list the best domain decompositions (with land) 747 ! 
748 INTEGER :: ji, jj, ii, iitarget 749 INTEGER :: iszitst, iszjtst 750 INTEGER :: isziref, iszjref 751 INTEGER :: inbij, iszij 752 INTEGER :: inbimax, inbjmax, inbijmax 753 INTEGER :: isz0, isz1 754 INTEGER, DIMENSION( :), ALLOCATABLE :: indexok 755 INTEGER, DIMENSION( :), ALLOCATABLE :: inbi0, inbj0, inbij0 ! number of subdomains along i,j 756 INTEGER, DIMENSION( :), ALLOCATABLE :: iszi0, iszj0, iszij0 ! max size of the subdomains along i,j 757 INTEGER, DIMENSION( :), ALLOCATABLE :: inbi1, inbj1, inbij1 ! number of subdomains along i,j 758 INTEGER, DIMENSION( :), ALLOCATABLE :: iszi1, iszj1, iszij1 ! max size of the subdomains along i,j 759 LOGICAL :: llist 760 LOGICAL, DIMENSION(:,:), ALLOCATABLE :: llmsk2d ! max size of the subdomains along i,j 761 LOGICAL, DIMENSION(:,:), ALLOCATABLE :: llisoce ! - - 762 REAL(wp):: zpropland 763 !!---------------------------------------------------------------------- 764 ! 765 llist = .FALSE. 766 IF( PRESENT(ldlist) ) llist = ldlist 767 768 CALL mpp_init_landprop( zpropland ) ! get the proportion of land point over the gloal domain 769 inbij = NINT( REAL(knbij, wp) / ( 1.0 - zpropland ) ) ! define the largest possible value for jpni*jpnj 770 ! 771 IF( llist ) THEN ; inbijmax = inbij*2 772 ELSE ; inbijmax = inbij 773 ENDIF 774 ! 775 ALLOCATE(inbi0(inbijmax),inbj0(inbijmax),iszi0(inbijmax),iszj0(inbijmax)) 776 ! 777 inbimax = 0 778 inbjmax = 0 779 isziref = jpiglo*jpjglo+1 780 iszjref = jpiglo*jpjglo+1 781 ! 782 ! get the list of knbi that gives a smaller jpimax than knbi-1 783 ! get the list of knbj that gives a smaller jpjmax than knbj-1 784 DO ji = 1, inbijmax 785 #if defined key_nemocice_decomp 786 iszitst = ( nx_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls ! first dim. 787 #else 788 iszitst = ( jpiglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls 789 #endif 790 IF( iszitst < isziref ) THEN 791 isziref = iszitst 792 inbimax = inbimax + 1 793 inbi0(inbimax) = ji 794 iszi0(inbimax) = isziref 795 ENDIF 796 #if defined key_nemocice_decomp 797 iszjtst = ( ny_global+2-2*nn_hls + (ji-1) ) / ji + 2*nn_hls ! first dim. 798 #else 799 iszjtst = ( jpjglo - 2*nn_hls + (ji-1) ) / ji + 2*nn_hls 800 #endif 801 IF( iszjtst < iszjref ) THEN 802 iszjref = iszjtst 803 inbjmax = inbjmax + 1 804 inbj0(inbjmax) = ji 805 iszj0(inbjmax) = iszjref 806 ENDIF 807 END DO 808 809 ! combine these 2 lists to get all possible knbi*knbj < inbijmax 810 ALLOCATE( llmsk2d(inbimax,inbjmax) ) 811 DO jj = 1, inbjmax 812 DO ji = 1, inbimax 813 IF ( inbi0(ji) * inbj0(jj) <= inbijmax ) THEN ; llmsk2d(ji,jj) = .TRUE. 814 ELSE ; llmsk2d(ji,jj) = .FALSE. 815 ENDIF 816 END DO 817 END DO 818 isz1 = COUNT(llmsk2d) 819 ALLOCATE( inbi1(isz1), inbj1(isz1), iszi1(isz1), iszj1(isz1) ) 820 ii = 0 821 DO jj = 1, inbjmax 822 DO ji = 1, inbimax 823 IF( llmsk2d(ji,jj) .EQV. .TRUE. ) THEN 824 ii = ii + 1 825 inbi1(ii) = inbi0(ji) 826 inbj1(ii) = inbj0(jj) 827 iszi1(ii) = iszi0(ji) 828 iszj1(ii) = iszj0(jj) 829 END IF 830 END DO 831 END DO 832 DEALLOCATE( inbi0, inbj0, iszi0, iszj0 ) 833 DEALLOCATE( llmsk2d ) 834 835 ALLOCATE( inbij1(isz1), iszij1(isz1) ) 836 inbij1(:) = inbi1(:) * inbj1(:) 837 iszij1(:) = iszi1(:) * iszj1(:) 838 839 ! if therr is no land and no print 840 IF( .NOT. llist .AND. numbot == -1 .AND. numbdy == -1 ) THEN 841 ! 
get the smaller partition which gives the smallest subdomain size 842 ii = MINLOC(inbij1, mask = iszij1 == MINVAL(iszij1), dim = 1) 843 knbi = inbi1(ii) 844 knbj = inbj1(ii) 845 IF(PRESENT(knbcnt)) knbcnt = 0 846 DEALLOCATE( inbi1, inbj1, inbij1, iszi1, iszj1, iszij1 ) 847 RETURN 848 ENDIF 849 850 ! extract only the partitions which reduce the subdomain size in comparison with smaller partitions 851 ALLOCATE( indexok(isz1) ) ! to store indices of the best partitions 852 isz0 = 0 ! number of best partitions 853 inbij = 1 ! start with the min value of inbij1 => 1 854 iszij = jpiglo*jpjglo+1 ! default: larger than global domain 855 DO WHILE( inbij <= inbijmax ) ! if we did not reach the max of inbij1 856 ii = MINLOC(iszij1, mask = inbij1 == inbij, dim = 1) ! warning: send back the first occurence if multiple results 857 IF ( iszij1(ii) < iszij ) THEN 858 isz0 = isz0 + 1 859 indexok(isz0) = ii 860 iszij = iszij1(ii) 861 ENDIF 862 inbij = MINVAL(inbij1, mask = inbij1 > inbij) ! warning: return largest integer value if mask = .false. everywhere 863 END DO 864 DEALLOCATE( inbij1, iszij1 ) 865 866 ! keep only the best partitions (sorted by increasing order of subdomains number and decreassing subdomain size) 867 ALLOCATE( inbi0(isz0), inbj0(isz0), iszi0(isz0), iszj0(isz0) ) 868 DO ji = 1, isz0 869 ii = indexok(ji) 870 inbi0(ji) = inbi1(ii) 871 inbj0(ji) = inbj1(ii) 872 iszi0(ji) = iszi1(ii) 873 iszj0(ji) = iszj1(ii) 874 END DO 875 DEALLOCATE( indexok, inbi1, inbj1, iszi1, iszj1 ) 876 877 IF( llist ) THEN ! we print about 21 best partitions 878 IF(lwp) THEN 879 WRITE(numout,*) 880 WRITE(numout, *) ' For your information:' 881 WRITE(numout,'(a,i5,a)') ' list of the best partitions around ', knbij, ' mpi processes' 882 WRITE(numout, *) ' --------------------------------------', '-----', '--------------' 883 WRITE(numout,*) 884 END IF 885 iitarget = MINLOC( inbi0(:)*inbj0(:), mask = inbi0(:)*inbj0(:) >= knbij, dim = 1 ) 886 DO ji = MAX(1,iitarget-10), MIN(isz0,iitarget+10) 887 ALLOCATE( llisoce(inbi0(ji), inbj0(ji)) ) 888 CALL mpp_init_isoce( inbi0(ji), inbj0(ji), llisoce ) ! Warning: must be call by all cores (call mpp_sum) 889 inbij = COUNT(llisoce) 890 DEALLOCATE( llisoce ) 891 IF(lwp) WRITE(numout,'(a, i5, a, i5, a, i4, a, i4, a, i9, a, i5, a, i5, a)') & 892 & 'nb_cores ' , inbij,' oce + ', inbi0(ji)*inbj0(ji) - inbij & 893 & , ' land ( ', inbi0(ji),' x ', inbj0(ji), & 894 & ' ), nb_points ', iszi0(ji)*iszj0(ji),' ( ', iszi0(ji),' x ', iszj0(ji),' )' 895 END DO 896 DEALLOCATE( inbi0, inbj0, iszi0, iszj0 ) 897 RETURN 898 ENDIF 899 900 DEALLOCATE( iszi0, iszj0 ) 901 inbij = inbijmax + 1 ! default: larger than possible 902 ii = isz0+1 ! start from the end of the list (smaller subdomains) 903 DO WHILE( inbij > knbij ) ! while the number of ocean subdomains exceed the number of procs 904 ii = ii -1 905 ALLOCATE( llisoce(inbi0(ii), inbj0(ii)) ) 906 CALL mpp_init_isoce( inbi0(ii), inbj0(ii), llisoce ) ! must be done by all core 907 inbij = COUNT(llisoce) 908 DEALLOCATE( llisoce ) 909 END DO 910 knbi = inbi0(ii) 911 knbj = inbj0(ii) 912 IF(PRESENT(knbcnt)) knbcnt = knbi * knbj - inbij 913 DEALLOCATE( inbi0, inbj0 ) 914 ! 915 END SUBROUTINE mpp_init_bestpartition 916 917 918 SUBROUTINE mpp_init_landprop( propland ) 919 !!---------------------------------------------------------------------- 920 !! *** ROUTINE mpp_init_landprop *** 921 !! 922 !! ** Purpose : the the proportion of land points in the surface land-sea mask 923 !! 924 !! 
** Method : read iproc strips (of length jpiglo) of the land-sea mask 925 !!---------------------------------------------------------------------- 926 REAL(wp), INTENT( out) :: propland ! proportion of land points (between 0 and 1) 927 ! 928 INTEGER, DIMENSION(jpni*jpnj) :: kusedom_1d 929 INTEGER :: inboce 930 INTEGER :: iproc, idiv, ijsz 931 INTEGER :: ijstr, ijend, ijcnt 932 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: lloce 933 !!---------------------------------------------------------------------- 934 ! do nothing if there is no land-sea mask 935 IF( numbot == -1 .and. numbdy == -1 ) THEN 936 propland = 0. 937 RETURN 938 ENDIF 939 940 ! number of processes reading the bathymetry file 941 iproc = MINVAL( (/mppsize, jpjglo/2, 100/) ) ! read a least 2 lines, no more that 100 processes reading at the same time 942 943 ! we want to read iproc strips of the land-sea mask. -> pick up iproc processes among mppsize processes 944 IF( iproc == 1 ) THEN ; idiv = mppsize 945 ELSE ; idiv = ( mppsize - 1 ) / ( iproc - 1 ) 946 ENDIF 947 ijsz = jpjglo / iproc 948 IF( narea <= MOD(jpjglo,iproc) ) ijsz = ijsz + 1 949 950 IF( MOD( narea-1, idiv ) == 0 .AND. (idiv /= 1 .OR. narea <= iproc ) ) THEN 951 ! 952 ijstr = (narea-1)*(jpjglo/iproc) + MIN(narea-1, MOD(jpjglo,iproc)) + 1 953 ijend = ijstr + ijsz - 1 954 ijcnt = ijend - ijstr + 1 955 ! 956 ALLOCATE( lloce(jpiglo, ijcnt) ) ! allocate the strip 957 CALL mpp_init_readbot_strip( ijstr, ijcnt, lloce ) 958 inboce = COUNT(lloce) 959 DEALLOCATE(lloce) 960 ! 961 ELSE 962 inboce = 0 963 ENDIF 964 CALL mpp_sum( 'mppini', inboce ) 965 ! 966 propland = REAL( jpiglo*jpjglo - inboce, wp ) / REAL( jpiglo*jpjglo, wp ) 967 ! 968 END SUBROUTINE mpp_init_landprop 969 970 971 SUBROUTINE mpp_init_isoce( knbi, knbj, ldisoce ) 972 !!---------------------------------------------------------------------- 973 !! *** ROUTINE mpp_init_nboce *** 974 !! 975 !! ** Purpose : check for a mpi domain decomposition knbi x knbj which 976 !! subdomains contain at least 1 ocean point 977 !! 978 !! ** Method : read knbj strips (of length jpiglo) of the land-sea mask 979 !!---------------------------------------------------------------------- 980 INTEGER, INTENT(in ) :: knbi, knbj 981 LOGICAL, DIMENSION(knbi,knbj), INTENT( out) :: ldisoce ! 982 ! 983 INTEGER, DIMENSION(knbi,knbj) :: inboce 984 INTEGER, DIMENSION(knbi*knbj) :: inboce_1d 985 INTEGER :: idiv, i2read, inj 986 INTEGER :: iimax, ijmax 987 INTEGER :: ji,jj 988 LOGICAL, ALLOCATABLE, DIMENSION(:,:) :: lloce 989 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: iimppt, ilci 990 INTEGER, ALLOCATABLE, DIMENSION(:,:) :: ijmppt, ilcj 991 !!---------------------------------------------------------------------- 992 ! do nothing if there is no land-sea mask 993 IF( numbot == -1 .AND. numbdy == -1 ) THEN 994 ldisoce(:,:) = .TRUE. 995 RETURN 996 ENDIF 997 998 ! we want to read knbj strips of the land-sea mask. -> pick up knbj processes among mppsize processes 999 IF( knbj == 1 ) THEN ; idiv = mppsize 1000 ELSE ; idiv = ( mppsize - 1 ) / ( knbj - 1 ) 1001 ENDIF 1002 inboce(:,:) = 0 1003 IF( MOD( narea-1, idiv ) == 0 .AND. (idiv /= 1 .OR. narea <= knbj ) ) THEN 1004 ! 1005 ALLOCATE( iimppt(knbi,knbj), ijmppt(knbi,knbj), ilci(knbi,knbj), ilcj(knbi,knbj) ) 1006 CALL mpp_basic_decomposition( knbi, knbj, iimax, ijmax, iimppt, ijmppt, ilci, ilcj ) 1007 ! 1008 i2read = knbj / mppsize ! strip number to be read by this process 1009 IF( ( narea - 1 ) / idiv < MOD(knbj,mppsize) ) i2read = i2read + 1 1010 DO jj = 1, i2read 1011 ! 
strip number to be read (from 1 to knbj) 1012 inj = ( narea - 1 ) * ( knbj / mppsize ) + MIN( MOD(knbj,mppsize), ( narea - 1 ) / idiv ) + jj 1013 ALLOCATE( lloce(jpiglo, ilcj(1,inj)) ) ! allocate the strip 1014 CALL mpp_init_readbot_strip( ijmppt(1,inj), ilcj(1,inj), lloce ) ! read the strip 1015 DO ji = 1, knbi 1016 inboce(ji,inj) = COUNT( lloce(iimppt(ji,1):iimppt(ji,1)+ilci(ji,1)-1,:) ) 1017 END DO 1018 DEALLOCATE(lloce) 1019 END DO 1020 ! 1021 DEALLOCATE(iimppt, ijmppt, ilci, ilcj) 1022 ! 1023 ENDIF 1024 1025 inboce_1d = RESHAPE(inboce, (/ knbi*knbj /)) 1026 CALL mpp_sum( 'mppini', inboce_1d ) 1027 inboce = RESHAPE(inboce_1d, (/knbi, knbj/)) 1028 ldisoce = inboce /= 0 1029 ! 1030 END SUBROUTINE mpp_init_isoce 1031 1032 1033 SUBROUTINE mpp_init_readbot_strip( kjstr, kjcnt, ldoce ) 1034 !!---------------------------------------------------------------------- 1035 !! *** ROUTINE mpp_init_readbot_strip *** 1036 !! 1037 !! ** Purpose : Read relevant bathymetric information in order to 1038 !! provide a land/sea mask used for the elimination 623 1039 !! of land domains, in an mpp computation. 624 1040 !! 625 !! ** Method : Read the namelist ln_zco and ln_isfcav in namelist namzgr 626 !! in order to choose the correct bathymetric information 627 !! (file and variables) 628 !!---------------------------------------------------------------------- 629 INTEGER, DIMENSION(jpiglo,jpjglo), INTENT(out) :: kmask ! global domain 630 631 INTEGER :: inum !: logical unit for configuration file 632 INTEGER :: ios !: iostat error flag 633 INTEGER :: ijstartrow ! temporary integers 634 REAL(wp), DIMENSION(jpiglo,jpjglo) :: zbot, zbdy ! global workspace 635 REAL(wp) :: zidom , zjdom ! local scalars 636 NAMELIST/nambdy/ ln_bdy, nb_bdy, ln_coords_file, cn_coords_file, & 637 & ln_mask_file, cn_mask_file, cn_dyn2d, nn_dyn2d_dta, & 638 & cn_dyn3d, nn_dyn3d_dta, cn_tra, nn_tra_dta, & 639 & ln_tra_dmp, ln_dyn3d_dmp, rn_time_dmp, rn_time_dmp_out, & 640 & cn_ice, nn_ice_dta, & 641 & rn_ice_tem, rn_ice_sal, rn_ice_age, & 642 & ln_vol, nn_volctl, nn_rimwidth, nb_jpk_bdy 643 !!---------------------------------------------------------------------- 644 ! 0. initialisation 645 ! ----------------- 646 CALL iom_open( cn_domcfg, inum ) 647 ! 648 ! ocean bottom level 649 CALL iom_get( inum, jpdom_unknown, 'bottom_level' , zbot , lrowattr=ln_use_jattr ) ! nb of ocean T-points 650 ! 651 CALL iom_close( inum ) 652 ! 653 ! 2D ocean mask (=1 if at least one level of the water column is ocean, =0 otherwise) 654 WHERE( zbot(:,:) > 0 ) ; kmask(:,:) = 1 655 ELSEWHERE ; kmask(:,:) = 0 656 END WHERE 657 658 ! Adjust kmask with bdy_msk if it exists 659 660 REWIND( numnam_ref ) ! Namelist nambdy in reference namelist : BDY 661 READ ( numnam_ref, nambdy, IOSTAT = ios, ERR = 903) 662 903 IF( ios /= 0 ) CALL ctl_nam ( ios , 'nambdy in reference namelist (mppini)', lwp ) 663 ! 664 REWIND( numnam_cfg ) ! Namelist nambdy in configuration namelist : BDY 665 READ ( numnam_cfg, nambdy, IOSTAT = ios, ERR = 904 ) 666 904 IF( ios > 0 ) CALL ctl_nam ( ios , 'nambdy in configuration namelist (mppini)', lwp ) 667 668 IF( ln_bdy .AND. ln_mask_file ) THEN 669 CALL iom_open( cn_mask_file, inum ) 670 CALL iom_get ( inum, jpdom_unknown, 'bdy_msk', zbdy ) 671 CALL iom_close( inum ) 672 WHERE ( zbdy(:,:) <= 0. ) kmask = 0 673 ENDIF 674 ! 675 END SUBROUTINE mpp_init_mask 1041 !! ** Method : read stipe of size (jpiglo,...) 1042 !!---------------------------------------------------------------------- 1043 INTEGER , INTENT(in ) :: kjstr ! 
1044 INTEGER , INTENT(in ) :: kjcnt ! 1045 LOGICAL, DIMENSION(jpiglo,kjcnt), INTENT( out) :: ldoce ! 1046 ! 1047 INTEGER :: inumsave ! local logical unit 1048 REAL(wp), DIMENSION(jpiglo,kjcnt) :: zbot, zbdy 1049 !!---------------------------------------------------------------------- 1050 ! 1051 inumsave = numout ; numout = numnul ! redirect all print to /dev/null 1052 ! 1053 IF( numbot /= -1 ) THEN 1054 CALL iom_get( numbot, jpdom_unknown, 'bottom_level', zbot, kstart = (/1,kjstr/), kcount = (/jpiglo, kjcnt/) ) 1055 ELSE 1056 zbot(:,:) = 1. ! put a non-null value 1057 ENDIF 1058 1059 IF( numbdy /= -1 ) THEN ! Adjust with bdy_msk if it exists 1060 CALL iom_get ( numbdy, jpdom_unknown, 'bdy_msk', zbdy, kstart = (/1,kjstr/), kcount = (/jpiglo, kjcnt/) ) 1061 zbot(:,:) = zbot(:,:) * zbdy(:,:) 1062 ENDIF 1063 ! 1064 ldoce = zbot > 0. 1065 numout = inumsave 1066 ! 1067 END SUBROUTINE mpp_init_readbot_strip 676 1068 677 1069 … … 720 1112 ! 721 1113 END SUBROUTINE mpp_init_ioipsl 722 723 724 SUBROUTINE mpp_init_partition( num_pes )725 !!----------------------------------------------------------------------726 !! *** ROUTINE mpp_init_partition ***727 !!728 !! ** Purpose :729 !!730 !! ** Method :731 !!----------------------------------------------------------------------732 INTEGER, INTENT(in) :: num_pes ! The number of MPI processes we have733 !734 INTEGER, PARAMETER :: nfactmax = 20735 INTEGER :: nfact ! The no. of factors returned736 INTEGER :: ierr ! Error flag737 INTEGER :: ji738 INTEGER :: idiff, mindiff, imin ! For choosing pair of factors that are closest in value739 INTEGER, DIMENSION(nfactmax) :: ifact ! Array of factors740 !!----------------------------------------------------------------------741 !742 ierr = 0743 !744 CALL factorise( ifact, nfactmax, nfact, num_pes, ierr )745 !746 IF( nfact <= 1 ) THEN747 WRITE (numout, *) 'WARNING: factorisation of number of PEs failed'748 WRITE (numout, *) ' : using grid of ',num_pes,' x 1'749 jpnj = 1750 jpni = num_pes751 ELSE752 ! Search through factors for the pair that are closest in value753 mindiff = 1000000754 imin = 1755 DO ji = 1, nfact-1, 2756 idiff = ABS( ifact(ji) - ifact(ji+1) )757 IF( idiff < mindiff ) THEN758 mindiff = idiff759 imin = ji760 ENDIF761 END DO762 jpnj = ifact(imin)763 jpni = ifact(imin + 1)764 ENDIF765 !766 jpnij = jpni*jpnj767 !768 END SUBROUTINE mpp_init_partition769 770 771 SUBROUTINE factorise( kfax, kmaxfax, knfax, kn, kerr )772 !!----------------------------------------------------------------------773 !! *** ROUTINE factorise ***774 !!775 !! ** Purpose : return the prime factors of n.776 !! knfax factors are returned in array kfax which is of777 !! maximum dimension kmaxfax.778 !! ** Method :779 !!----------------------------------------------------------------------780 INTEGER , INTENT(in ) :: kn, kmaxfax781 INTEGER , INTENT( out) :: kerr, knfax782 INTEGER, DIMENSION(kmaxfax), INTENT( out) :: kfax783 !784 INTEGER :: ifac, jl, inu785 INTEGER, PARAMETER :: ntest = 14786 INTEGER, DIMENSION(ntest) :: ilfax787 !!----------------------------------------------------------------------788 !789 ! lfax contains the set of allowed factors.790 ilfax(:) = (/(2**jl,jl=ntest,1,-1)/)791 !792 ! Clear the error flag and initialise output vars793 kerr = 0794 kfax = 1795 knfax = 0796 !797 IF( kn /= 1 ) THEN ! Find the factors of n798 !799 ! nu holds the unfactorised part of the number.800 ! knfax holds the number of factors found.801 ! l points to the allowed factor list.802 ! 
ifac holds the current factor.803 !804 inu = kn805 knfax = 0806 !807 DO jl = ntest, 1, -1808 !809 ifac = ilfax(jl)810 IF( ifac > inu ) CYCLE811 !812 ! Test whether the factor will divide.813 !814 IF( MOD(inu,ifac) == 0 ) THEN815 !816 knfax = knfax + 1 ! Add the factor to the list817 IF( knfax > kmaxfax ) THEN818 kerr = 6819 write (*,*) 'FACTOR: insufficient space in factor array ', knfax820 return821 ENDIF822 kfax(knfax) = ifac823 ! Store the other factor that goes with this one824 knfax = knfax + 1825 kfax(knfax) = inu / ifac826 !WRITE (*,*) 'ARPDBG, factors ',knfax-1,' & ',knfax,' are ', kfax(knfax-1),' and ',kfax(knfax)827 ENDIF828 !829 END DO830 !831 ENDIF832 !833 END SUBROUTINE factorise834 1114 835 1115 … … 896 1176 END SUBROUTINE mpp_init_nfdcom 897 1177 898 1178 899 1179 #endif 900 1180
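The new mpp_basic_decomposition routine (also reused by mpp_init_isoce and mpp_init_bestpartition above) splits the jpiglo x jpjglo grid so that the first iresti/irestj subdomains carry the maximum local size and the remaining ones one point less. Below is a minimal standalone sketch of that arithmetic along the i-direction only; it is not part of the changeset, and the grid size, halo width and subdomain count are assumed example values.

   PROGRAM decomposition_sketch
      IMPLICIT NONE
      INTEGER, PARAMETER :: jpiglo = 360   ! assumed global i-size (example value)
      INTEGER, PARAMETER :: nn_hls = 1     ! assumed halo width
      INTEGER, PARAMETER :: jpni   = 8     ! assumed number of subdomains along i
      INTEGER :: jpimax, iresti, isum
      INTEGER :: ilci(jpni)
      !
      ! maximum local size: interior points split over jpni, rounded up, plus the halos
      jpimax = ( jpiglo - 2*nn_hls + (jpni-1) ) / jpni + 2*nn_hls
      ! number of subdomains that keep the maximum size
      iresti = 1 + MOD( jpiglo - 2*nn_hls - 1, jpni )
      ilci(1:iresti)      = jpimax
      ilci(iresti+1:jpni) = jpimax - 1
      !
      ! the subdomain interiors plus the two outer halos must rebuild the global size
      isum = 2*nn_hls + SUM( ilci(:) - 2*nn_hls )
      WRITE(*,*) 'local sizes ilci   :', ilci
      WRITE(*,*) 'sum check / jpiglo :', isum, jpiglo
   END PROGRAM decomposition_sketch

With these assumed values the sketch gives six subdomains of 47 points and two of 46, and the check recovers the global size, which is what the "sum ilci(i,1) = ..., jpiglo = ..." print in mpp_init is there to verify.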