Changeset 7525 for branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC
- Timestamp:
- 2017-01-04T17:47:47+01:00
- Location:
- branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC
- Files:
- 24 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diafwb.F90
r7037 r7525 55 55 REAL(wp) :: zarea, zvol, zwei 56 56 REAL(wp) :: ztemi(4), ztemo(4), zsali(4), zsalo(4), zflxi(4), zflxo(4) 57 REAL(wp) :: zt, zs, zu 57 REAL(wp) :: zt, zs, zu 58 58 REAL(wp) :: zsm0, zfwfnew 59 REAL(wp), DIMENSION(:,:) :: ztmp ! 2D workspace 59 60 IF( cp_cfg == "orca" .AND. jp_cfg == 1 .OR. jp_cfg == 2 .OR. jp_cfg == 4 ) THEN 60 61 !!---------------------------------------------------------------------- … … 63 64 ! Mean global salinity 64 65 zsm0 = 34.72654 65 66 66 ! To compute fwf mean value mean fwf 67 67 … … 72 72 a_salb = 0.e0 ! valeur de sal au debut de la simulation 73 73 ! sshb used because diafwb called after tranxt (i.e. after the swap) 74 a_sshb = SUM( e1e2t(:,:) * sshb(:,:) * tmask_i(:,:) ) 74 !$OMP PARALLEL DO schedule(static) private(jj,ji) REDUCTION(+:a_sshb) 75 DO jj = 1, jpj 76 DO ji = 1, jpi 77 ztmp(ji,jj) = e1e2t(ji,jj) * sshb(ji,jj) * tmask_i(ji,jj) 78 a_sshb = a_sshb + ztmp(ji,jj) 79 END DO 80 END DO 75 81 IF( lk_mpp ) CALL mpp_sum( a_sshb ) ! sum over the global domain 76 82 … … 86 92 IF( lk_mpp ) CALL mpp_sum( a_salb ) ! sum over the global domain 87 93 ENDIF 88 89 a_fwf = SUM( e1e2t(:,:) * ( emp(:,:)-rnf(:,:) ) * tmask_i(:,:) ) 94 95 !$OMP PARALLEL DO schedule(static) private(jj,ji) REDUCTION(+:a_fwf) 96 DO jj = 1, jpj 97 DO ji = 1, jpi 98 ztmp(ji,jj) = e1e2t(ji,jj) * ( emp(ji,jj)-rnf(ji,jj) ) * tmask_i(ji,jj) 99 a_fwf = a_fwf + ztmp(ji,jj) 100 END DO 101 END DO 102 90 103 IF( lk_mpp ) CALL mpp_sum( a_fwf ) ! sum over the global domain 91 104 … … 97 110 zfwfnew = 0.e0 98 111 ! Mean sea level at nitend 99 a_sshn = SUM( e1e2t(:,:) * sshn(:,:) * tmask_i(:,:) ) 112 !$OMP PARALLEL DO schedule(static) private(jj,ji) REDUCTION(+:a_sshn) 113 DO jj = 1, jpj 114 DO ji = 1, jpi 115 ztmp(ji,jj) = e1e2t(ji,jj) * sshn(ji,jj) * tmask_i(ji,jj) 116 a_sshn = a_sshn + ztmp(ji,jj) 117 END DO 118 END DO 100 119 IF( lk_mpp ) CALL mpp_sum( a_sshn ) ! 
sum over the global domain 101 zarea = SUM( e1e2t(:,:) * tmask_i(:,:) ) 120 !$OMP PARALLEL DO schedule(static) private(jj,ji) REDUCTION(+:zarea) 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 ztmp(ji,jj) = e1e2t(ji,jj) * tmask_i(ji,jj) 124 zarea = zarea + ztmp(ji,jj) 125 END DO 126 END DO 102 127 IF( lk_mpp ) CALL mpp_sum( zarea ) ! sum over the global domain 103 128 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diawri.F90
r7508 r7525 241 241 END DO 242 242 END DO 243 !$OMP DO schedule(static) private(jk )243 !$OMP DO schedule(static) private(jk,jj,ji) 244 244 DO jk = 1, jpk 245 z3d(:,:,jk) = wn(:,:,jk) * z2d(:,:) 245 DO jj = 1, jpj 246 DO ji = 1, jpi 247 z3d(ji,jj,jk) = wn(ji,jj,jk) * z2d(ji,jj) 248 END DO 249 END DO 246 250 END DO 247 251 !$OMP END DO NOWAIT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domngb.F90
r7508 r7525 63 63 CASE( 'U' ) 64 64 !$OMP PARALLEL DO schedule(static) private(jj, ji) 65 DO jj = 1, jpj 66 DO ji = 1, jpi 67 zglam(ji,jj) = glamu(ji,jj) ; zgphi(ji,jj) = gphiu(ji,jj) 65 DO jj = 1, jpj 66 DO ji = 1, jpi 67 zglam(ji,jj) = glamu(ji,jj) ; zgphi(ji,jj) = gphiu(ji,jj) 68 END DO 68 69 END DO 69 END DO 70 zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik) 70 zmask(nldi:nlei,nldj:nlej) = umask(nldi:nlei,nldj:nlej,ik) 71 71 CASE( 'V' ) 72 72 !$OMP PARALLEL DO schedule(static) private(jj, ji) 73 DO jj = 1, jpj 74 DO ji = 1, jpi 75 zglam(ji,jj) = glamv(ji,jj) ; zgphi(ji,jj) = gphiv(ji,jj) 73 DO jj = 1, jpj 74 DO ji = 1, jpi 75 zglam(ji,jj) = glamv(ji,jj) ; zgphi(ji,jj) = gphiv(ji,jj) 76 END DO 76 77 END DO 77 END DO 78 zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik) 78 zmask(nldi:nlei,nldj:nlej) = vmask(nldi:nlei,nldj:nlej,ik) 79 79 CASE( 'F' ) 80 80 !$OMP PARALLEL DO schedule(static) private(jj, ji) 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 zglam(ji,jj) = glamf(ji,jj) ; zgphi(ji,jj) = gphif(ji,jj) 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 zglam(ji,jj) = glamf(ji,jj) ; zgphi(ji,jj) = gphif(ji,jj) 84 END DO 84 85 END DO 85 END DO 86 zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik) 86 zmask(nldi:nlei,nldj:nlej) = fmask(nldi:nlei,nldj:nlej,ik) 87 87 CASE DEFAULT 88 88 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 DO jj = 1, jpj 90 DO ji = 1, jpi 91 zglam(ji,jj) = glamt(ji,jj) ; zgphi(ji,jj) = gphit(ji,jj) 89 DO jj = 1, jpj 90 DO ji = 1, jpi 91 zglam(ji,jj) = glamt(ji,jj) ; zgphi(ji,jj) = gphit(ji,jj) 92 END DO 92 93 END DO 93 END DO 94 zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik) 94 zmask(nldi:nlei,nldj:nlej) = tmask(nldi:nlei,nldj:nlej,ik) 95 95 END SELECT 96 96 97 97 IF (jphgr_msh /= 2 .AND. jphgr_msh /= 3) THEN 98 98 zlon = MOD( plon + 720., 360. ) ! 
plon between 0 and 360 99 !$OMP PARALLEL DO schedule(static) private(jj, ji, z glam, zlon)99 !$OMP PARALLEL DO schedule(static) private(jj, ji, zlon) 100 100 DO jj = 1, jpj 101 101 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynbfr.F90
r7508 r7525 66 66 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 67 67 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 68 DO jk = 1, jpk 69 DO jj = 1, jpj 70 DO ji = 1, jpi 71 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 72 ztrdv(ji,jj,jk) = va(ji,jj,jk) 68 DO jk = 1, jpk 69 DO jj = 1, jpj 70 DO ji = 1, jpi 71 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 72 ztrdv(ji,jj,jk) = va(ji,jj,jk) 73 END DO 73 74 END DO 74 75 END DO 75 END DO76 76 ENDIF 77 77 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynkeg.F90
r7508 r7525 93 93 CALL wrk_alloc( jpi,jpj,jpk, ztrdu, ztrdv ) 94 94 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 95 DO jk = 1, jpk96 DO jj = 1, jpj97 DO ji = 1, jpi98 ztrdu(ji,jj,jk) = ua(ji,jj,jk)99 ztrdv(ji,jj,jk) = va(ji,jj,jk)100 END DO101 END DO102 END DO95 DO jk = 1, jpk 96 DO jj = 1, jpj 97 DO ji = 1, jpi 98 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 99 ztrdv(ji,jj,jk) = va(ji,jj,jk) 100 END DO 101 END DO 102 END DO 103 103 ENDIF 104 104 !$OMP PARALLEL DO schedule(static) private(jj, ji) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynnxt.F90
r7513 r7525 132 132 END DO 133 133 END DO 134 !$OMP DO schedule(static) private(jk )134 !$OMP DO schedule(static) private(jk,jj,ji) 135 135 DO jk = 1, jpkm1 136 ua(:,:,jk) = ( ua(:,:,jk) - zue(:,:) * r1_hu_a(:,:) + ua_b(:,:) ) * umask(:,:,jk) 137 va(:,:,jk) = ( va(:,:,jk) - zve(:,:) * r1_hv_a(:,:) + va_b(:,:) ) * vmask(:,:,jk) 136 DO jj = 1, jpj 137 DO ji = 1, jpi 138 ua(ji,jj,jk) = ( ua(ji,jj,jk) - zue(ji,jj) * r1_hu_a(ji,jj) + ua_b(ji,jj) ) * umask(ji,jj,jk) 139 va(ji,jj,jk) = ( va(ji,jj,jk) - zve(ji,jj) * r1_hv_a(ji,jj) + va_b(ji,jj) ) * vmask(ji,jj,jk) 140 END DO 141 END DO 138 142 END DO 139 143 !$OMP END DO NOWAIT … … 145 149 ! In the forward case, this is done below after asselin filtering 146 150 ! so that asselin contribution is removed at the same time 147 !$OMP PARALLEL DO schedule(static) private(jk )151 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 148 152 DO jk = 1, jpkm1 149 un(:,:,jk) = ( un(:,:,jk) - un_adv(:,:) + un_b(:,:) )*umask(:,:,jk) 150 vn(:,:,jk) = ( vn(:,:,jk) - vn_adv(:,:) + vn_b(:,:) )*vmask(:,:,jk) 153 DO jj = 1, jpj 154 DO ji = 1, jpi 155 un(ji,jj,jk) = ( un(ji,jj,jk) - un_adv(ji,jj) + un_b(ji,jj) )*umask(ji,jj,jk) 156 vn(ji,jj,jk) = ( vn(ji,jj,jk) - vn_adv(ji,jj) + vn_b(ji,jj) )*vmask(ji,jj,jk) 157 END DO 158 END DO 151 159 END DO 152 160 ENDIF … … 198 206 zua(ji,jj,jk) = un(ji,jj,jk) ! save the now velocity before the asselin filter 199 207 zva(ji,jj,jk) = vn(ji,jj,jk) ! (caution: there will be a shift by 1 timestep in the 200 END DO201 END DO202 END DO203 ! ! computation of the asselin filter trends)208 ! computation of the asselin filter trends) 209 END DO 210 END DO 211 END DO 204 212 ENDIF 205 213 … … 208 216 IF( neuler == 0 .AND. kt == nit000 ) THEN !* Euler at first time-step: only swap 209 217 !$OMP PARALLEL 210 !$OMP DO schedule(static) private(jk )218 !$OMP DO schedule(static) private(jk,jj,ji) 211 219 DO jk = 1, jpkm1 212 un(:,:,jk) = ua(:,:,jk) ! 
un <-- ua 213 vn(:,:,jk) = va(:,:,jk) 220 DO jj = 1, jpj 221 DO ji = 1, jpi 222 un(ji,jj,jk) = ua(ji,jj,jk) ! un <-- ua 223 vn(ji,jj,jk) = va(ji,jj,jk) 224 END DO 225 END DO 214 226 END DO 215 227 !$OMP END DO NOWAIT 216 228 IF(.NOT.ln_linssh ) THEN 217 !$OMP DO schedule(static) private(jk )229 !$OMP DO schedule(static) private(jk,jj,ji) 218 230 DO jk = 1, jpkm1 219 e3t_b(:,:,jk) = e3t_n(:,:,jk) 220 e3u_b(:,:,jk) = e3u_n(:,:,jk) 221 e3v_b(:,:,jk) = e3v_n(:,:,jk) 231 DO jj = 1, jpj 232 DO ji = 1, jpi 233 e3t_b(ji,jj,jk) = e3t_n(ji,jj,jk) 234 e3u_b(ji,jj,jk) = e3u_n(ji,jj,jk) 235 e3v_b(ji,jj,jk) = e3v_n(ji,jj,jk) 236 END DO 237 END DO 222 238 END DO 223 239 !$OMP END DO NOWAIT … … 256 272 END DO 257 273 ELSE 258 !$OMP PARALLEL DO schedule(static) private(jk )274 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 259 275 DO jk = 1, jpkm1 260 e3t_b(:,:,jk) = e3t_n(:,:,jk) + atfp * ( e3t_b(:,:,jk) - 2._wp * e3t_n(:,:,jk) + e3t_a(:,:,jk) ) 276 DO jj = 1, jpj 277 DO ji = 1, jpi 278 e3t_b(ji,jj,jk) = e3t_n(ji,jj,jk) + atfp * ( e3t_b(ji,jj,jk) - 2._wp * e3t_n(ji,jj,jk) + e3t_a(ji,jj,jk) ) 279 END DO 280 END DO 261 281 END DO 262 282 ! Add volume filter correction: compatibility with tracer advection scheme … … 365 385 END DO 366 386 END DO 367 !$OMP DO schedule(static) private(jk )387 !$OMP DO schedule(static) private(jk,jj,ji) 368 388 DO jk = 1, jpkm1 369 ub(:,:,jk) = ub(:,:,jk) - (zue(:,:) * r1_hu_n(:,:) - un_b(:,:)) * umask(:,:,jk) 370 vb(:,:,jk) = vb(:,:,jk) - (zve(:,:) * r1_hv_n(:,:) - vn_b(:,:)) * vmask(:,:,jk) 389 DO jj = 1, jpj 390 DO ji = 1, jpi 391 ub(ji,jj,jk) = ub(ji,jj,jk) - (zue(ji,jj) * r1_hu_n(ji,jj) - un_b(ji,jj)) * umask(ji,jj,jk) 392 vb(ji,jj,jk) = vb(ji,jj,jk) - (zve(ji,jj) * r1_hv_n(ji,jj) - vn_b(ji,jj)) * vmask(ji,jj,jk) 393 END DO 394 END DO 371 395 END DO 372 396 !$OMP END DO NOWAIT … … 446 470 ENDIF 447 471 IF( l_trddyn ) THEN ! 
3D output: asselin filter trends on momentum 448 !$OMP DO schedule(static) private(jk, jj, ji)472 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 449 473 DO jk = 1, jpkm1 450 474 DO jj = 1, jpj -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg_ts.F90
r7512 r7525 1314 1314 ! Update barotropic trend: 1315 1315 IF( ln_dynadv_vec .OR. ln_linssh ) THEN 1316 !$OMP PARALLEL DO schedule(static) private(jk )1316 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 1317 1317 DO jk=1,jpkm1 1318 ua(:,:,jk) = ua(:,:,jk) + ( ua_b(:,:) - ub_b(:,:) ) * z1_2dt_b 1319 va(:,:,jk) = va(:,:,jk) + ( va_b(:,:) - vb_b(:,:) ) * z1_2dt_b 1318 DO jj = 1, jpj 1319 DO ji = 1, jpi 1320 ua(ji,jj,jk) = ua(ji,jj,jk) + ( ua_b(ji,jj) - ub_b(ji,jj) ) * z1_2dt_b 1321 va(ji,jj,jk) = va(ji,jj,jk) + ( va_b(ji,jj) - vb_b(ji,jj) ) * z1_2dt_b 1322 END DO 1323 END DO 1320 1324 END DO 1321 1325 ELSE … … 1335 1339 ! 1336 1340 !$OMP PARALLEL 1337 !$OMP DO schedule(static) private(jk )1341 !$OMP DO schedule(static) private(jk,jj,ji) 1338 1342 DO jk=1,jpkm1 1339 ua(:,:,jk) = ua(:,:,jk) + r1_hu_n(:,:) * ( ua_b(:,:) - ub_b(:,:) * hu_b(:,:) ) * z1_2dt_b 1340 va(:,:,jk) = va(:,:,jk) + r1_hv_n(:,:) * ( va_b(:,:) - vb_b(:,:) * hv_b(:,:) ) * z1_2dt_b 1343 DO jj = 1, jpj 1344 DO ji = 1, jpi 1345 ua(ji,jj,jk) = ua(ji,jj,jk) + r1_hu_n(ji,jj) * ( ua_b(ji,jj) - ub_b(ji,jj) * hu_b(ji,jj) ) * z1_2dt_b 1346 va(ji,jj,jk) = va(ji,jj,jk) + r1_hv_n(ji,jj) * ( va_b(ji,jj) - vb_b(ji,jj) * hv_b(ji,jj) ) * z1_2dt_b 1347 END DO 1348 END DO 1341 1349 END DO 1342 1350 !$OMP END DO NOWAIT … … 1352 1360 ENDIF 1353 1361 ! 1354 !$OMP PARALLEL DO schedule(static) private(jk )1362 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 1355 1363 DO jk = 1, jpkm1 1356 ! Correct velocities: 1357 un(:,:,jk) = ( un(:,:,jk) + un_adv(:,:) - un_b(:,:) ) * umask(:,:,jk) 1358 vn(:,:,jk) = ( vn(:,:,jk) + vn_adv(:,:) - vn_b(:,:) ) * vmask(:,:,jk) 1359 ! 1364 DO jj = 1, jpj 1365 DO ji = 1, jpi 1366 ! Correct velocities: 1367 un(ji,jj,jk) = ( un(ji,jj,jk) + un_adv(ji,jj) - un_b(ji,jj) ) * umask(ji,jj,jk) 1368 vn(ji,jj,jk) = ( vn(ji,jj,jk) + vn_adv(ji,jj) - vn_b(ji,jj) ) * vmask(ji,jj,jk) 1369 ! 1370 END DO 1371 END DO 1360 1372 END DO 1361 1373 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynvor.F90
r7508 r7525 108 108 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 109 109 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 110 DO jk = 1, jpk111 DO jj = 1, jpj112 DO ji = 1, jpi113 ztrdu(ji,jj,jk) = ua(ji,jj,jk)114 ztrdv(ji,jj,jk) = va(ji,jj,jk)115 END DO116 END DO117 END DO110 DO jk = 1, jpk 111 DO jj = 1, jpj 112 DO ji = 1, jpi 113 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 114 ztrdv(ji,jj,jk) = va(ji,jj,jk) 115 END DO 116 END DO 117 END DO 118 118 CALL vor_ene( kt, nrvm, ua, va ) ! relative vorticity or metric trend 119 119 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 120 DO jk = 1, jpk121 DO jj = 1, jpj122 DO ji = 1, jpi123 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)124 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)125 END DO126 END DO127 END DO120 DO jk = 1, jpk 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 124 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 125 END DO 126 END DO 127 END DO 128 128 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 129 129 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 130 DO jk = 1, jpk131 DO jj = 1, jpj132 DO ji = 1, jpi133 ztrdu(ji,jj,jk) = ua(ji,jj,jk)134 ztrdv(ji,jj,jk) = va(ji,jj,jk)135 END DO136 END DO137 END DO130 DO jk = 1, jpk 131 DO jj = 1, jpj 132 DO ji = 1, jpi 133 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 134 ztrdv(ji,jj,jk) = va(ji,jj,jk) 135 END DO 136 END DO 137 END DO 138 138 CALL vor_ene( kt, ncor, ua, va ) ! 
planetary vorticity trend 139 139 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 140 DO jk = 1, jpk141 DO jj = 1, jpj142 DO ji = 1, jpi143 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)144 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)145 END DO146 END DO147 END DO140 DO jk = 1, jpk 141 DO jj = 1, jpj 142 DO ji = 1, jpi 143 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 144 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 145 END DO 146 END DO 147 END DO 148 148 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 149 149 ELSE … … 154 154 IF( l_trddyn ) THEN ! trend diagnostics: splitthe trend in two 155 155 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 156 DO jk = 1, jpk157 DO jj = 1, jpj158 DO ji = 1, jpi159 ztrdu(ji,jj,jk) = ua(ji,jj,jk)160 ztrdv(ji,jj,jk) = va(ji,jj,jk)161 END DO162 END DO163 END DO156 DO jk = 1, jpk 157 DO jj = 1, jpj 158 DO ji = 1, jpi 159 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 160 ztrdv(ji,jj,jk) = va(ji,jj,jk) 161 END DO 162 END DO 163 END DO 164 164 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend 165 165 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 166 DO jk = 1, jpk167 DO jj = 1, jpj168 DO ji = 1, jpi169 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)170 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)171 END DO172 END DO173 END DO166 DO jk = 1, jpk 167 DO jj = 1, jpj 168 DO ji = 1, jpi 169 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 170 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 171 END DO 172 END DO 173 END DO 174 174 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 175 175 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 176 DO jk = 1, jpk177 DO jj = 1, jpj178 DO ji = 1, jpi179 ztrdu(ji,jj,jk) = ua(ji,jj,jk)180 ztrdv(ji,jj,jk) = va(ji,jj,jk)181 END DO182 END DO183 END DO176 DO jk = 1, jpk 177 DO jj = 1, jpj 178 DO ji = 1, jpi 179 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 180 ztrdv(ji,jj,jk) = va(ji,jj,jk) 181 END DO 182 END DO 183 END DO 184 184 CALL vor_ens( kt, ncor, ua, va ) ! 
planetary vorticity trend 185 185 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 186 DO jk = 1, jpk187 DO jj = 1, jpj188 DO ji = 1, jpi189 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)190 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)191 END DO192 END DO193 END DO186 DO jk = 1, jpk 187 DO jj = 1, jpj 188 DO ji = 1, jpi 189 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 190 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 191 END DO 192 END DO 193 END DO 194 194 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 195 195 ELSE … … 200 200 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 201 201 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 202 DO jk = 1, jpk203 DO jj = 1, jpj204 DO ji = 1, jpi205 ztrdu(ji,jj,jk) = ua(ji,jj,jk)206 ztrdv(ji,jj,jk) = va(ji,jj,jk)207 END DO208 END DO209 END DO202 DO jk = 1, jpk 203 DO jj = 1, jpj 204 DO ji = 1, jpi 205 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 206 ztrdv(ji,jj,jk) = va(ji,jj,jk) 207 END DO 208 END DO 209 END DO 210 210 CALL vor_ens( kt, nrvm, ua, va ) ! relative vorticity or metric trend (ens) 211 211 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 212 DO jk = 1, jpk213 DO jj = 1, jpj214 DO ji = 1, jpi215 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)216 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)217 END DO218 END DO219 END DO212 DO jk = 1, jpk 213 DO jj = 1, jpj 214 DO ji = 1, jpi 215 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 216 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 217 END DO 218 END DO 219 END DO 220 220 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 221 221 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 222 DO jk = 1, jpk223 DO jj = 1, jpj224 DO ji = 1, jpi225 ztrdu(ji,jj,jk) = ua(ji,jj,jk)226 ztrdv(ji,jj,jk) = va(ji,jj,jk)227 END DO228 END DO229 END DO222 DO jk = 1, jpk 223 DO jj = 1, jpj 224 DO ji = 1, jpi 225 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 226 ztrdv(ji,jj,jk) = va(ji,jj,jk) 227 END DO 228 END DO 229 END DO 230 230 CALL vor_ene( kt, ncor, ua, va ) ! 
planetary vorticity trend (ene) 231 231 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 232 DO jk = 1, jpk233 DO jj = 1, jpj234 DO ji = 1, jpi235 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)236 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)237 END DO238 END DO239 END DO232 DO jk = 1, jpk 233 DO jj = 1, jpj 234 DO ji = 1, jpi 235 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 236 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 237 END DO 238 END DO 239 END DO 240 240 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 241 241 ELSE … … 247 247 IF( l_trddyn ) THEN ! trend diagnostics: split the trend in two 248 248 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 249 DO jk = 1, jpk250 DO jj = 1, jpj251 DO ji = 1, jpi252 ztrdu(ji,jj,jk) = ua(ji,jj,jk)253 ztrdv(ji,jj,jk) = va(ji,jj,jk)254 END DO255 END DO256 END DO249 DO jk = 1, jpk 250 DO jj = 1, jpj 251 DO ji = 1, jpi 252 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 253 ztrdv(ji,jj,jk) = va(ji,jj,jk) 254 END DO 255 END DO 256 END DO 257 257 CALL vor_een( kt, nrvm, ua, va ) ! relative vorticity or metric trend 258 258 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 259 DO jk = 1, jpk260 DO jj = 1, jpj261 DO ji = 1, jpi262 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)263 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)264 END DO265 END DO266 END DO259 DO jk = 1, jpk 260 DO jj = 1, jpj 261 DO ji = 1, jpi 262 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 263 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 264 END DO 265 END DO 266 END DO 267 267 CALL trd_dyn( ztrdu, ztrdv, jpdyn_rvo, kt ) 268 268 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 269 DO jk = 1, jpk270 DO jj = 1, jpj271 DO ji = 1, jpi272 ztrdu(ji,jj,jk) = ua(ji,jj,jk)273 ztrdv(ji,jj,jk) = va(ji,jj,jk)274 END DO275 END DO276 END DO269 DO jk = 1, jpk 270 DO jj = 1, jpj 271 DO ji = 1, jpi 272 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 273 ztrdv(ji,jj,jk) = va(ji,jj,jk) 274 END DO 275 END DO 276 END DO 277 277 CALL vor_een( kt, ncor, ua, va ) ! 
planetary vorticity trend 278 278 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 279 DO jk = 1, jpk280 DO jj = 1, jpj281 DO ji = 1, jpi282 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk)283 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk)284 END DO285 END DO286 END DO279 DO jk = 1, jpk 280 DO jj = 1, jpj 281 DO ji = 1, jpi 282 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 283 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 284 END DO 285 END DO 286 END DO 287 287 CALL trd_dyn( ztrdu, ztrdv, jpdyn_pvo, kt ) 288 288 ELSE -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzad.F90
r7508 r7525 78 78 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 79 79 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 80 DO jk = 1, jpk81 DO jj = 1, jpj82 DO ji = 1, jpi83 ztrdu(ji,jj,jk) = ua(ji,jj,jk)84 ztrdv(ji,jj,jk) = va(ji,jj,jk)85 END DO86 END DO87 END DO80 DO jk = 1, jpk 81 DO jj = 1, jpj 82 DO ji = 1, jpi 83 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 84 ztrdv(ji,jj,jk) = va(ji,jj,jk) 85 END DO 86 END DO 87 END DO 88 88 ENDIF 89 89 … … 145 145 IF( l_trddyn ) THEN ! save the vertical advection trends for diagnostic 146 146 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 147 148 149 150 151 152 153 154 147 DO jk = 1, jpk 148 DO jj = 1, jpj 149 DO ji = 1, jpi 150 ztrdu(ji,jj,jk) = ua(ji,jj,jk) - ztrdu(ji,jj,jk) 151 ztrdv(ji,jj,jk) = va(ji,jj,jk) - ztrdv(ji,jj,jk) 152 END DO 153 END DO 154 END DO 155 155 CALL trd_dyn( ztrdu, ztrdv, jpdyn_zad, kt ) 156 156 CALL wrk_dealloc( jpi, jpj, jpk, ztrdu, ztrdv ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzdf.F90
r7508 r7525 68 68 CALL wrk_alloc( jpi, jpj, jpk, ztrdu, ztrdv ) 69 69 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 70 DO jk = 1, jpk 71 DO jj = 1, jpj 72 DO ji = 1, jpi 73 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 74 ztrdv(ji,jj,jk) = va(ji,jj,jk) 70 DO jk = 1, jpk 71 DO jj = 1, jpj 72 DO ji = 1, jpi 73 ztrdu(ji,jj,jk) = ua(ji,jj,jk) 74 ztrdv(ji,jj,jk) = va(ji,jj,jk) 75 END DO 75 76 END DO 76 77 END DO 77 END DO78 78 ENDIF 79 79 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/sshwzv.F90
r7508 r7525 105 105 DO jk = 1, jpkm1 ! Horizontal divergence of barotropic transports 106 106 !$OMP DO schedule(static) private(jj, ji) 107 108 109 110 111 107 DO jj = 1, jpj 108 DO ji = 1, jpi ! vector opt. 109 zhdiv(ji,jj) = zhdiv(ji,jj) + e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) 110 END DO 111 END DO 112 112 END DO 113 113 !$OMP END PARALLEL … … 120 120 IF(ln_wd) CALL wad_lmt(sshb, zcoef * (emp_b(:,:) + emp(:,:)), z2dt) 121 121 !$OMP PARALLEL DO schedule(static) private(jj, ji) 122 DO jj = 1, jpj123 DO ji = 1, jpi124 ssha(ji,jj) = ( sshb(ji,jj) - z2dt * ( zcoef * ( emp_b(ji,jj) + emp(ji,jj) ) + zhdiv(ji,jj) ) ) * ssmask(ji,jj)125 END DO126 END DO122 DO jj = 1, jpj 123 DO ji = 1, jpi 124 ssha(ji,jj) = ( sshb(ji,jj) - z2dt * ( zcoef * ( emp_b(ji,jj) + emp(ji,jj) ) + zhdiv(ji,jj) ) ) * ssmask(ji,jj) 125 END DO 126 END DO 127 127 IF ( .NOT.ln_dynspg_ts ) THEN 128 128 ! These lines are not necessary with time splitting since … … 143 143 CALL ssh_asm_inc( kt ) 144 144 !$OMP PARALLEL DO schedule(static) private(jj, ji) 145 DO jj = 1, jpj146 DO ji = 1, jpi147 ssha(ji,jj) = ssha(ji,jj) + z2dt * ssh_iau(ji,jj)148 END DO149 END DO145 DO jj = 1, jpj 146 DO ji = 1, jpi 147 ssha(ji,jj) = ssha(ji,jj) + z2dt * ssh_iau(ji,jj) 148 END DO 149 END DO 150 150 ENDIF 151 151 #endif … … 193 193 IF(lwp) WRITE(numout,*) '~~~~~ ' 194 194 ! 195 wn(ji,jj,jpk) = 0._wp ! bottom boundary condition: w=0 (set once for all) 195 !$OMP PARALLEL DO schedule(static) private(jj, ji) 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 wn(ji,jj,jpk) = 0._wp ! bottom boundary condition: w=0 (set once for all) 199 END DO 200 END DO 196 201 ENDIF 197 202 ! !------------------------------! … … 221 226 DO jj = 1, jpj 222 227 DO ji = 1, jpi ! vector opt. 
223 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) + zhdiv(ji,jj,jk) &224 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk)228 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) + zhdiv(ji,jj,jk) & 229 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk) 225 230 END DO 226 231 END DO … … 233 238 DO jj = 1, jpj 234 239 DO ji = 1, jpi ! vector opt. 235 ! computation of w236 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) &237 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk)240 ! computation of w 241 wn(ji,jj,jk) = wn(ji,jj,jk+1) - ( e3t_n(ji,jj,jk) * hdivn(ji,jj,jk) & 242 & + z1_2dt * ( e3t_a(ji,jj,jk) - e3t_b(ji,jj,jk) ) ) * tmask(ji,jj,jk) 238 243 END DO 239 244 END DO … … 291 296 & ( ln_bt_fw .AND. ln_dynspg_ts ) ) THEN 292 297 !$OMP PARALLEL DO schedule(static) private(jj, ji) 293 DO jj = 1, jpj294 DO ji = 1, jpi295 sshb(ji,jj) = sshn(ji,jj) ! before <-- now296 sshn(ji,jj) = ssha(ji,jj) ! now <-- after (before already = now)297 END DO298 END DO298 DO jj = 1, jpj 299 DO ji = 1, jpi 300 sshb(ji,jj) = sshn(ji,jj) ! before <-- now 301 sshn(ji,jj) = ssha(ji,jj) ! now <-- after (before already = now) 302 END DO 303 END DO 299 304 ! 300 305 ELSE !== Leap-Frog time-stepping: Asselin filter + swap ==! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfdyn.F90
r7508 r7525 160 160 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = constant ' 161 161 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 162 DO jk = 1, jpk163 DO jj = 1, jpj164 DO ji = 1, jpi165 ahmt(ji,jj,jk) = zah0 * tmask(ji,jj,jk)166 ahmf(ji,jj,jk) = zah0 * fmask(ji,jj,jk)167 END DO168 END DO169 END DO170 !162 DO jk = 1, jpk 163 DO jj = 1, jpj 164 DO ji = 1, jpi 165 ahmt(ji,jj,jk) = zah0 * tmask(ji,jj,jk) 166 ahmf(ji,jj,jk) = zah0 * fmask(ji,jj,jk) 167 END DO 168 END DO 169 END DO 170 ! 171 171 CASE( 10 ) !== fixed profile ==! 172 172 IF(lwp) WRITE(numout,*) ' momentum mixing coef. = F( depth )' … … 189 189 !! do we introduce a scaling by the max value of the array, and then multiply by zah0 ???? 190 190 !! better: check that the max is <=1 i.e. it is a shape from 0 to 1, not a coef that has physical dimension 191 !$OMP PARALLEL DO schedule(static) private(jk )191 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 192 192 DO jk = 2, jpkm1 193 ahmt(:,:,jk) = ahmt(:,:,1) * tmask(:,:,jk) 194 ahmf(:,:,jk) = ahmf(:,:,1) * fmask(:,:,jk) 193 DO jj = 1, jpj 194 DO ji = 1, jpi 195 ahmt(ji,jj,jk) = ahmt(ji,jj,1) * tmask(ji,jj,jk) 196 ahmf(ji,jj,jk) = ahmf(ji,jj,1) * fmask(ji,jj,jk) 197 END DO 198 END DO 195 199 END DO 196 200 ! … … 208 212 !!gm Question : info for LAP or BLP case to take into account the SQRT in the bilaplacian case ???? 209 213 !! do we introduce a scaling by the max value of the array, and then multiply by zah0 ???? 210 !$OMP PARALLEL DO schedule(static) private(jk )214 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 211 215 DO jk = 1, jpkm1 212 ahmt(:,:,jk) = ahmt(:,:,jk) * tmask(:,:,jk) 213 ahmf(:,:,jk) = ahmf(:,:,jk) * fmask(:,:,jk) 216 DO jj = 1, jpj 217 DO ji = 1, jpi 218 ahmt(ji,jj,jk) = ahmt(ji,jj,jk) * tmask(ji,jj,jk) 219 ahmf(ji,jj,jk) = ahmf(ji,jj,jk) * fmask(ji,jj,jk) 220 END DO 221 END DO 214 222 END DO 215 223 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/geo2ocean.F90
r7037 r7525 158 158 ! 159 159 !$OMP PARALLEL 160 !$OMP DO schedule(static) private(jj,ji,zlam,zphi,zxnpt,zynpt,znnpt,zxnpu,zynpu,znnpu,zxnpv,zynpv,znnpv,zxnpf,zynpf,znnpf,zlan,zphh,zxvvt,zyvvt,znvvt,zxffu,zyffu,znffu,zxffv,zyffv,znffv,zxuuf,zyuuf,znuuf) 160 !$OMP DO schedule(static) private(jj,ji,zlam,zphi,zxnpt,zynpt,znnpt,zxnpu,zynpu,znnpu,zxnpv,zynpv,znnpv,zxnpf) & 161 !$OMP& private(zynpf,znnpf,zlan,zphh,zxvvt,zyvvt,znvvt,zxffu,zyffu,znffu,zxffv,zyffv,znffv,zxuuf,zyuuf,znuuf) 161 162 DO jj = 2, jpjm1 162 163 DO ji = fs_2, jpi ! vector opt. -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcana.F90
r7508 r7525 272 272 ztau_sais = 0.015 273 273 ztaun = ztau - ztau_sais * COS( (ztime - ztimemax) / (ztimemin - ztimemax) * rpi ) 274 ! module of wind stress and wind speed at T-point 274 275 275 zcoef = 1. / ( zrhoa * zcdrag ) 276 276 !$OMP PARALLEL … … 285 285 END DO 286 286 287 ! module of wind stress and wind speed at T-point 287 288 !$OMP DO schedule(static) private(jj, ji, ztx, zty, zmod) 288 289 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk_core.F90
r7508 r7525 282 282 zst(ji,jj) = pst(ji,jj) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 283 283 284 ! ... components ( U10m - U_oce ) at T-point (unmasked) 285 zwnd_i(ji,jj) = 0.e0 286 zwnd_j(ji,jj) = 0.e0 287 END DO 288 END DO 289 284 290 ! ----------------------------------------------------------------------------- ! 285 291 ! 0 Wind components and module at T-point relative to the moving ocean ! 286 292 ! ----------------------------------------------------------------------------- ! 287 293 288 ! ... components ( U10m - U_oce ) at T-point (unmasked)289 zwnd_i(ji,jj) = 0.e0290 zwnd_j(ji,jj) = 0.e0291 END DO292 END DO293 294 #if defined key_cyclone 294 295 CALL wnd_cyc( kt, zwnd_i, zwnd_j ) ! add analytical tropical cyclone (Vincent et al. JGR 2012) … … 325 326 ! ocean albedo assumed to be constant + modify now Qsr to include the diurnal cycle ! Short Wave 326 327 zztmp = 1. - albo 327 IF( ln_dm2dc ) THEN 328 qsr(:,:) = zztmp * sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) * tmask(:,:,1) 329 ELSE 330 qsr(:,:) = zztmp * sf(jp_qsr)%fnow(:,:,1) * tmask(:,:,1) 328 IF( ln_dm2dc ) THEN ; qsr(:,:) = zztmp * sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) * tmask(:,:,1) 329 ELSE ; qsr(:,:) = zztmp * sf(jp_qsr)%fnow(:,:,1) * tmask(:,:,1) 331 330 ENDIF 332 331 333 !$OMP PARALLEL DO schedule(static) private(jj, ji) 332 !$OMP PARALLEL 333 !$OMP DO schedule(static) private(jj, ji) 334 334 DO jj = 1, jpj 335 335 DO ji = 1, jpi 336 336 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave 337 END DO 338 END DO 339 !OMP END DO NOWAIT 337 340 ! ----------------------------------------------------------------------------- ! 338 341 ! II Turbulent FLUXES ! 339 342 ! ----------------------------------------------------------------------------- ! 340 343 344 !$OMP DO schedule(static) private(jj, ji) 345 DO jj = 1, jpj 346 DO ji = 1, jpi 341 347 ! ... 
specific humidity at SST and IST 342 348 zqsatw(ji,jj) = zcoef_qsatw * EXP( -5107.4 / zst(ji,jj) ) 343 344 END DO 345 END DO 349 END DO 350 END DO 351 !$OMP END PARALLEL 352 346 353 ! ... NCAR Bulk formulae, computation of Cd, Ch, Ce at T-point : 347 354 CALL turb_core_2z( rn_zqt, rn_zu, zst, sf(jp_tair)%fnow, zqsatw, sf(jp_humi)%fnow, wndm, & … … 388 395 ! Turbulent fluxes over ocean 389 396 ! ----------------------------- 397 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 398 !$OMP PARALLEL DO schedule(static) private(jj, ji) 399 DO jj = 1, jpj 400 DO ji = 1, jpi 401 !! q_air and t_air are (or "are almost") given at 10m (wind reference height) 402 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation 403 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat 404 END DO 405 END DO 406 ELSE 407 !$OMP PARALLEL DO schedule(static) private(jj, ji) 408 DO jj = 1, jpj 409 DO ji = 1, jpi 410 !! q_air and t_air are not given at 10m (wind reference height) 411 ! Values of temp. and hum. adjusted to height of wind during bulk algorithm iteration must be used!!! 412 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation 413 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat 414 END DO 415 END DO 416 ENDIF 390 417 !$OMP PARALLEL DO schedule(static) private(jj, ji) 391 418 DO jj = 1, jpj 392 419 DO ji = 1, jpi 393 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN394 !! q_air and t_air are (or "are almost") given at 10m (wind reference height)395 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation396 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat397 ELSE398 !! q_air and t_air are not given at 10m (wind reference height)399 ! Values of temp. 
and hum. adjusted to height of wind during bulk algorithm iteration must be used!!!400 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation401 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat402 ENDIF403 420 zqla (ji,jj) = Lv * zevap(ji,jj) ! Latent Heat 404 421 END DO … … 422 439 DO jj = 1, jpj 423 440 DO ji = 1, jpi 424 emp (ji,jj) = ( zevap(ji,jj) & ! mass flux (evap. - precip.)425 & - sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ) * tmask(ji,jj,1)426 !427 qns(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) & ! Downward Non Solar428 & - sf(jp_snow)%fnow(ji,jj,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip429 & - zevap(ji,jj) * pst(ji,jj) * rcp & ! remove evap heat content at SST430 & + ( sf(jp_prec)%fnow(ji,jj,1) - sf(jp_snow)%fnow(ji,jj,1) ) * rn_pfac & ! add liquid precip heat content at Tair431 & * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp &432 & + sf(jp_snow)%fnow(ji,jj,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow)433 & * ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1)441 emp (ji,jj) = ( zevap(ji,jj) & ! mass flux (evap. - precip.) 442 & - sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ) * tmask(ji,jj,1) 443 ! 444 qns(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) & ! Downward Non Solar 445 & - sf(jp_snow)%fnow(ji,jj,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 446 & - zevap(ji,jj) * pst(ji,jj) * rcp & ! remove evap heat content at SST 447 & + ( sf(jp_prec)%fnow(ji,jj,1) - sf(jp_snow)%fnow(ji,jj,1) ) * rn_pfac & ! add liquid precip heat content at Tair 448 & * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp & 449 & + sf(jp_snow)%fnow(ji,jj,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 450 & * ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) 434 451 END DO 435 452 END DO … … 454 471 CALL iom_put( "qt_oce" , qns+qsr ) ! 
output total downward heat over the ocean 455 472 !$OMP PARALLEL DO schedule(static) private(jj, ji) 456 DO jj = 1, jpj457 DO ji = 1, jpi458 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! output total precipitation [kg/m2/s]459 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! output solid precipitation [kg/m2/s]460 END DO461 END DO473 DO jj = 1, jpj 474 DO ji = 1, jpi 475 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! output total precipitation [kg/m2/s] 476 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! output solid precipitation [kg/m2/s] 477 END DO 478 END DO 462 479 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow 463 480 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 598 615 REAL(wp) :: zst2, zst3 599 616 REAL(wp) :: zcoef_dqlw, zcoef_dqla, zcoef_dqsb 600 REAL(wp) :: zztmp, z1_lsub 617 REAL(wp) :: zztmp, z1_lsub, ztmp1, ztmp2 ! temporary variable 601 618 !! 602 619 REAL(wp), DIMENSION(:,:,:), POINTER :: z_qlw ! long wave heat flux over ice … … 706 723 !$OMP END PARALLEL 707 724 CALL lim_thd_snwblow( pfrld, zsnw ) ! snow distribution over ice after wind blowing 708 709 725 emp_oce(:,:) = pfrld(:,:) * zevap(:,:) - ( tprecip(:,:) - sprecip(:,:) ) - sprecip(:,:) * (1._wp - zsnw ) 710 726 emp_ice(:,:) = SUM( a_i_b(:,:,:) * evap_ice(:,:,:), dim=3 ) - sprecip(:,:) * zsnw … … 712 728 713 729 ! --- heat flux associated with emp --- ! 714 qemp_oce(:,:) = - pfrld(:,:) * zevap(:,:) * sst_m(:,:) * rcp & ! evap at sst715 & + ( tprecip(:,:) - sprecip(:,:) ) * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & ! liquid precip at Tair716 & + sprecip(:,:) * ( 1._wp - zsnw ) *& ! solid precip at min(Tair,Tsnow)717 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus )718 qemp_ice(:,:) = sprecip(:,:) * zsnw * & ! solid precip (only)719 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus )730 qemp_oce(:,:) = - pfrld(:,:) * zevap(:,:) * sst_m(:,:) * rcp & ! 
evap at sst 731 & + ( tprecip(:,:) - sprecip(:,:) ) * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & ! liquid precip at Tair 732 & + sprecip(:,:) * ( 1._wp - zsnw ) * & ! solid precip at min(Tair,Tsnow) 733 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 734 qemp_ice(:,:) = sprecip(:,:) * zsnw * & ! solid precip (only) 735 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 720 736 721 737 ! --- total solar and non solar fluxes --- ! … … 723 739 qsr_tot(:,:) = pfrld(:,:) * qsr_oce(:,:) + SUM( a_i_b(:,:,:) * qsr_ice(:,:,:), dim=3 ) 724 740 725 ! --- heat content of precip over ice in J/m3 (to be used in 1D-thermo) 726 ! --- ! 741 ! --- heat content of precip over ice in J/m3 (to be used in 1D-thermo) --- ! 727 742 qprec_ice(:,:) = rhosn * ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 728 743 … … 741 756 ! ( Maykut and Untersteiner, 1971 ; Ebert and Curry, 1993 ) 742 757 ! 758 ztmp1 = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 759 ztmp2 = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 743 760 !$OMP PARALLEL DO schedule(static) private(jj, ji) 744 761 DO jj = 1, jpj 745 762 DO ji = 1, jpi 746 fr1_i0(ji,jj) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice )747 fr2_i0(ji,jj) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice )763 fr1_i0(ji,jj) = ztmp1 764 fr2_i0(ji,jj) = ztmp2 748 765 END DO 749 766 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcmod.F90
r7508 r7525 195 195 END DO 196 196 !$OMP END DO NOWAIT 197 !$OMP DO schedule(static) private(jp,jj,ji)198 197 DO jp = 1, jpts 198 !$OMP DO schedule(static) private(jj,ji) 199 199 DO jj = 1, jpj 200 200 DO ji = 1, jpi … … 336 336 ! ! ---------------------------------------- ! 337 337 !$OMP PARALLEL DO schedule(static) private(jj,ji) 338 DO jj = 1, jpj 339 DO ji = 1, jpi 340 utau_b(ji,jj) = utau(ji,jj) ! Swap the ocean forcing fields 341 vtau_b(ji,jj) = vtau(ji,jj) ! (except at nit000 where before fields 342 qns_b (ji,jj) = qns (ji,jj) ! are set at the end of the routine) 343 emp_b (ji,jj) = emp (ji,jj) 344 sfx_b (ji,jj) = sfx (ji,jj) 345 END DO 338 DO jj = 1, jpj 339 DO ji = 1, jpi 340 utau_b(ji,jj) = utau(ji,jj) ! Swap the ocean forcing fields 341 vtau_b(ji,jj) = vtau(ji,jj) ! (except at nit000 where before fields 342 qns_b (ji,jj) = qns (ji,jj) ! are set at the end of the routine) 343 emp_b (ji,jj) = emp (ji,jj) 344 sfx_b (ji,jj) = sfx (ji,jj) 346 345 END DO 346 END DO 347 347 IF ( ln_rnf ) THEN 348 348 !$OMP PARALLEL … … 354 354 END DO 355 355 !$OMP END DO NOWAIT 356 !$OMP DO schedule(static) private(jp,jj,ji)357 356 DO jp = 1, jpts 357 !$OMP DO schedule(static) private(jj,ji) 358 358 DO jj = 1, jpj 359 359 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv.F90
r7508 r7525 100 100 ! 101 101 ! !== effective transport ==! 102 !$OMP PARALLEL DO schedule(static) private(jk )102 !$OMP PARALLEL DO schedule(static) private(jk jj, ji) 103 103 DO jk = 1, jpkm1 104 zun(:,:,jk) = e2u (:,:) * e3u_n(:,:,jk) * un(:,:,jk) ! eulerian transport only 105 zvn(:,:,jk) = e1v (:,:) * e3v_n(:,:,jk) * vn(:,:,jk) 106 zwn(:,:,jk) = e1e2t(:,:) * wn(:,:,jk) 104 DO jj = 1, jpj 105 DO ji = 1, jpi 106 zun(ji,jj,jk) = e2u (ji,jj) * e3u_n(ji,jj,jk) * un(ji,jj,jk) ! eulerian transport only 107 zvn(ji,jj,jk) = e1v (ji,jj) * e3v_n(ji,jj,jk) * vn(ji,jj,jk) 108 zwn(ji,jj,jk) = e1e2t(ji,jj) * wn(ji,jj,jk) 109 END DO 110 END DO 107 111 END DO 108 112 ! 109 113 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! add z-tilde and/or vvl corrections 110 114 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 111 DO jk = 1, jpk 115 DO jk = 1, jpkm1 112 116 DO jj = 1, jpj 113 117 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_fct.F90
r7508 r7525 339 339 IF( ln_linssh ) THEN ! top ocean value: high order = upstream ==>> zwz=0 340 340 !$OMP PARALLEL DO schedule(static) private(jj, ji) 341 342 343 344 345 341 DO jj = 1, jpj 342 DO ji = 1, jpi 343 zwz(ji,jj,1) = 0._wp ! only ocean surface as interior zwz values have been w-masked 344 END DO 345 END DO 346 346 ENDIF 347 347 ! … … 368 368 ! 369 369 IF( l_trd ) THEN ! trend diagnostics (contribution of upstream fluxes) 370 !$OMP DO schedule(static) private(jk, jj, ji)370 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 371 371 DO jk = 1, jpk 372 372 DO jj = 1, jpj … … 375 375 ztrdy(ji,jj,jk) = ztrdy(ji,jj,jk) + zwy(ji,jj,jk) ! <<< Add to previously computed 376 376 ztrdz(ji,jj,jk) = ztrdz(ji,jj,jk) + zwz(ji,jj,jk) ! <<< Add to previously computed 377 END DO378 END DO379 END DO377 END DO 378 END DO 379 END DO 380 380 ! 381 381 CALL trd_tra( kt, cdtype, jn, jptra_xad, ztrdx, pun, ptn(:,:,:,jn) ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_mus.F90
r7508 r7525 114 114 !$OMP PARALLEL 115 115 !$OMP DO schedule(static) private(jj, ji) 116 DO jj = 1, jpj117 DO ji = 1, jpi118 upsmsk(ji,jj) = 0._wp ! not upstream by default119 END DO120 END DO116 DO jj = 1, jpj 117 DO ji = 1, jpi 118 upsmsk(ji,jj) = 0._wp ! not upstream by default 119 END DO 120 END DO 121 121 ! 122 !$OMP DO schedule(static) private(jk )122 !$OMP DO schedule(static) private(jk,jj,ji) 123 123 DO jk = 1, jpkm1 124 xind(:,:,jk) = 1._wp & ! =>1 where up-stream is not needed 125 & - MAX ( rnfmsk(:,:) * rnfmsk_z(jk), & ! =>0 near runoff mouths (& closed sea outflows) 126 & upsmsk(:,:) ) * tmask(:,:,jk) ! =>0 in some user defined area 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 xind(ji,jj,jk) = 1._wp & ! =>1 where up-stream is not needed 127 & - MAX ( rnfmsk(ji,jj) * rnfmsk_z(jk), & ! =>0 near runoff mouths (& closed sea outflows) 128 & upsmsk(ji,jj) ) * tmask(ji,jj,jk) ! =>0 in some user defined area 129 END DO 130 END DO 127 131 END DO 128 132 !$OMP END DO NOWAIT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf_iso.F90
r7508 r7525 328 328 ztfw(1,jj,jk) = 0._wp ; ztfw(jpi,jj,jk) = 0._wp 329 329 END DO 330 330 END DO 331 331 ! 332 332 ! Vertical fluxes … … 338 338 ztfw(ji,jj, 1 ) = 0._wp ; ztfw(ji,jj,jpk) = 0._wp 339 339 END DO 340 340 END DO 341 341 342 342 !$OMP DO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w, zcoef3, zcoef4) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traqsr.F90
r7037 r7525 128 128 IF( l_trdtra ) THEN ! trends diagnostic: save the input temperature trend 129 129 CALL wrk_alloc( jpi,jpj,jpk, ztrdt ) 130 !$OMP PARALLEL WORKSHARE 131 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 132 !$OMP END PARALLEL WORKSHARE 130 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 131 DO jk = 1, jpk 132 DO jj = 1, jpj 133 DO ji = 1, jpi 134 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) 135 END DO 136 END DO 137 END DO 133 138 ENDIF 134 139 ! … … 144 149 ELSE ! No restart or restart not found: Euler forward time stepping 145 150 z1_2 = 1._wp 146 !$OMP PARALLEL WORKSHARE 147 qsr_hc_b(:,:,:) = 0._wp 148 !$OMP END PARALLEL WORKSHARE 151 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 152 DO jk = 1, jpk 153 DO jj = 1, jpj 154 DO ji = 1, jpi 155 qsr_hc_b(ji,jj,jk) = 0._wp 156 END DO 157 END DO 158 END DO 149 159 ENDIF 150 160 ELSE !== Swap of qsr heat content ==! 151 161 z1_2 = 0.5_wp 152 !$OMP PARALLEL WORKSHARE 153 qsr_hc_b(:,:,:) = qsr_hc(:,:,:) 154 !$OMP END PARALLEL WORKSHARE 162 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 163 DO jk = 1, jpk 164 DO jj = 1, jpj 165 DO ji = 1, jpi 166 qsr_hc_b(ji,jj,jk) = qsr_hc(ji,jj,jk) 167 END DO 168 END DO 169 END DO 155 170 ENDIF 156 171 ! … … 161 176 CASE( np_BIO ) !== bio-model fluxes ==! 162 177 ! 163 !$OMP PARALLEL DO schedule(static) private(jk )178 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 164 179 DO jk = 1, nksr 165 qsr_hc(:,:,jk) = r1_rau0_rcp * ( etot3(:,:,jk) - etot3(:,:,jk+1) ) 180 DO jj = 1, jpj 181 DO ji = 1, jpi 182 qsr_hc(ji,jj,jk) = r1_rau0_rcp * ( etot3(ji,jj,jk) - etot3(ji,jj,jk+1) ) 183 END DO 184 END DO 166 185 END DO 167 186 ! … … 198 217 END DO 199 218 ELSE !* constant chrlorophyll 200 !$OMP PARALLEL DO schedule(static) private(jk )219 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 201 220 DO jk = 1, nksr + 1 202 zchl3d(:,:,jk) = 0.05 221 DO jj = 1, jpj 222 DO ji = 1, jpi 223 zchl3d(ji,jj,jk) = 0.05 224 ENDDO 225 ENDDO 203 226 ENDDO 204 227 ENDIF … … 305 328 ! 
306 329 !$OMP PARALLEL 307 !$OMP WORKSHARE 308 zetot(:,:,nksr+1:jpk) = 0._wp ! below ~400m set to zero 309 !$OMP END WORKSHARE 330 !$OMP DO schedule(static) private(jj,ji) 331 DO jj = 1, jpj 332 DO ji = 1, jpi ! vector opt. 333 zetot(ji,jj,nksr+1:jpk) = 0._wp ! below ~400m set to zero 334 END DO 335 END DO 310 336 DO jk = nksr, 1, -1 311 337 !$OMP DO schedule(static) private(jj,ji) … … 329 355 ! 330 356 IF( l_trdtra ) THEN ! qsr tracers trends saved for diagnostics 331 !$OMP PARALLEL WORKSHARE 332 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 333 !$OMP END PARALLEL WORKSHARE 357 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 358 DO jk = 1, jpk 359 DO jj = 1, jpj 360 DO ji = 1, jpi 361 ztrdt(ji,jj,jk) = tsa(ji,jj,jk,jp_tem) - ztrdt(ji,jj,jk) 362 END DO 363 END DO 364 END DO 334 365 CALL trd_tra( kt, 'TRA', jp_tem, jptra_qsr, ztrdt ) 335 366 CALL wrk_dealloc( jpi,jpj,jpk, ztrdt ) … … 458 489 END SELECT 459 490 ! 460 !$OMP PARALLEL WORKSHARE 461 qsr_hc(:,:,:) = 0._wp ! now qsr heat content set to zero where it will not be computed 462 !$OMP END PARALLEL WORKSHARE 491 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 492 DO jk = 1, jpk 493 DO jj = 1, jpj 494 DO ji = 1, jpi 495 qsr_hc(ji,jj,jk) = 0._wp ! now qsr heat content set to zero where it will not be computed 496 END DO 497 END DO 498 END DO 463 499 ! 464 500 ! 1st ocean level attenuation coefficient (used in sbcssm) … … 466 502 CALL iom_get( numror, jpdom_autoglo, 'fraqsr_1lev' , fraqsr_1lev ) 467 503 ELSE 468 !$OMP PARALLEL WORKSHARE 469 fraqsr_1lev(:,:) = 1._wp ! default : no penetration 470 !$OMP END PARALLEL WORKSHARE 504 !$OMP PARALLEL DO schedule(static) private(jj,ji) 505 DO jj = 1, jpj 506 DO ji = 1, jpi 507 fraqsr_1lev(ji,jj) = 1._wp ! default : no penetration 508 END DO 509 END DO 471 510 ENDIF 472 511 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftke.F90
r7508 r7525 900 900 ! !* set vertical eddy coef. to the background value 901 901 !$OMP PARALLEL 902 !$OMP DO schedule(static) private(jk )902 !$OMP DO schedule(static) private(jk,jj,ji) 903 903 DO jk = 1, jpk 904 avt (:,:,jk) = avtb(jk) * wmask (:,:,jk) 905 avm (:,:,jk) = avmb(jk) * wmask (:,:,jk) 906 avmu(:,:,jk) = avmb(jk) * wumask(:,:,jk) 907 avmv(:,:,jk) = avmb(jk) * wvmask(:,:,jk) 904 DO jj = 1, jpj 905 DO ji = 1, jpi 906 avt (ji,jj,jk) = avtb(jk) * wmask (ji,jj,jk) 907 avm (ji,jj,jk) = avmb(jk) * wmask (ji,jj,jk) 908 avmu(ji,jj,jk) = avmb(jk) * wumask(ji,jj,jk) 909 avmv(ji,jj,jk) = avmb(jk) * wvmask(ji,jj,jk) 910 END DO 911 END DO 908 912 END DO 909 913 !$OMP END DO NOWAIT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftmx.F90
r7508 r7525 529 529 !$OMP PARALLEL 530 530 !$OMP DO schedule(static) private(jk, jj, ji) 531 DO jk = 1, jpk532 DO jj = 1, jpj533 DO ji = 1, jpi534 zav_tide(ji,jj,jk) = 0.e0535 END DO536 END DO537 END DO531 DO jk = 1, jpk 532 DO jj = 1, jpj 533 DO ji = 1, jpi 534 zav_tide(ji,jj,jk) = 0.e0 535 END DO 536 END DO 537 END DO 538 538 !$OMP DO schedule(static) private(jk) 539 539 DO jk = 2, jpkm1 … … 1024 1024 DO jj = 1, jpj 1025 1025 DO ji = 1, jpi 1026 znu_t(ji,jj,jk) = 1.e-4_wp * ( 17.91_wp - 0.53810_wp * tsn(ji,jj,jk,jp_tem) + 0.00694_wp * tsn(ji,jj,jk,jp_tem) * tsn(ji,jj,jk,jp_tem) & 1026 znu_t(ji,jj,jk) = 1.e-4_wp * ( 17.91_wp - 0.53810_wp * tsn(ji,jj,jk,jp_tem) & 1027 & + 0.00694_wp * tsn(ji,jj,jk,jp_tem) * tsn(ji,jj,jk,jp_tem) & 1027 1028 & + 0.02305_wp * tsn(ji,jj,jk,jp_sal) ) * tmask(ji,jj,jk) * r1_rau0 1028 1029 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/step.F90
r7508 r7525 73 73 !! -8- Outputs and diagnostics 74 74 !!---------------------------------------------------------------------- 75 INTEGER :: j k, jj, ji, jt! dummy loop indice75 INTEGER :: jn, jk, jj, ji ! dummy loop indice 76 76 INTEGER :: indic ! error indicator if < 0 77 77 INTEGER :: kcall ! optional integer argument (dom_vvl_sf_nxt) … … 202 202 & rhd, gru , grv , grui, grvi ) ! of t, s, rd at the first ocean level 203 203 !!jc: fs simplification 204 !$OMP PARALLEL 205 !$OMP DO schedule(static) private(jk, jj, ji) 206 DO jk = 1, jpk 207 DO jj = 1, jpj 208 DO ji = 1, jpi 209 ua(ji,jj,jk) = 0._wp ! set dynamics trends to zero 210 va(ji,jj,jk) = 0._wp 211 END DO 204 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 205 DO jk = 1, jpk 206 DO jj = 1, jpj 207 DO ji = 1, jpi 208 ua(ji,jj,jk) = 0._wp ! set dynamics trends to zero 209 va(ji,jj,jk) = 0._wp 212 210 END DO 213 211 END DO 214 !$OMP END DO NOWAIT 215 !$OMP DO schedule(static) private(jt, jk, jj, ji) 216 DO jt = 1, jpts 217 DO jk = 1, jpk 218 DO jj = 1, jpj 219 DO ji = 1, jpi 220 tsa(ji,jj,jk,jt) = 0._wp ! set tracer trends to zero 221 END DO 222 END DO 223 END DO 224 END DO 225 !$OMP END PARALLEL 212 END DO 226 213 IF( lk_asminc .AND. ln_asmiau .AND. ln_dyninc ) & 227 214 & CALL dyn_asm_inc ( kstp ) ! apply dynamics assimilation increment … … 276 263 ! Active tracers 277 264 !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< 265 DO jn = 1, jpts 266 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 267 DO jk = 1, jpk 268 DO jj = 1, jpj 269 DO ji = 1, jpi 270 tsa(ji,jj,jk,jn) = 0._wp ! set tracer trends to zero 271 END DO 272 END DO 273 END DO 274 END DO 278 275 IF( lk_asminc .AND. ln_asmiau .AND. & 279 276 & ln_trainc ) CALL tra_asm_inc ( kstp ) ! apply tracer assimilation increment
Note: See TracChangeset
for help on using the changeset viewer.