- Timestamp:
- 2016-10-18T15:32:04+02:00 (8 years ago)
- Location:
- branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO
- Files:
-
- 89 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limadv_2.F90
r3625 r7037 85 85 zrdt = rdt_ice * pdf ! If ice drift field is too fast, use an appropriate time step for advection. 86 86 87 !$OMP PARALLEL 88 !$OMP DO schedule(static) private(jj,ji,zslpmax,zs1max,zs1new,zs2new,zin0) 87 89 DO jj = 1, jpj 88 90 DO ji = 1, jpi … … 102 104 END DO 103 105 END DO 106 !$OMP END DO NOWAIT 104 107 105 108 ! Initialize volumes of boxes (=area if adv_x first called, =psm otherwise) 106 psm (:,:) = MAX( pcrh * area(:,:) + ( 1.0 - pcrh ) * psm(:,:) , epsi20 ) 109 !$OMP DO schedule(static) private(jj,ji) 110 DO jj = 1, jpj 111 DO ji = 1, jpi 112 psm (ji,jj) = MAX( pcrh * area(ji,jj) + ( 1.0 - pcrh ) * psm(ji,jj) , epsi20 ) 113 END DO 114 END DO 107 115 108 116 ! Calculate fluxes and moments between boxes i<-->i+1 117 !$OMP DO schedule(static) private(jj,ji,zalf,zalfq,zalf1,zalf1q) 109 118 DO jj = 1, jpj ! Flux from i to i+1 WHEN u GT 0 110 119 DO ji = 1, jpi … … 133 142 END DO 134 143 END DO 135 144 !$OMP END DO 145 146 !$OMP DO schedule(static) private(jj,ji,zalf,zalfq,zalf1,zalf1q) 136 147 DO jj = 1, jpjm1 ! Flux from i+1 to i when u LT 0. 137 148 DO ji = 1, fs_jpim1 … … 153 164 END DO 154 165 166 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1) 155 167 DO jj = 2, jpjm1 ! Readjust moments remaining in the box. 156 168 DO ji = fs_2, fs_jpim1 … … 168 180 169 181 ! Put the temporary moments into appropriate neighboring boxes. 182 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1,zalf,zalf1,ztemp) 170 183 DO jj = 2, jpjm1 ! Flux from i to i+1 IF u GT 0. 171 184 DO ji = fs_2, fs_jpim1 … … 190 203 END DO 191 204 205 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1,zalf,zalf1,ztemp) 192 206 DO jj = 2, jpjm1 ! Flux from i+1 to i IF u LT 0. 193 207 DO ji = fs_2, fs_jpim1 … … 210 224 END DO 211 225 END DO 226 !$OMP END DO NOWAIT 227 !$OMP END PARALLEL 212 228 213 229 !-- Lateral boundary conditions … … 268 284 zrdt = rdt_ice * pdf ! If ice drift field is too fast, use an appropriate time step for advection. 
269 285 286 !$OMP PARALLEL 287 !$OMP DO schedule(static) private(jj,ji,zslpmax,zs1max,zs1new,zs2new,zin0) 270 288 DO jj = 1, jpj 271 289 DO ji = 1, jpi … … 285 303 END DO 286 304 END DO 305 !$OMP END DO NOWAIT 287 306 288 307 ! Initialize volumes of boxes (=area if adv_x first called, =psm otherwise) 289 psm(:,:) = MAX( pcrh * area(:,:) + ( 1.0 - pcrh ) * psm(:,:) , epsi20 ) 308 !$OMP DO schedule(static) private(jj,ji,zslpmax,zs1max,zs1new,zs2new,zin0) 309 DO jj = 1, jpj 310 DO ji = 1, jpi 311 psm(ji,jj) = MAX( pcrh * area(ji,jj) + ( 1.0 - pcrh ) * psm(ji,jj) , epsi20 ) 312 END DO 313 END DO 290 314 291 315 ! Calculate fluxes and moments between boxes j<-->j+1 316 !$OMP DO schedule(static) private(jj,ji,zalf,zalfq,zalf1,zalf1q) 292 317 DO jj = 1, jpj ! Flux from j to j+1 WHEN v GT 0 293 318 DO ji = 1, jpi … … 316 341 END DO 317 342 ! 343 !$OMP DO schedule(static) private(jj,ji,zalf,zalfq,zalf1,zalf1q) 318 344 DO jj = 1, jpjm1 ! Flux from j+1 to j when v LT 0. 319 345 DO ji = 1, jpi … … 336 362 337 363 ! Readjust moments remaining in the box. 364 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1) 338 365 DO jj = 2, jpj 339 366 DO ji = 1, jpi … … 352 379 353 380 ! Put the temporary moments into appropriate neighboring boxes. 381 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1,zalf,zalf1,ztemp) 354 382 DO jj = 2, jpjm1 ! Flux from j to j+1 IF v GT 0. 355 383 DO ji = 1, jpi … … 375 403 END DO 376 404 ! 405 !$OMP DO schedule(static) private(jj,ji,zbt,zbt1,zalf,zalf1,ztemp) 377 406 DO jj = 2, jpjm1 ! Flux from j+1 to j IF v LT 0. 378 407 DO ji = 1, jpi … … 396 425 END DO 397 426 END DO 427 !$OMP END DO NOWAIT 428 !$OMP END PARALLEL 398 429 399 430 !-- Lateral boundary conditions -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limdyn_2.F90
r5123 r7037 78 78 ! 79 79 ! Mean ice and snow thicknesses. 80 !$OMP PARALLEL WORKSHARE 80 81 hsnm(:,:) = ( 1.0 - frld(:,:) ) * hsnif(:,:) 81 82 hicm(:,:) = ( 1.0 - frld(:,:) ) * hicif(:,:) 83 !$OMP END PARALLEL WORKSHARE 82 84 ! 83 85 ! ! Rheology (ice dynamics) … … 170 172 SELECT CASE( cp_ice_msh ) ! ice-ocean relative velocity at u- & v-pts 171 173 CASE( 'C' ) ! EVP : C-grid ice dynamics 174 !$OMP PARALLEL WORKSHARE 172 175 zu_io(:,:) = u_ice(:,:) - ssu_m(:,:) ! ice-ocean & ice velocity at ocean velocity points 173 176 zv_io(:,:) = v_ice(:,:) - ssv_m(:,:) 177 !$OMP END PARALLEL WORKSHARE 174 178 CASE( 'I' ) ! VP : B-grid ice dynamics (I-point) 179 !$OMP PARALLEL DO schedule(static) private(jj, ji) 175 180 DO jj = 1, jpjm1 ! u_ice v_ice at I-point ; ssu_m, ssv_m at U- & V-points 176 181 DO ji = 1, jpim1 ! NO vector opt. ! … … 183 188 ! frictional velocity at T-point 184 189 zcoef = 0.5_wp * cw 190 !$OMP PARALLEL DO schedule(static) private(jj, ji) 185 191 DO jj = 2, jpjm1 186 192 DO ji = 2, jpim1 ! NO vector opt. because of zu_io … … 193 199 ! 194 200 zcoef = SQRT( 0.5 ) / rau0 201 !$OMP PARALLEL DO schedule(static) private(jj, ji) 195 202 DO jj = 2, jpjm1 196 203 DO ji = fs_2, fs_jpim1 ! vector opt. … … 278 285 pstarh = pstar / 2.0 279 286 ! 287 !$OMP PARALLEL WORKSHARE 280 288 ahiu(:,:) = ahi0 * umask(:,:,1) ! Ice eddy Diffusivity coefficients. 281 289 ahiv(:,:) = ahi0 * vmask(:,:,1) 290 !$OMP END PARALLEL WORKSHARE 282 291 ! 283 292 END SUBROUTINE lim_dyn_init_2 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limhdf_2.F90
r4990 r7037 70 70 IF( lk_mpp ) CALL mpp_sum( ierr ) 71 71 IF( ierr /= 0 ) CALL ctl_stop( 'STOP', 'lim_hdf_2 : unable to allocate standard arrays' ) 72 !$OMP PARALLEL DO schedule(static) private(jj, ji) 72 73 DO jj = 2, jpjm1 73 74 DO ji = fs_2 , fs_jpim1 ! vector opt. … … 83 84 zeps = 2._wp * epsi04 84 85 ! 86 !$OMP PARALLEL WORKSHARE 85 87 ztab0(:, : ) = ptab(:,:) ! Arrays initialization 88 !$OMP END PARALLEL WORKSHARE 86 89 zdiv0(:, 1 ) = 0._wp 87 90 zdiv0(:,jpj) = 0._wp 88 zflu (jpi,:) = 0._wp 91 zflu (jpi,:) = 0._wp 89 92 zflv (jpi,:) = 0._wp 90 93 zdiv0(1, :) = 0._wp … … 98 101 iter = iter + 1 ! incrementation of the sub-time step number 99 102 ! 103 !$OMP PARALLEL 104 !$OMP DO schedule(static) private(jj, ji) 100 105 DO jj = 1, jpjm1 ! diffusive fluxes in U- and V- direction 101 106 DO ji = 1 , fs_jpim1 ! vector opt. … … 105 110 END DO 106 111 ! 112 !$OMP DO schedule(static) private(jj, ji) 107 113 DO jj= 2, jpjm1 ! diffusive trend : divergence of the fluxes 108 114 DO ji = fs_2 , fs_jpim1 ! vector opt. … … 111 117 END DO 112 118 END DO 119 !$OMP END DO NOWAIT 120 !$OMP END PARALLEL 113 121 ! 114 IF( iter == 1 ) zdiv0(:,:) = zdiv(:,:) ! save the 1st evaluation of the diffusive trend in zdiv0 122 IF( iter == 1 ) THEN 123 !$OMP PARALLEL DO schedule(static) private(jj, ji) 124 DO jj= 2, jpjm1 ! diffusive trend : divergence of the fluxes 125 DO ji = fs_2 , fs_jpim1 ! vector opt. 126 zdiv0(ji,jj) = zdiv(ji,jj) ! save the 1st evaluation of the diffusive trend in zdiv0 127 END DO 128 END DO 129 END IF 115 130 ! 131 !$OMP PARALLEL DO schedule(static) private(jj,ji,zrlxint) 116 132 DO jj = 2, jpjm1 ! iterative evaluation 117 133 DO ji = fs_2 , fs_jpim1 ! vector opt. … … 127 143 zconv = 0._wp ! convergence test 128 144 145 !$OMP PARALLEL DO schedule(static) private(jj,ji) REDUCTION(MAX:zconv) 129 146 DO jj = 2, jpjm1 130 147 DO ji = 2, jpim1 … … 134 151 IF( lk_mpp ) CALL mpp_max( zconv ) ! 
max over the global domain 135 152 153 !$OMP PARALLEL WORKSHARE 136 154 ptab(:,:) = zrlx(:,:) 155 !$OMP END PARALLEL WORKSHARE 137 156 ! 138 157 END DO ! end of sub-time step loop 139 158 140 159 IF(ln_ctl) THEN 160 !$OMP PARALLEL WORKSHARE 141 161 zrlx(:,:) = ptab(:,:) - ztab0(:,:) 162 !$OMP END PARALLEL WORKSHARE 142 163 WRITE(charout,FMT="(' lim_hdf : zconv =',D23.16, ' iter =',I4,2X)") zconv, iter 143 164 CALL prt_ctl( tab2d_1=zrlx, clinfo1=charout ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limistate_2.F90
r5541 r7037 70 70 71 71 CALL eos_fzp( tsn(:,:,1,jp_sal), tfu(:,:) ) ! freezing/melting point of sea water [Celcius] 72 !$OMP PARALLEL 73 !$OMP WORKSHARE 72 74 tfu(:,:) = tfu(:,:) * tmask(:,:,1) 73 75 !$OMP END WORKSHARE 76 77 !$OMP DO schedule(static) private(jj, ji) 74 78 DO jj = 1, jpj 75 79 DO ji = 1, jpi … … 91 95 END DO 92 96 97 !$OMP WORKSHARE 93 98 tfu(:,:) = tfu(:,:) + rt0 ! ftu converted from Celsius to Kelvin (rt0 over land) 94 99 … … 97 102 tbif (:,:,2) = tfu(:,:) 98 103 tbif (:,:,3) = tfu(:,:) 99 104 !$OMP END WORKSHARE 105 106 !$OMP END PARALLEL 100 107 ENDIF 101 108 109 !$OMP PARALLEL WORKSHARE 102 110 fsbbq (:,:) = 0.e0 103 111 qstoif(:,:) = 0.e0 … … 129 137 stress12_i(:,:) = 0._wp 130 138 #endif 139 !$OMP END PARALLEL WORKSHARE 131 140 132 141 !-- lateral boundary conditions … … 136 145 ! C A U T I O N frld = 1 over land and lbc_lnk put zero along 137 146 ! ************* closed boundaries herefore we force to one over land 147 !$OMP PARALLEL WORKSHARE 138 148 frld(:,:) = tms(:,:) * frld(:,:) + ( 1. - tms(:,:) ) 149 !$OMP END PARALLEL WORKSHARE 139 150 140 151 CALL lbc_lnk( hsnif, 'T', 1. ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limmsh_2.F90
r3625 r7037 135 135 !---------------------------------------- 136 136 ! ! akappa 137 !$OMP PARALLEL DO schedule(static) private(jj, ji) 137 138 DO jj = 2, jpj 138 zd1d2(:,jj) = e1v(:,jj) - e1v(:,jj-1) 139 DO ji = 1, jpi 140 zd1d2(ji,jj) = e1v(ji,jj) - e1v(ji,jj-1) 141 END DO 139 142 END DO 140 143 CALL lbc_lnk( zd1d2, 'T', -1. ) 141 144 142 DO ji = 2, jpi 143 zd2d1(ji,:) = e2u(ji,:) - e2u(ji-1,:) 145 !$OMP PARALLEL DO schedule(static) private(jj, ji) 146 DO jj = 1, jpj 147 DO ji = 2, jpi 148 zd2d1(ji,jj) = e2u(ji,jj) - e2u(ji-1,jj) 149 END DO 144 150 END DO 145 151 CALL lbc_lnk( zd2d1, 'T', -1. ) 146 152 153 !$OMP PARALLEL 154 !$OMP WORKSHARE 147 155 akappa(:,:,1,1) = 1.0 / ( 2.0 * e1t(:,:) ) 148 156 akappa(:,:,1,2) = zd1d2(:,:) / ( 4.0 * e1t(:,:) * e2t(:,:) ) 149 157 akappa(:,:,2,1) = zd2d1(:,:) / ( 4.0 * e1t(:,:) * e2t(:,:) ) 150 158 akappa(:,:,2,2) = 1.0 / ( 2.0 * e2t(:,:) ) 159 !$OMP END WORKSHARE NOWAIT 151 160 152 161 ! ! weights (wght) 162 !$OMP DO schedule(static) private(jj,ji,zusden) 153 163 DO jj = 2, jpj 154 164 DO ji = 2, jpi … … 161 171 END DO 162 172 END DO 173 !$OMP END DO NOWAIT 174 !$OMP END PARALLEL 163 175 CALL lbc_lnk( wght(:,:,1,1), 'I', 1. ) ! CAUTION: even with the lbc_lnk at ice U-V-point 164 176 CALL lbc_lnk( wght(:,:,1,2), 'I', 1. ) ! the value of wght at jpj is wrong … … 168 180 ! metric coefficients for sea ice dynamic (EVP rheology) 169 181 !---------------------------------------- 182 !$OMP PARALLEL DO schedule(static) private(jj,ji,zusden) 170 183 DO jj = 1, jpjm1 ! weights (wght) at F-points 171 184 DO ji = 1, jpim1 … … 186 199 187 200 #if defined key_lim2_vp 201 !$OMP PARALLEL DO schedule(static) private(jj,ji,zh1p,zh2p,zusden,zusden2,zd1d2p,zd2d1p) 188 202 DO jj = 2, jpj 189 203 DO ji = 2, jpi ! NO vector opt. … … 259 273 !---------------------------- 260 274 275 !$OMP PARALLEL WORKSHARE 261 276 tms(:,:) = tmask(:,:,1) ! 
ice T-point : use surface tmask 262 277 278 !$OMP END PARALLEL WORKSHARE 263 279 #if defined key_lim2_vp 264 280 ! VP rheology : ice velocity point is I-point … … 266 282 tmu(:,1) = 0.e0 267 283 tmu(1,:) = 0.e0 284 !$OMP PARALLEL DO schedule(static) private(jj, ji) 268 285 DO jj = 2, jpj ! ice U.V-point: computed from ice T-point mask 269 286 DO ji = 2, jpim1 ! NO vector opt. … … 275 292 ! EVP rheology : ice velocity point are U- & V-points ; ice vorticity 276 293 ! point is F-point 294 !$OMP PARALLEL WORKSHARE 277 295 tmu(:,:) = umask(:,:,1) 278 296 tmv(:,:) = vmask(:,:,1) 279 297 tmf(:,:) = 0.e0 ! used of fmask except its special value along the coast (rn_shlat) 280 298 WHERE( fmask(:,:,1) == 1.e0 ) tmf(:,:) = 1.e0 299 !$OMP END PARALLEL WORKSHARE 281 300 #endif 282 301 ! 283 302 ! unmasked and masked area of T-grid cell 303 !$OMP PARALLEL WORKSHARE 284 304 area(:,:) = e1t(:,:) * e2t(:,:) 305 !$OMP END PARALLEL WORKSHARE 285 306 ! 286 307 #if defined key_lim2_vp -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limsbc_2.F90
r6140 r7037 128 128 !------------------------------------------! 129 129 130 !$OMP PARALLEL 131 !$OMP WORKSHARE 130 132 zqnsoce(:,:) = qns(:,:) 133 !$OMP END WORKSHARE NOWAIT 134 !$OMP DO schedule(static) private(jj,ji,zinda,ifvt,i1mfr,idfr,iflt,ial,iadv,ifral,ifrdv,zqsr,zqns,zqhc,zemp,zemp_snw,zfmm,zfsalt,zcd) 131 135 DO jj = 1, jpj 132 136 DO ji = 1, jpi … … 229 233 END DO 230 234 END DO 235 !$OMP END DO NOWAIT 236 !$OMP END PARALLEL 231 237 ! !------------------------------------------! 232 238 ! ! mass of snow and ice per unit area ! 233 239 ! !------------------------------------------! 234 240 IF( nn_ice_embd /= 0 ) THEN ! embedded sea-ice (mass required) 241 !$OMP PARALLEL WORKSHARE 235 242 snwice_mass_b(:,:) = snwice_mass(:,:) ! save mass from the previous ice time step 236 243 ! ! new mass per unit area … … 238 245 ! ! time evolution of snow+ice mass 239 246 snwice_fmass (:,:) = ( snwice_mass(:,:) - snwice_mass_b(:,:) ) / rdt_ice 247 !$OMP END PARALLEL WORKSHARE 240 248 ENDIF 241 249 … … 253 261 254 262 IF( ln_cpl) THEN 263 !$OMP PARALLEL WORKSHARE 255 264 tn_ice(:,:,1) = sist(:,:) ! sea-ice surface temperature 256 265 ht_i(:,:,1) = hicif(:,:) 257 266 ht_s(:,:,1) = hsnif(:,:) 258 267 a_i(:,:,1) = fr_i(:,:) 268 !$OMP END PARALLEL WORKSHARE 259 269 ! ! Computation of snow/ice and ocean albedo 260 270 CALL albedo_ice( tn_ice, ht_i, ht_s, zalbp, zalb ) 271 !$OMP PARALLEL WORKSHARE 261 272 alb_ice(:,:,1) = 0.5 * ( zalbp(:,:,1) + zalb (:,:,1) ) ! Ice albedo (mean clear and overcast skys) 273 !$OMP END PARALLEL WORKSHARE 262 274 IF( iom_use('icealb_cea' ) ) CALL iom_put( 'icealb_cea', alb_ice(:,:,1) * fr_i(:,:) ) ! ice albedo 263 275 ENDIF … … 320 332 IF( MOD( kt-1, nn_fsbc ) == 0 ) THEN !== Ice time-step only ==! (i.e. surface module time-step) 321 333 ! 
334 !$OMP PARALLEL 335 !$OMP DO schedule(static) private(jj,ji,zu_i,zv_i) 322 336 DO jj = 1, jpj !* modulus of ice-ocean relative velocity at I-point 323 337 DO ji = 1, jpi … … 327 341 END DO 328 342 END DO 343 !$OMP DO schedule(static) private(jj,ji,zumt) 329 344 DO jj = 1, jpjm1 !* update the modulus of stress at ocean surface (T-point) 330 345 DO ji = 1, jpim1 ! NO vector opt. … … 336 351 END DO 337 352 END DO 353 !$OMP END DO NOWAIT 354 !$OMP END PARALLEL 338 355 CALL lbc_lnk( taum, 'T', 1. ) 339 356 ! 357 !$OMP PARALLEL WORKSHARE 340 358 utau_oce(:,:) = utau(:,:) !* save the air-ocean stresses at ice time-step 341 359 vtau_oce(:,:) = vtau(:,:) 360 !$OMP END PARALLEL WORKSHARE 342 361 ! 343 362 ENDIF … … 346 365 ! 347 366 ! !* ice/ocean stress WITH a ice-ocean rotation angle at I-point 367 !$OMP PARALLEL 368 !$OMP DO schedule(static) private(jj,ji,zsang,zu_i,zv_i,zmodi) 348 369 DO jj = 2, jpj 349 370 zsang = SIGN( 1._wp, gphif(1,jj) ) * sangvg ! change the cosine angle sign in the SH … … 359 380 END DO 360 381 ! !* surface ocean stresses at u- and v-points 382 !$OMP DO schedule(static) private(jj,ji,zutau_ice,zvtau_ice,zfrldu,zfrldv) 361 383 DO jj = 2, jpjm1 362 384 DO ji = 2, jpim1 ! NO vector opt. … … 372 394 END DO 373 395 END DO 396 !$OMP END DO NOWAIT 397 !$OMP END PARALLEL 374 398 CALL lbc_lnk( utau, 'U', -1. ) ; CALL lbc_lnk( vtau, 'V', -1. ) ! lateral boundary condition 375 399 ! … … 381 405 IF( MOD( kt-1, nn_fsbc ) == 0 ) THEN !== Ice time-step only ==! (i.e. surface module time-step) 382 406 ! 407 !$OMP PARALLEL DO schedule(static) private(jj,ji,zu_t,zv_t,zmodt) 383 408 DO jj = 2, jpjm1 !* modulus of the ice-ocean velocity at T-point 384 409 DO ji = fs_2, fs_jpim1 … … 393 418 CALL lbc_lnk( taum, 'T', 1. ) ; CALL lbc_lnk( tmod_io, 'T', 1. ) 394 419 ! 420 !$OMP PARALLEL WORKSHARE 395 421 utau_oce(:,:) = utau(:,:) !* save the air-ocean stresses at ice time-step 396 422 vtau_oce(:,:) = vtau(:,:) 423 !$OMP END PARALLEL WORKSHARE 397 424 ! 
398 425 ENDIF … … 400 427 ! !== at each ocean time-step ==! 401 428 ! 429 !$OMP PARALLEL DO schedule(static) private(jj,ji,zutau_ice,zvtau_ice,zfrldu,zfrldv) 402 430 DO jj = 2, jpjm1 !* ice stress over ocean WITHOUT a ice-ocean rotation angle 403 431 DO ji = fs_2, fs_jpim1 … … 435 463 !! ** input : Namelist namicedia 436 464 !!------------------------------------------------------------------- 437 INTEGER :: jk ! local integer465 INTEGER :: jk,jj,ji ! local integer 438 466 !!------------------------------------------------------------------- 439 467 ! … … 447 475 r1_rdtice = 1._wp / rdt_ice 448 476 ! 477 !$OMP PARALLEL WORKSHARE 449 478 soce_0(:,:) = soce ! constant SSS and ice salinity used in levitating sea-ice case 450 479 sice_0(:,:) = sice 480 !$OMP END PARALLEL WORKSHARE 451 481 ! 452 482 IF( cp_cfg == "orca" ) THEN ! decrease ocean & ice reference salinities in the Baltic sea … … 459 489 ! ! embedded sea ice 460 490 IF( nn_ice_embd /= 0 ) THEN ! mass exchanges between ice and ocean (case 1 or 2) set the snow+ice mass 491 !$OMP PARALLEL WORKSHARE 461 492 snwice_mass (:,:) = tms(:,:) * ( rhosn * hsnif(:,:) + rhoic * hicif(:,:) ) * ( 1.0 - frld(:,:) ) 462 493 snwice_mass_b(:,:) = snwice_mass(:,:) 494 !$OMP END PARALLEL WORKSHARE 463 495 ELSE 496 !$OMP PARALLEL WORKSHARE 464 497 snwice_mass (:,:) = 0.e0 ! no mass exchanges 465 498 snwice_mass_b(:,:) = 0.e0 ! no mass exchanges 466 499 snwice_fmass (:,:) = 0.e0 ! no mass exchanges 500 !$OMP END PARALLEL WORKSHARE 467 501 ENDIF 468 502 IF( nn_ice_embd == 2 .AND. & ! full embedment (case 2) & no restart : 469 503 & .NOT.ln_rstart ) THEN ! deplete the initial ssh below sea-ice area 504 !$OMP PARALLEL WORKSHARE 470 505 sshn(:,:) = sshn(:,:) - snwice_mass(:,:) * r1_rau0 471 506 sshb(:,:) = sshb(:,:) - snwice_mass(:,:) * r1_rau0 507 !$OMP END PARALLEL WORKSHARE 472 508 !!gm I really don't like this staff here... 
Find a way to put that elsewhere or differently 473 509 !!gm 474 510 IF( .NOT.ln_linssh ) THEN 475 511 512 !$OMP PARALLEL 513 !$OMP DO schedule(static) private(jk) 476 514 do jk = 1,jpkm1 ! adjust initial vertical scale factors 477 515 e3t_n(:,:,jk) = e3t_0(:,:,jk)*( 1._wp + sshn(:,:)*tmask(:,:,1)/(ht_0(:,:) + 1.0 - tmask(:,:,1)) ) 478 516 e3t_b(:,:,jk) = e3t_0(:,:,jk)*( 1._wp + sshb(:,:)*tmask(:,:,1)/(ht_0(:,:) + 1.0 - tmask(:,:,1)) ) 479 517 end do 518 !$OMP WORKSHARE 480 519 e3t_a(:,:,:) = e3t_b(:,:,:) 520 !$OMP END WORKSHARE NOWAIT 521 !$OMP END PARALLEL 481 522 ! Reconstruction of all vertical scale factors at now and before time steps 482 523 ! ! Horizontal scale factor interpolations … … 493 534 CALL dom_vvl_interpol( e3v_b(:,:,:), e3vw_b(:,:,:), 'VW' ) 494 535 ! ! t- and w- points depth 536 !$OMP PARALLEL 537 !$OMP WORKSHARE 495 538 gdept_n(:,:,1) = 0.5_wp * e3w_n(:,:,1) 496 539 gdepw_n(:,:,1) = 0.0_wp 497 540 gde3w_n(:,:,1) = gdept_n(:,:,1) - sshn(:,:) 541 !$OMP END WORKSHARE 498 542 DO jk = 2, jpk 499 gdept_n(:,:,jk) = gdept_n(:,:,jk-1) + e3w_n(:,:,jk) 500 gdepw_n(:,:,jk) = gdepw_n(:,:,jk-1) + e3t_n(:,:,jk-1) 501 gde3w_n(:,:,jk) = gdept_n(:,:,jk ) - sshn (:,:) 502 END DO 543 !$OMP DO schedule(static) private(jj,ji) 544 DO jj = 1, jpj 545 DO ji = 1, jpi 546 gdept_n(ji,jj,jk) = gdept_n(ji,jj,jk-1) + e3w_n(ji,jj,jk) 547 gdepw_n(ji,jj,jk) = gdepw_n(ji,jj,jk-1) + e3t_n(ji,jj,jk-1) 548 gde3w_n(ji,jj,jk) = gdept_n(ji,jj,jk ) - sshn (ji,jj) 549 END DO 550 END DO 551 !$OMP END DO NOWAIT 552 END DO 553 !$OMP END PARALLEL 503 554 ENDIF 504 555 !!gm end -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limthd_2.F90
r6140 r7037 121 121 122 122 !!gm needed? yes at least for some of these arrays 123 !$OMP PARALLEL 124 !$OMP WORKSHARE 123 125 zdvosif(:,:) = 0.e0 ! variation of ice volume at surface 124 126 zdvobif(:,:) = 0.e0 ! variation of ice volume at bottom … … 137 139 rdq_ice(:,:) = 0.e0 ! heat content associated with rdm_ice 138 140 zmsk (:,:,:) = 0.e0 141 !$OMP END WORKSHARE NOWAIT 139 142 140 143 ! set to zero snow thickness smaller than epsi04 144 !$OMP DO schedule(static) private(jj,ji) 141 145 DO jj = 1, jpj 142 146 DO ji = 1, jpi … … 144 148 END DO 145 149 END DO 150 !$OMP END DO NOWAIT 151 !$OMP END PARALLEL 146 152 !!gm better coded (do not use SIGN...) 147 153 ! WHERE( hsnif(:,:) < epsi04 ) hsnif(:,:) = 0.e0 … … 154 160 !-----------------------------------! 155 161 162 !$OMP PARALLEL DO schedule(static) private(jj,ji,zindg,zthsnice,zindb,za,zh) 156 163 DO jj = 1, jpj 157 164 DO ji = 1, jpi … … 196 203 !-------------------------------------------------------------------------- 197 204 205 !$OMP PARALLEL DO schedule(static) private(jj,ji,zthsnice,zindb,zinda,zfricp,zfric_u,zfntlat,zpareff) 198 206 DO jj = 1, jpj 199 207 DO ji = 1, jpi … … 337 345 ! Up-date sea ice thickness 338 346 !-------------------------- 347 !$OMP PARALLEL DO schedule(static) private(jj,ji) 339 348 DO jj = 1, jpj 340 349 DO ji = 1, jpi … … 348 357 !-------------------------------------------------------- 349 358 IF( fcor(1,1) < 0.e0 ) THEN 359 !$OMP PARALLEL DO schedule(static) private(jj,ji) 350 360 DO jj = 1, njeqm1 351 361 DO ji = 1, jpi … … 418 428 ! Update daily thermodynamic ice production. 419 429 !------------------------------------------------------------------------------ 430 !$OMP PARALLEL DO schedule(static) private(jj,ji) 420 431 DO jj = 1, jpj 421 432 DO ji = 1, jpi … … 453 464 IF( iom_use('u_imasstr') ) THEN 454 465 zztmp = 0.25 * rhoic 466 !$OMP PARALLEL DO schedule(static) private(jj,ji,zuice_m,zhice_u) 455 467 DO jj = 1, jpjm1 456 468 DO ji = 1, jpim1 ! NO vector opt. 
… … 466 478 IF( iom_use('v_imasstr') ) THEN 467 479 zztmp = 0.25 * rhoic 480 !$OMP PARALLEL DO schedule(static) private(jj,ji,zvice_m,zhice_v) 468 481 DO jj = 1, jpjm1 469 482 DO ji = 1, jpim1 ! NO vector opt. … … 480 493 !! Fram Strait sea-ice transport (sea-ice + snow) (in ORCA2 = 5 points) 481 494 IF( iom_use('fram_trans') .and. cp_cfg == "orca" .AND. jp_cfg == 2 ) THEN ! ORCA R2 configuration 495 !$OMP PARALLEL DO schedule(static) private(jj,ji,zrhoij,zrhoijm1,ztr_fram) 482 496 DO jj = mj0(137), mj1(137) ! B grid 483 497 IF( mj0(jj-1) >= nldj ) THEN … … 497 511 !! ce A big warning because the model crashes on IDRIS/IBM SP6 with xlf 13.1.0.3, see ticket #761 498 512 !! ce We Unroll the loop and everything works fine 513 !$OMP PARALLEL DO schedule(static) private(jj,ji) 499 514 DO jj = 1, jpj 500 515 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limtrp_2.F90
r4624 r7037 100 100 IF( lk_lim2_vp ) THEN ! VP rheology : B-grid sea-ice dynamics (I-point ice velocity) 101 101 zvbord = 1._wp + ( 1._wp - bound ) ! zvbord=2 no-slip, =0 free slip boundary conditions 102 !$OMP PARALLEL DO schedule(static) private(jj,ji) 102 103 DO jj = 1, jpjm1 103 104 DO ji = 1, jpim1 ! NO vector opt. … … 109 110 ! 110 111 ELSE ! EVP rheology : C-grid sea-ice dynamics (u- & v-points ice velocity) 112 !$OMP PARALLEL WORKSHARE 111 113 zui_u(:,:) = u_ice(:,:) ! EVP rheology: ice (u,v) at u- and v-points 112 114 zvi_v(:,:) = v_ice(:,:) 115 !$OMP END PARALLEL WORKSHARE 113 116 ENDIF 114 117 … … 125 128 ! content of properties 126 129 ! --------------------- 130 !$OMP PARALLEL WORKSHARE 127 131 zs0sn (:,:) = hsnm(:,:) * area (:,:) ! Snow volume. 128 132 zs0ice(:,:) = hicm(:,:) * area (:,:) ! Ice volume. … … 132 136 zs0c2 (:,:) = tbif(:,:,3) / rt0_ice * zs0ice(:,:) ! Heat content of the second ice layer. 133 137 zs0st (:,:) = qstoif(:,:) / xlic * zs0a (:,:) ! Heat reservoir for brine pockets. 138 !$OMP END PARALLEL WORKSHARE 134 139 135 140 … … 178 183 ! ------------------------------------------ 179 184 !!gm Define in limmsh one for all area = 1 /area (CPU time saved !) 185 !$OMP PARALLEL 186 !$OMP WORKSHARE 180 187 zs0ice(:,:) = zs0ice(:,:) / area(:,:) 181 188 zs0sn (:,:) = zs0sn (:,:) / area(:,:) … … 186 193 zs0st (:,:) = zs0st (:,:) / area(:,:) 187 194 195 !$OMP END WORKSHARE NOWAIT 188 196 189 197 !-------------------------------------! … … 193 201 ! Masked eddy diffusivity coefficient at ocean U- and V-points 194 202 ! ------------------------------------------------------------ 203 !$OMP DO schedule(static) private(jj,ji) 195 204 DO jj = 1, jpjm1 ! NB: has not to be defined on jpj line and jpi row 196 205 DO ji = 1 , fs_jpim1 ! vector opt. … … 201 210 END DO 202 211 END DO 212 !$OMP END DO NOWAIT 213 !$OMP END PARALLEL 203 214 !!gm more readable coding: (and avoid an error in F90 with sign of zero) 204 215 ! DO jj = 1, jpjm1 ! 
NB: has not to be defined on jpj line and jpi row … … 221 232 222 233 !!gm see comment this can be skipped 223 zs0ice(:,:) = MAX( rzero, zs0ice(:,:) * area(:,:) ) !!bug: useless 224 zs0sn (:,:) = MAX( rzero, zs0sn (:,:) * area(:,:) ) !!bug: cf /area just below 225 zs0a (:,:) = MAX( rzero, zs0a (:,:) * area(:,:) ) !! caution: the suppression of the 2 changes 226 zs0c0 (:,:) = MAX( rzero, zs0c0 (:,:) * area(:,:) ) !! the last digit of the results 227 zs0c1 (:,:) = MAX( rzero, zs0c1 (:,:) * area(:,:) ) 228 zs0c2 (:,:) = MAX( rzero, zs0c2 (:,:) * area(:,:) ) 229 zs0st (:,:) = MAX( rzero, zs0st (:,:) * area(:,:) ) 230 234 !$OMP PARALLEL 235 !$OMP DO schedule(static) private(jj,ji) 236 DO jj = 1, jpj ! NB: has not to be defined on jpj line and jpi row 237 DO ji = 1 , jpi ! vector opt. 238 zs0ice(ji,jj) = MAX( rzero, zs0ice(ji,jj) * area(ji,jj) ) !!bug: useless 239 zs0sn (ji,jj) = MAX( rzero, zs0sn (ji,jj) * area(ji,jj) ) !!bug: cf /area just below 240 zs0a (ji,jj) = MAX( rzero, zs0a (ji,jj) * area(ji,jj) ) !! caution: the suppression of the 2 changes 241 zs0c0 (ji,jj) = MAX( rzero, zs0c0 (ji,jj) * area(ji,jj) ) !! the last digit of the results 242 zs0c1 (ji,jj) = MAX( rzero, zs0c1 (ji,jj) * area(ji,jj) ) 243 zs0c2 (ji,jj) = MAX( rzero, zs0c2 (ji,jj) * area(ji,jj) ) 244 zs0st (ji,jj) = MAX( rzero, zs0st (ji,jj) * area(ji,jj) ) 245 END DO 246 END DO 231 247 232 248 !-------------------------------------------------------------------! 233 249 ! Updating and limitation of sea ice properties after transport ! 234 250 !-------------------------------------------------------------------! 251 !$OMP DO schedule(static) private(jj,ji,zindhe,zindb,zacrith,zindsn,zindic,zusvosn,zusvoic,zignm,zrtt,ztsn,ztic1,ztic2) 235 252 DO jj = 1, jpj 236 253 zindhe = MAX( 0.e0, SIGN( 1.e0, fcor(1,jj) ) ) ! = 0 for SH, =1 for NH … … 274 291 END DO 275 292 END DO 293 !$OMP END DO NOWAIT 294 !$OMP END PARALLEL 276 295 ! 277 296 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_2/limwri_2.F90
r6140 r7037 142 142 !-- Store instantaneous values in zcmo 143 143 144 !$OMP PARALLEL 145 !$OMP WORKSHARE 144 146 zcmo(:,:, 1:jpnoumax ) = 0.e0 147 !$OMP END WORKSHARE 148 !$OMP DO schedule(static) private(jj,ji,zindh,zinda,zindb,ztmu) 145 149 DO jj = 2 , jpjm1 146 150 DO ji = 2 , jpim1 ! NO vector opt. … … 187 191 END DO 188 192 END DO 193 !$OMP END DO NOWAIT 194 !$OMP END PARALLEL 189 195 ! 190 196 ! Write the netcdf file … … 192 198 niter = niter + 1 193 199 DO jf = 1 , noumef 194 zfield(:,:) = zcmo(:,:,jf) * cmulti(jf) + cadd(jf) * tmask(:,:,1) 200 !$OMP PARALLEL DO schedule(static) private(jj,ji) 201 DO jj = 1 , jpj 202 DO ji = 1 , jpi 203 zfield(ji,jj) = zcmo(ji,jj,jf) * cmulti(jf) + cadd(jf) * tmask(ji,jj,1) 204 END DO 205 END DO 195 206 SELECT CASE ( jf ) 196 207 CASE ( 7, 8, 15, 16, 20, 21 ) ! velocity or stress fields (vectors) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/LIM_SRC_3/limrhg.F90
r6416 r7037 180 180 ! 181 181 ! Put every vector to 0 182 183 #if defined key_lim3 184 CALL lim_itd_me_icestrength( nn_icestr ) ! LIM-3: Ice strength on T-points 185 #endif 186 187 !$OMP PARALLEL 188 !$OMP WORKSHARE 182 189 delta_i(:,:) = 0._wp ; 183 190 zpresh (:,:) = 0._wp ; … … 186 193 divu_i (:,:) = 0._wp ; zdt (:,:) = 0._wp ; zds(:,:) = 0._wp 187 194 shear_i(:,:) = 0._wp 188 189 #if defined key_lim3 190 CALL lim_itd_me_icestrength( nn_icestr ) ! LIM-3: Ice strength on T-points 191 #endif 192 195 !$OMP END WORKSHARE 196 197 !$OMP DO schedule(static) private(jj,ji) 193 198 DO jj = k_j1 , k_jpj ! Ice mass and temp variables 194 199 DO ji = 1 , jpi … … 206 211 ! Ice strength on grid cell corners (zpreshc) 207 212 ! needed for calculation of shear stress 213 !$OMP DO schedule(static) private(jj,ji,zstms) 208 214 DO jj = k_j1+1, k_jpj-1 209 215 DO ji = 2, jpim1 !RB caution no fs_ (ji+1,jj+1) … … 215 221 END DO 216 222 END DO 223 !$OMP END DO NOWAIT 224 !$OMP END PARALLEL 217 225 CALL lbc_lnk( zpreshc(:,:), 'F', 1. ) 218 226 ! … … 242 250 zintb = REAL( nn_fsbc + 1 ) / REAL( nn_fsbc ) * 0.5_wp 243 251 ! 252 !$OMP PARALLEL WORKSHARE 244 253 zpice(:,:) = ssh_m(:,:) + ( zintn * snwice_mass(:,:) + zintb * snwice_mass_b(:,:) ) * r1_rau0 254 !$OMP END PARALLEL WORKSHARE 245 255 ! 246 256 ELSE !== non-embedded sea ice: use ocean surface for slope calculation ==! 257 !$OMP PARALLEL WORKSHARE 247 258 zpice(:,:) = ssh_m(:,:) 259 !$OMP END PARALLEL WORKSHARE 248 260 ENDIF 249 261 262 !$OMP PARALLEL DO schedule(static) private(jj,ji,zc1,zc2,zc3,zt11,zt12,zt21,zt22,ztagnx,ztagny,zdsshx,zdsshy) 250 263 DO jj = k_j1+1, k_jpj-1 251 264 DO ji = fs_2, fs_jpim1 … … 319 332 320 333 !-Initialise stress tensor 334 !$OMP PARALLEL WORKSHARE 321 335 zs1 (:,:) = stress1_i (:,:) 322 336 zs2 (:,:) = stress2_i (:,:) 323 337 zs12(:,:) = stress12_i(:,:) 338 !$OMP END PARALLEL WORKSHARE 324 339 325 340 ! !----------------------! 326 341 DO jter = 1 , nn_nevp ! loop over jter ! 327 342 ! 
!----------------------! 343 !$OMP PARALLEL 344 !$OMP DO schedule(static) private(jj,ji) 328 345 DO jj = k_j1, k_jpj-1 329 zu_ice(:,jj) = u_ice(:,jj) ! velocity at previous time step 330 zv_ice(:,jj) = v_ice(:,jj) 331 END DO 332 346 DO ji = 1, jpi 347 zu_ice(ji,jj) = u_ice(ji,jj) ! velocity at previous time step 348 zv_ice(ji,jj) = v_ice(ji,jj) 349 END DO 350 END DO 351 !$OMP END DO NOWAIT 352 353 !$OMP DO schedule(static) private(jj,ji) 333 354 DO jj = k_j1+1, k_jpj-1 334 355 DO ji = fs_2, fs_jpim1 !RB bug no vect opt due to zmask … … 377 398 END DO 378 399 END DO 400 !$OMP END DO NOWAIT 401 !$OMP END PARALLEL 379 402 380 403 CALL lbc_lnk_multi( v_ice1, 'U', -1., u_ice2, 'V', -1. ) ! lateral boundary cond. 381 404 405 !$OMP PARALLEL DO schedule(static) private(jj,ji,zdst,delta,zddc,zdtc) 382 406 DO jj = k_j1+1, k_jpj-1 383 407 DO ji = fs_2, fs_jpim1 … … 417 441 418 442 ! Ice internal stresses (Appendix C of Hunke and Dukowicz, 2002) 443 !$OMP PARALLEL DO schedule(static) private(jj,ji) 419 444 DO jj = k_j1+1, k_jpj-1 420 445 DO ji = fs_2, fs_jpim1 … … 438 463 IF (MOD(jter,2).eq.0) THEN 439 464 465 !$OMP PARALLEL DO schedule(static) private(jj,ji,rswitch,z0,zv_ice1,za,zr,zcca,zccb) 440 466 DO jj = k_j1+1, k_jpj-1 441 467 DO ji = fs_2, fs_jpim1 … … 464 490 #endif 465 491 492 !$OMP PARALLEL DO schedule(static) private(jj,ji,rswitch,z0,zu_ice2,za,zr,zcca,zccb) 466 493 DO jj = k_j1+1, k_jpj-1 467 494 DO ji = fs_2, fs_jpim1 … … 491 518 492 519 ELSE 520 !$OMP PARALLEL DO schedule(static) private(jj,ji,rswitch,z0,zu_ice2,za,zr,zcca,zccb) 493 521 DO jj = k_j1+1, k_jpj-1 494 522 DO ji = fs_2, fs_jpim1 … … 517 545 #endif 518 546 547 !$OMP PARALLEL DO schedule(static) private(jj,ji,rswitch,z0,zv_ice1,za,zr,zcca,zccb) 519 548 DO jj = k_j1+1, k_jpj-1 520 549 DO ji = fs_2, fs_jpim1 … … 562 591 ! If the ice volume is below zvmin then ice velocity should equal the 563 592 ! ocean velocity. 
This prevents high velocity when ice is thin 593 !$OMP PARALLEL DO schedule(static) private(jj,ji) 564 594 DO jj = k_j1+1, k_jpj-1 565 595 DO ji = fs_2, fs_jpim1 … … 582 612 #endif 583 613 614 !$OMP PARALLEL DO schedule(static) private(jj,ji) 584 615 DO jj = k_j1+1, k_jpj-1 585 616 DO ji = fs_2, fs_jpim1 … … 599 630 600 631 ! Recompute delta, shear and div, inputs for mechanical redistribution 632 !$OMP PARALLEL 633 !$OMP DO schedule(static) private(jj,ji,zdst,delta) 601 634 DO jj = k_j1+1, k_jpj-1 602 635 DO ji = fs_2, jpim1 !RB bug no vect opt due to zmask … … 634 667 ! * Invariants of the stress tensor are required for limitd_me 635 668 ! (accelerates convergence and improves stability) 669 !$OMP DO schedule(static) private(jj,ji,zdst) 636 670 DO jj = k_j1+1, k_jpj-1 637 671 DO ji = fs_2, fs_jpim1 … … 641 675 END DO 642 676 END DO 643 644 ! Lateral boundary condition 645 CALL lbc_lnk_multi( divu_i (:,:), 'T', 1., delta_i(:,:), 'T', 1., shear_i(:,:), 'T', 1. ) 646 677 !$OMP END DO NOWAIT 647 678 ! * Store the stress tensor for the next time step 679 !$OMP WORKSHARE 648 680 stress1_i (:,:) = zs1 (:,:) 649 681 stress2_i (:,:) = zs2 (:,:) 650 682 stress12_i(:,:) = zs12(:,:) 683 !$OMP END WORKSHARE NOWAIT 684 !$OMP END PARALLEL 685 ! Lateral boundary condition 686 CALL lbc_lnk_multi( divu_i (:,:), 'T', 1., delta_i(:,:), 'T', 1., shear_i(:,:), 'T', 1. ) 651 687 652 688 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/dia25h.F90
r6748 r7037 225 225 ENDIF 226 226 227 !$OMP PARALLEL WORKSHARE 227 228 tn_25h(:,:,:) = tn_25h(:,:,:) + tsn(:,:,:,jp_tem) 228 229 sn_25h(:,:,:) = sn_25h(:,:,:) + tsn(:,:,:,jp_sal) … … 239 240 rmxln_25h(:,:,:) = rmxln_25h(:,:,:) + mxln(:,:,:) 240 241 #endif 242 !$OMP END PARALLEL WORKSHARE 241 243 cnt_25h = cnt_25h + 1 242 244 … … 255 257 ENDIF 256 258 259 !$OMP PARALLEL WORKSHARE 257 260 tn_25h(:,:,:) = tn_25h(:,:,:) / 25.0_wp 258 261 sn_25h(:,:,:) = sn_25h(:,:,:) / 25.0_wp … … 269 272 rmxln_25h(:,:,:) = rmxln_25h(:,:,:) / 25.0_wp 270 273 #endif 274 !$OMP END PARALLEL WORKSHARE 271 275 272 276 IF (lwp) WRITE(numout,*) 'dia_wri_tide : Mean calculated by dividing 25 hour sums and writing output' 273 277 zmdi=1.e+20 !missing data indicator for masking 274 278 ! write tracers (instantaneous) 279 !$OMP PARALLEL WORKSHARE 275 280 zw3d(:,:,:) = tn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 281 !$OMP END PARALLEL WORKSHARE 276 282 CALL iom_put("temper25h", zw3d) ! potential temperature 283 !$OMP PARALLEL WORKSHARE 277 284 zw3d(:,:,:) = sn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 285 !$OMP END PARALLEL WORKSHARE 278 286 CALL iom_put( "salin25h", zw3d ) ! salinity 287 !$OMP PARALLEL WORKSHARE 279 288 zw2d(:,:) = sshn_25h(:,:)*tmask(:,:,1) + zmdi*(1.0-tmask(:,:,1)) 289 !$OMP END PARALLEL WORKSHARE 280 290 CALL iom_put( "ssh25h", zw2d ) ! sea surface 281 291 282 292 283 293 ! Write velocities (instantaneous) 294 !$OMP PARALLEL WORKSHARE 284 295 zw3d(:,:,:) = un_25h(:,:,:)*umask(:,:,:) + zmdi*(1.0-umask(:,:,:)) 296 !$OMP END PARALLEL WORKSHARE 285 297 CALL iom_put("vozocrtx25h", zw3d) ! i-current 298 !$OMP PARALLEL WORKSHARE 286 299 zw3d(:,:,:) = vn_25h(:,:,:)*vmask(:,:,:) + zmdi*(1.0-vmask(:,:,:)) 300 !$OMP END PARALLEL WORKSHARE 287 301 CALL iom_put("vomecrty25h", zw3d ) ! 
j-current 288 302 303 !$OMP PARALLEL WORKSHARE 289 304 zw3d(:,:,:) = wn_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 305 !$OMP END PARALLEL WORKSHARE 290 306 CALL iom_put("vomecrtz25h", zw3d ) ! k-current 307 !$OMP PARALLEL WORKSHARE 291 308 zw3d(:,:,:) = avt_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 309 !$OMP END PARALLEL WORKSHARE 292 310 CALL iom_put("avt25h", zw3d ) ! diffusivity 311 !$OMP PARALLEL WORKSHARE 293 312 zw3d(:,:,:) = avm_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 313 !$OMP END PARALLEL WORKSHARE 294 314 CALL iom_put("avm25h", zw3d) ! viscosity 295 315 #if defined key_zdftke || defined key_zdfgls 316 !$OMP PARALLEL WORKSHARE 296 317 zw3d(:,:,:) = en_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 318 !$OMP END PARALLEL WORKSHARE 297 319 CALL iom_put("tke25h", zw3d) ! tke 298 320 #endif 299 321 #if defined key_zdfgls 322 !$OMP PARALLEL WORKSHARE 300 323 zw3d(:,:,:) = rmxln_25h(:,:,:)*tmask(:,:,:) + zmdi*(1.0-tmask(:,:,:)) 324 !$OMP END PARALLEL WORKSHARE 301 325 CALL iom_put( "mxln25h",zw3d) 302 326 #endif 303 327 304 328 ! After the write reset the values to cnt=1 and sum values equal current value 329 !$OMP PARALLEL WORKSHARE 305 330 tn_25h(:,:,:) = tsn(:,:,:,jp_tem) 306 331 sn_25h(:,:,:) = tsn(:,:,:,jp_sal) … … 317 342 rmxln_25h(:,:,:) = mxln(:,:,:) 318 343 #endif 344 !$OMP END PARALLEL WORKSHARE 319 345 cnt_25h = 1 320 346 IF (lwp) WRITE(numout,*) 'dia_wri_tide : After 25hr mean write, reset sum to current value and cnt_25h to one for overlapping average',cnt_25h -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diacfl.F90
r6748 r7037 71 71 72 72 ! calculate Courant numbers 73 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 73 74 DO jk = 1, jpk 74 75 DO jj = 1, jpj -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diafwb.F90
r6748 r7037 75 75 IF( lk_mpp ) CALL mpp_sum( a_sshb ) ! sum over the global domain 76 76 77 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei ,a_salb)77 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei) REDUCTION(+:a_salb) 78 78 DO jk = 1, jpkm1 79 79 DO jj = 2, jpjm1 … … 102 102 IF( lk_mpp ) CALL mpp_sum( zarea ) ! sum over the global domain 103 103 104 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei ,a_saln,zvol)104 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zwei) REDUCTION (+:a_saln,zvol) 105 105 DO jk = 1, jpkm1 106 106 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DIA/diawri.F90
r6748 r7037 181 181 182 182 IF ( iom_use("taubot") ) THEN ! bottom stress 183 !$OMP PARALLEL 184 !$OMP WORKSHARE 183 185 z2d(:,:) = 0._wp 184 !$OMP PARALLEL DO schedule(static) private(jj, ji, zztmpx,zztmpy) 186 !$OMP END WORKSHARE 187 !$OMP DO schedule(static) private(jj, ji, zztmpx,zztmpy) 185 188 DO jj = 2, jpjm1 186 189 DO ji = fs_2, fs_jpim1 ! vector opt. … … 193 196 ENDDO 194 197 ENDDO 198 !$OMP END DO NOWAIT 199 !$OMP END PARALLEL 195 200 CALL lbc_lnk( z2d, 'T', 1. ) 196 201 CALL iom_put( "taubot", z2d ) … … 226 231 IF( iom_use('w_masstr') .OR. iom_use('w_masstr2') ) THEN ! vertical mass transport & its square value 227 232 ! Caution: in the VVL case, it only correponds to the baroclinic mass transport. 233 !$OMP PARALLEL 234 !$OMP WORKSHARE 228 235 z2d(:,:) = rau0 * e1e2t(:,:) 229 !$OMP PARALLEL DO schedule(static) private(jk) 236 !$OMP END WORKSHARE 237 !$OMP DO schedule(static) private(jk) 230 238 DO jk = 1, jpk 231 239 z3d(:,:,jk) = wn(:,:,jk) * z2d(:,:) 232 240 END DO 241 !$OMP END DO NOWAIT 242 !$OMP END PARALLEL 233 243 CALL iom_put( "w_masstr" , z3d ) 234 244 IF( iom_use('w_masstr2') ) CALL iom_put( "w_masstr2", z3d(:,:,:) * z3d(:,:,:) ) … … 255 265 CALL lbc_lnk( z2d, 'T', 1. ) 256 266 CALL iom_put( "sstgrad2", z2d ) ! square of module of sst gradient 257 z2d(:,:) = SQRT( z2d(:,:) ) 267 !$OMP PARALLEL DO schedule(static) private(jj, ji) 268 DO jj = 1, jpj 269 DO ji = 1, jpi 270 z2d(ji,jj) = SQRT( z2d(ji,jj) ) 271 END DO 272 END DO 258 273 CALL iom_put( "sstgrad" , z2d ) ! module of sst gradient 259 274 ENDIF … … 261 276 ! 
clem: heat and salt content 262 277 IF( iom_use("heatc") ) THEN 278 !$OMP PARALLEL 279 !$OMP WORKSHARE 263 280 z2d(:,:) = 0._wp 264 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 281 !$OMP END WORKSHARE 282 !$OMP DO schedule(static) private(jk, jj, ji) 265 283 DO jk = 1, jpkm1 266 284 DO jj = 1, jpj … … 270 288 END DO 271 289 END DO 290 !$OMP END DO NOWAIT 291 !$OMP END PARALLEL 272 292 CALL iom_put( "heatc", (rau0 * rcp) * z2d ) ! vertically integrated heat content (J/m2) 273 293 ENDIF 274 294 275 295 IF( iom_use("saltc") ) THEN 296 !$OMP PARALLEL 297 !$OMP WORKSHARE 276 298 z2d(:,:) = 0._wp 277 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 299 !$OMP END WORKSHARE 300 !$OMP DO schedule(static) private(jk, jj, ji) 278 301 DO jk = 1, jpkm1 279 302 DO jj = 1, jpj … … 283 306 END DO 284 307 END DO 308 !$OMP END DO NOWAIT 309 !$OMP END PARALLEL 285 310 CALL iom_put( "saltc", rau0 * z2d ) ! vertically integrated salt content (PSU*kg/m2) 286 311 ENDIF 287 312 ! 288 313 IF ( iom_use("eken") ) THEN 314 !$OMP PARALLEL 315 !$OMP WORKSHARE 289 316 rke(:,:,jk) = 0._wp ! kinetic energy 290 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zztmp, zztmpx, zztmpy) 317 !$OMP END WORKSHARE 318 !$OMP DO schedule(static) private(jk, jj, ji, zztmp, zztmpx, zztmpy) 291 319 DO jk = 1, jpkm1 292 320 DO jj = 2, jpjm1 … … 306 334 ENDDO 307 335 ENDDO 336 !$OMP END DO NOWAIT 337 !$OMP END PARALLEL 308 338 CALL lbc_lnk( rke, 'T', 1. ) 309 339 CALL iom_put( "eken", rke ) … … 313 343 ! 314 344 IF( iom_use("u_masstr") .OR. iom_use("u_heattr") .OR. iom_use("u_salttr") ) THEN 345 !$OMP PARALLEL 346 !$OMP WORKSHARE 315 347 z3d(:,:,jpk) = 0.e0 316 !$OMP PARALLEL DO schedule(static) private(jk) 348 !$OMP END WORKSHARE 349 !$OMP DO schedule(static) private(jk) 317 350 DO jk = 1, jpkm1 318 351 z3d(:,:,jk) = rau0 * un(:,:,jk) * e2u(:,:) * e3u_n(:,:,jk) * umask(:,:,jk) 319 352 END DO 353 !$OMP END DO NOWAIT 354 !$OMP END PARALLEL 320 355 CALL iom_put( "u_masstr", z3d ) ! 
mass transport in i-direction 321 356 ENDIF 322 357 323 358 IF( iom_use("u_heattr") ) THEN 359 !$OMP PARALLEL 360 !$OMP WORKSHARE 324 361 z2d(:,:) = 0.e0 325 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 362 !$OMP END WORKSHARE 363 !$OMP DO schedule(static) private(jk, jj, ji) 326 364 DO jk = 1, jpkm1 327 365 DO jj = 2, jpjm1 … … 331 369 END DO 332 370 END DO 371 !$OMP END DO NOWAIT 372 !$OMP END PARALLEL 333 373 CALL lbc_lnk( z2d, 'U', -1. ) 334 374 CALL iom_put( "u_heattr", (0.5 * rcp) * z2d ) ! heat transport in i-direction … … 336 376 337 377 IF( iom_use("u_salttr") ) THEN 378 !$OMP PARALLEL 379 !$OMP WORKSHARE 338 380 z2d(:,:) = 0.e0 339 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 381 !$OMP END WORKSHARE 382 !$OMP DO schedule(static) private(jk, jj, ji) 340 383 DO jk = 1, jpkm1 341 384 DO jj = 2, jpjm1 … … 345 388 END DO 346 389 END DO 390 !$OMP END DO NOWAIT 391 !$OMP END PARALLEL 347 392 CALL lbc_lnk( z2d, 'U', -1. ) 348 393 CALL iom_put( "u_salttr", 0.5 * z2d ) ! heat transport in i-direction … … 351 396 352 397 IF( iom_use("v_masstr") .OR. iom_use("v_heattr") .OR. iom_use("v_salttr") ) THEN 398 !$OMP PARALLEL 399 !$OMP WORKSHARE 353 400 z3d(:,:,jpk) = 0.e0 354 !$OMP PARALLEL DO schedule(static) private(jk) 401 !$OMP END WORKSHARE 402 !$OMP DO schedule(static) private(jk) 355 403 DO jk = 1, jpkm1 356 404 z3d(:,:,jk) = rau0 * vn(:,:,jk) * e1v(:,:) * e3v_n(:,:,jk) * vmask(:,:,jk) 357 405 END DO 406 !$OMP END DO NOWAIT 407 !$OMP END PARALLEL 358 408 CALL iom_put( "v_masstr", z3d ) ! mass transport in j-direction 359 409 ENDIF 360 410 361 411 IF( iom_use("v_heattr") ) THEN 412 !$OMP PARALLEL 413 !$OMP WORKSHARE 362 414 z2d(:,:) = 0.e0 363 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 415 !$OMP END WORKSHARE 416 !$OMP DO schedule(static) private(jk, jj, ji) 364 417 DO jk = 1, jpkm1 365 418 DO jj = 2, jpjm1 … … 369 422 END DO 370 423 END DO 424 !$OMP END DO NOWAIT 425 !$OMP END PARALLEL 371 426 CALL lbc_lnk( z2d, 'V', -1. 
) 372 427 CALL iom_put( "v_heattr", (0.5 * rcp) * z2d ) ! heat transport in j-direction … … 374 429 375 430 IF( iom_use("v_salttr") ) THEN 431 !$OMP PARALLEL 432 !$OMP WORKSHARE 376 433 z2d(:,:) = 0.e0 377 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 434 !$OMP END WORKSHARE 435 !$OMP DO schedule(static) private(jk, jj, ji) 378 436 DO jk = 1, jpkm1 379 437 DO jj = 2, jpjm1 … … 383 441 END DO 384 442 END DO 443 !$OMP END DO NOWAIT 444 !$OMP END PARALLEL 385 445 CALL lbc_lnk( z2d, 'V', -1. ) 386 446 CALL iom_put( "v_salttr", 0.5 * z2d ) ! heat transport in j-direction … … 758 818 ENDIF 759 819 IF( .NOT.ln_linssh ) THEN 820 !$OMP PARALLEL WORKSHARE 760 821 zw3d(:,:,:) = ( ( e3t_n(:,:,:) - e3t_0(:,:,:) ) / e3t_0(:,:,:) * 100 * tmask(:,:,:) ) ** 2 822 !$OMP END PARALLEL WORKSHARE 761 823 CALL histwrite( nid_T, "vovvle3t", it, e3t_n (:,:,:) , ndim_T , ndex_T ) ! level thickness 762 824 CALL histwrite( nid_T, "vovvldep", it, gdept_n(:,:,:) , ndim_T , ndex_T ) ! t-point depth … … 770 832 ! in linear free surface case) 771 833 IF( ln_linssh ) THEN 834 !$OMP PARALLEL WORKSHARE 772 835 zw2d(:,:) = emp (:,:) * tsn(:,:,1,jp_tem) 836 !$OMP END PARALLEL WORKSHARE 773 837 CALL histwrite( nid_T, "sosst_cd", it, zw2d, ndim_hT, ndex_hT ) ! c/d term on sst 838 !$OMP PARALLEL WORKSHARE 774 839 zw2d(:,:) = emp (:,:) * tsn(:,:,1,jp_sal) 840 !$OMP END PARALLEL WORKSHARE 775 841 CALL histwrite( nid_T, "sosss_cd", it, zw2d, ndim_hT, ndex_hT ) ! c/d term on sss 776 842 ENDIF … … 808 874 CALL histwrite( nid_T, "sohefldp", it, qrp , ndim_hT, ndex_hT ) ! heat flux damping 809 875 CALL histwrite( nid_T, "sowafldp", it, erp , ndim_hT, ndex_hT ) ! freshwater flux damping 810 IF( ln_ssr ) zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 876 IF( ln_ssr ) THEN 877 !$OMP PARALLEL WORKSHARE 878 zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 879 !$OMP END PARALLEL WORKSHARE 880 END IF 811 881 CALL histwrite( nid_T, "sosafldp", it, zw2d , ndim_hT, ndex_hT ) ! 
salt flux damping 812 882 ENDIF … … 814 884 CALL histwrite( nid_T, "sohefldp", it, qrp , ndim_hT, ndex_hT ) ! heat flux damping 815 885 CALL histwrite( nid_T, "sowafldp", it, erp , ndim_hT, ndex_hT ) ! freshwater flux damping 816 IF( ln_ssr ) zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 886 IF( ln_ssr ) THEN 887 !$OMP PARALLEL WORKSHARE 888 zw2d(:,:) = erp(:,:) * tsn(:,:,1,jp_sal) * tmask(:,:,1) 889 !$OMP END PARALLEL WORKSHARE 890 END IF 817 891 CALL histwrite( nid_T, "sosafldp", it, zw2d , ndim_hT, ndex_hT ) ! salt flux damping 818 892 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domain.F90
r6748 r7037 92 92 IF( ln_sco ) CALL dom_stiff ! Maximum stiffness ratio/hydrostatic consistency 93 93 ! 94 !$OMP PARALLEL WORKSHARE 94 !$OMP PARALLEL 95 !$OMP WORKSHARE 95 96 ht_0(:,:) = e3t_0(:,:,1) * tmask(:,:,1) ! Reference ocean thickness 96 97 hu_0(:,:) = e3u_0(:,:,1) * umask(:,:,1) 97 98 hv_0(:,:) = e3v_0(:,:,1) * vmask(:,:,1) 98 !$OMP END PARALLELWORKSHARE99 !$OMP END WORKSHARE 99 100 DO jk = 2, jpk 100 !$OMP PARALLELDO schedule(static) private(jj,ji)101 !$OMP DO schedule(static) private(jj,ji) 101 102 DO jj =1, jpj 102 103 DO ji=1, jpi … … 107 108 END DO 108 109 END DO 110 !$OMP END PARALLEL 109 111 ! 110 112 ! !== time varying part of coordinate system ==! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domhgr.F90
r6748 r7037 203 203 ENDIF 204 204 #endif 205 !$OMP PARALLEL DO schedule(static) private(jj, ji) 205 !$OMP PARALLEL 206 !$OMP DO schedule(static) private(jj, ji) 206 207 DO jj = 1, jpj 207 208 DO ji = 1, jpi … … 217 218 END DO 218 219 END DO 220 !$OMP END DO NOWAIT 219 221 ! 220 222 ! Horizontal scale factors (in meters) 221 223 ! ====== 222 !$OMP PARALLELWORKSHARE224 !$OMP WORKSHARE 223 225 e1t(:,:) = ppe1_m ; e2t(:,:) = ppe2_m 224 226 e1u(:,:) = ppe1_m ; e2u(:,:) = ppe2_m 225 227 e1v(:,:) = ppe1_m ; e2v(:,:) = ppe2_m 226 228 e1f(:,:) = ppe1_m ; e2f(:,:) = ppe2_m 227 !$OMP END PARALLEL WORKSHARE 229 !$OMP END WORKSHARE NOWAIT 230 !$OMP END PARALLEL 228 231 ! 229 232 CASE ( 4 ) !== geographical mesh on the sphere, isotropic MERCATOR type ==! … … 303 306 ENDIF 304 307 ! 305 !$OMP PARALLEL DO schedule(static) private(jj, ji, zim1, zjm1) 308 !$OMP PARALLEL 309 !$OMP DO schedule(static) private(jj, ji, zim1, zjm1, zim05, zjm05) 306 310 DO jj = 1, jpj 307 311 DO ji = 1, jpi … … 322 326 END DO 323 327 END DO 328 !$OMP END DO NOWAIT 324 329 ! 325 330 ! Horizontal scale factors (in meters) 326 331 ! ====== 327 !$OMP PARALLELWORKSHARE332 !$OMP WORKSHARE 328 333 e1t(:,:) = ze1 ; e2t(:,:) = ze1 329 334 e1u(:,:) = ze1 ; e2u(:,:) = ze1 330 335 e1v(:,:) = ze1 ; e2v(:,:) = ze1 331 336 e1f(:,:) = ze1 ; e2f(:,:) = ze1 332 !$OMP END PARALLEL WORKSHARE 337 !$OMP END WORKSHARE 338 !$OMP END PARALLEL 333 339 ! 334 340 CASE DEFAULT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/dommsk.F90
r6790 r7037 146 146 ! N.B. tmask has already the right boundary conditions since mbathy is ok 147 147 ! 148 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 148 !$OMP PARALLEL 149 !$OMP DO schedule(static) private(jk, jj, ji) 149 150 DO jk = 1, jpk 150 151 DO jj = 1, jpj … … 157 158 158 159 ! (ISF) define barotropic mask and mask the ice shelf point 159 !$OMP PARALLELWORKSHARE160 !$OMP WORKSHARE 160 161 ssmask(:,:)=tmask(:,:,1) ! at this stage ice shelf is not masked 161 !$OMP END PARALLELWORKSHARE162 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)162 !$OMP END WORKSHARE 163 !$OMP DO schedule(static) private(jk, jj, ji) 163 164 DO jk = 1, jpk 164 165 DO jj = 1, jpj … … 173 174 ! Interior domain mask (used for global sum) 174 175 ! -------------------- 175 !$OMP PARALLELWORKSHARE176 !$OMP WORKSHARE 176 177 tmask_i(:,:) = ssmask(:,:) ! (ISH) tmask_i = 1 even on the ice shelf 177 178 178 179 tmask_h(:,:) = 1._wp ! 0 on the halo and 1 elsewhere 179 !$OMP END PARALLEL WORKSHARE 180 !$OMP END WORKSHARE NOWAIT 181 !$OMP END PARALLEL 180 182 iif = jpreci ! ??? 181 183 iil = nlci - jpreci + 1 … … 211 213 ! 2. Ocean/land mask at u-, v-, and z-points (computed from tmask) 212 214 ! ------------------------------------------- 213 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 215 !$OMP PARALLEL 216 !$OMP DO schedule(static) private(jk, jj, ji) 214 217 DO jk = 1, jpk 215 218 DO jj = 1, jpjm1 … … 225 228 END DO 226 229 ! (ISF) MIN(1,SUM(umask)) is here to check if you have effectively at least 1 wet cell at u point 227 !$OMP PARALLELDO schedule(static) private(jj, ji)230 !$OMP DO schedule(static) private(jj, ji) 228 231 DO jj = 1, jpjm1 229 232 DO ji = 1, fs_jpim1 ! vector loop … … 236 239 END DO 237 240 END DO 241 !$OMP END DO NOWAIT 242 !$OMP END PARALLEL 238 243 CALL lbc_lnk( umask , 'U', 1._wp ) ! Lateral boundary conditions 239 244 CALL lbc_lnk( vmask , 'V', 1._wp ) … … 245 250 ! 3. 
Ocean/land mask at wu-, wv- and w points 246 251 !---------------------------------------------- 252 !$OMP PARALLEL 253 !$OMP WORKSHARE 247 254 wmask (:,:,1) = tmask(:,:,1) ! surface 248 255 wumask(:,:,1) = umask(:,:,1) 249 256 wvmask(:,:,1) = vmask(:,:,1) 250 !$OMP PARALLEL DO schedule(static) private(jk) 257 !$OMP END WORKSHARE 258 !$OMP DO schedule(static) private(jk) 251 259 DO jk = 2, jpk ! interior values 252 260 wmask (:,:,jk) = tmask(:,:,jk) * tmask(:,:,jk-1) … … 254 262 wvmask(:,:,jk) = vmask(:,:,jk) * vmask(:,:,jk-1) 255 263 END DO 264 !$OMP END DO NOWAIT 265 !$OMP END PARALLEL 256 266 257 267 ! Lateral boundary conditions on velocity (modify fmask) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/domzgr.F90
r6748 r7037 325 325 ! need to be like this to compute the pressure gradient with ISF. If not, level beneath the ISF are not aligned (sum(e3t) /= depth) 326 326 ! define e3t_0 and e3w_0 as the differences between gdept and gdepw respectively 327 !$OMP PARALLEL DO schedule(static) private(jk)328 327 DO jk = 1, jpkm1 329 328 e3t_1d(jk) = gdepw_1d(jk+1)-gdepw_1d(jk) 330 329 END DO 331 330 e3t_1d(jpk) = e3t_1d(jpk-1) ! we don't care because this level is masked in NEMO 332 !$OMP PARALLEL DO schedule(static) private(jk)333 331 DO jk = 2, jpk 334 332 e3w_1d(jk) = gdept_1d(jk) - gdept_1d(jk-1) … … 429 427 !$OMP END PARALLEL WORKSHARE 430 428 ELSE ! z-coordinate (zco or zps): step-like topography 431 !$OMP PARALLEL WORKSHARE 429 !$OMP PARALLEL 430 !$OMP WORKSHARE 432 431 idta(:,:) = jpkm1 433 !$OMP END PARALLELWORKSHARE432 !$OMP END WORKSHARE 434 433 DO jk = 1, jpkm1 434 !$OMP WORKSHARE 435 435 WHERE( gdept_1d(jk) < zdta(:,:) .AND. zdta(:,:) <= gdept_1d(jk+1) ) idta(:,:) = jk 436 !$OMP END WORKSHARE 436 437 END DO 438 !$OMP END PARALLEL 437 439 ENDIF 438 440 ELSE … … 506 508 507 509 ! ! local domain level and meter bathymetries (mbathy,bathy) 508 !$OMP PARALLEL WORKSHARE 510 !$OMP PARALLEL 511 !$OMP WORKSHARE 509 512 mbathy(:,:) = 0 ! set to zero extra halo points 510 513 bathy (:,:) = 0._wp ! (require for mpp case) 511 !$OMP END PARALLELWORKSHARE512 !$OMP PARALLELDO schedule(static) private(jj, ji)514 !$OMP END WORKSHARE 515 !$OMP DO schedule(static) private(jj, ji) 513 516 DO jj = 1, nlcj ! interior values 514 517 DO ji = 1, nlci … … 517 520 END DO 518 521 END DO 519 !$OMP PARALLEL WORKSHARE 522 !$OMP END DO NOWAIT 523 !$OMP WORKSHARE 520 524 risfdep(:,:)=0.e0 521 525 misfdep(:,:)=1 522 !$OMP END PARALLEL WORKSHARE 526 !$OMP END WORKSHARE NOWAIT 527 !$OMP END PARALLEL 523 528 ! 
524 529 DEALLOCATE( idta, zdta ) … … 532 537 CALL iom_get ( inum, jpdom_data, 'Bathy_level', bathy ) 533 538 CALL iom_close( inum ) 534 !$OMP PARALLEL WORKSHARE535 539 mbathy(:,:) = INT( bathy(:,:) ) 536 540 ! initialisation isf variables 541 !$OMP PARALLEL WORKSHARE 537 542 risfdep(:,:)=0._wp ; misfdep(:,:)=1 538 543 !$OMP END PARALLEL WORKSHARE … … 1196 1201 END DO 1197 1202 ELSE ! no cavity 1203 !$OMP PARALLEL 1204 !$OMP WORKSHARE 1198 1205 gde3w_0(:,:,1) = 0.5_wp * e3w_0(:,:,1) 1206 !$OMP END WORKSHARE 1199 1207 DO jk = 2, jpk 1200 !$OMP PARALLELDO schedule(static) private(jj, ji)1208 !$OMP DO schedule(static) private(jj, ji) 1201 1209 DO jj =1, jpj 1202 1210 DO ji=1, jpi … … 1205 1213 END DO 1206 1214 END DO 1215 !$OMP END PARALLEL 1207 1216 END IF 1208 1217 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/dtatsd.F90
r6140 r7037 160 160 ij0 = 101 ; ij1 = 109 ! Reduced T & S in the Alboran Sea 161 161 ii0 = 141 ; ii1 = 155 162 !$OMP PARALLEL DO schedule(static) private(jj, ji) 162 163 DO jj = mj0(ij0), mj1(ij1) 163 164 DO ji = mi0(ii0), mi1(ii1) … … 179 180 ENDIF 180 181 ! 182 !$OMP PARALLEL WORKSHARE 181 183 ptsd(:,:,:,jp_tem) = sf_tsd(jp_tem)%fnow(:,:,:) ! NO mask 182 184 ptsd(:,:,:,jp_sal) = sf_tsd(jp_sal)%fnow(:,:,:) 185 !$OMP END PARALLEL WORKSHARE 183 186 ! 184 187 IF( ln_sco ) THEN !== s- or mixed s-zps-coordinate ==! … … 191 194 ENDIF 192 195 ! 196 !$OMP PARALLEL DO schedule(static) private(jj, ji, jk, zl, jkk, zi) 193 197 DO jj = 1, jpj ! vertical interpolation of T & S 194 198 DO ji = 1, jpi … … 224 228 ELSE !== z- or zps- coordinate ==! 225 229 ! 230 !$OMP PARALLEL WORKSHARE 226 231 ptsd(:,:,:,jp_tem) = ptsd(:,:,:,jp_tem) * tmask(:,:,:) ! Mask 227 232 ptsd(:,:,:,jp_sal) = ptsd(:,:,:,jp_sal) * tmask(:,:,:) 233 !$OMP END PARALLEL WORKSHARE 228 234 ! 229 235 IF( ln_zps ) THEN ! zps-coordinate (partial steps) interpolation at the last ocean level 236 !$OMP PARALLEL DO schedule(static) private(jj, ji, ik, zl) 230 237 DO jj = 1, jpj 231 238 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DOM/istate.F90
r6748 r7037 102 102 ub (:,:,:) = 0._wp ; un (:,:,:) = 0._wp 103 103 vb (:,:,:) = 0._wp ; vn (:,:,:) = 0._wp 104 hdivn(:,:,:) = 0._wp 104 105 !$OMP END PARALLEL WORKSHARE 105 hdivn(:,:,:) = 0._wp106 106 ! 107 107 IF( cp_cfg == 'eel' ) THEN … … 145 145 ! Do it whatever the free surface method, these arrays being eventually used 146 146 ! 147 !$OMP PARALLEL WORKSHARE 147 !$OMP PARALLEL 148 !$OMP WORKSHARE 148 149 un_b(:,:) = 0._wp ; vn_b(:,:) = 0._wp 149 150 ub_b(:,:) = 0._wp ; vb_b(:,:) = 0._wp 150 !$OMP END PARALLELWORKSHARE151 !$OMP END WORKSHARE 151 152 ! 152 153 !!gm the use of umsak & vmask is not necessary belox as un, vn, ub, vb are always masked 153 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji)154 154 DO jk = 1, jpkm1 155 !$OMP DO schedule(static) private(jj, ji) 155 156 DO jj = 1, jpj 156 157 DO ji = 1, jpi … … 164 165 END DO 165 166 ! 166 !$OMP PARALLELWORKSHARE167 !$OMP WORKSHARE 167 168 un_b(:,:) = un_b(:,:) * r1_hu_n(:,:) 168 169 vn_b(:,:) = vn_b(:,:) * r1_hv_n(:,:) … … 170 171 ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:) 171 172 vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:) 172 !$OMP END PARALLEL WORKSHARE 173 !$OMP END WORKSHARE NOWAIT 174 !$OMP END PARALLEL 173 175 ! 174 176 IF( nn_timing == 1 ) CALL timing_stop('istate_init') -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynhpg.F90
r6748 r7037 230 230 ! divided by 2 later 231 231 ziceload = 0._wp 232 !$OMP PARALLEL 233 !$OMP DO schedule(static) private(jj,ji,ikt,jk) 232 234 DO jj = 1, jpj 233 235 DO ji = 1, jpi … … 242 244 END DO 243 245 END DO 244 !$OMP PARALLELWORKSHARE246 !$OMP WORKSHARE 245 247 riceload(:,:)=ziceload(:,:) ! need to be saved for diaar5 246 !$OMP END PARALLEL WORKSHARE 248 !$OMP END WORKSHARE NOWAIT 249 !$OMP END PARALLEL 247 250 248 251 CALL wrk_dealloc( jpi,jpj, 2, ztstop) … … 288 291 289 292 ! Surface value 290 !$OMP PARALLEL DO private(ji,jj, zcoef1) 293 !$OMP PARALLEL 294 !$OMP DO private(ji,jj, zcoef1) 291 295 DO jj = 2, jpjm1 292 296 DO ji = fs_2, fs_jpim1 ! vector opt. … … 304 308 ! interior value (2=<jk=<jpkm1) 305 309 DO jk = 2, jpkm1 306 !$OMP PARALLELDO private(ji,jj, zcoef1)310 !$OMP DO private(ji,jj, zcoef1) 307 311 DO jj = 2, jpjm1 308 312 DO ji = fs_2, fs_jpim1 ! vector opt. … … 321 325 END DO 322 326 END DO 323 END DO 327 !$OMP END DO NOWAIT 328 END DO 329 !$OMP END PARALLEL 324 330 ! 325 331 CALL wrk_dealloc( jpi,jpj,jpk, zhpi, zhpj ) … … 359 365 360 366 ! Surface value (also valid in partial step case) 367 !$OMP PARALLEL 368 !$OMP DO private(ji,jj, zcoef1) 361 369 DO jj = 2, jpjm1 362 370 DO ji = fs_2, fs_jpim1 ! vector opt. … … 373 381 ! interior value (2=<jk=<jpkm1) 374 382 DO jk = 2, jpkm1 383 !$OMP DO private(ji,jj, zcoef1) 375 384 DO jj = 2, jpjm1 376 385 DO ji = fs_2, fs_jpim1 ! vector opt. … … 392 401 393 402 ! partial steps correction at the last level (use gru & grv computed in zpshde.F90) 403 !$OMP DO private(ji,jj,iku,ikv,zcoef2,zcoef3) 394 404 DO jj = 2, jpjm1 395 405 DO ji = 2, jpim1 … … 412 422 END DO 413 423 END DO 424 !$OMP END DO NOWAIT 425 !$OMP END PARALLEL 414 426 ! 415 427 CALL wrk_dealloc( jpi,jpj,jpk, zhpi, zhpj ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynldf_lap_blp.F90
r6748 r7037 75 75 ! 76 76 ! ! =============== 77 !$OMP PARALLEL 77 78 DO jk = 1, jpkm1 ! Horizontal slab 78 79 ! ! =============== 79 !$OMP PARALLELDO schedule(static) private(jj, ji)80 !$OMP DO schedule(static) private(jj, ji) 80 81 DO jj = 2, jpj 81 82 DO ji = fs_2, jpi ! vector opt. … … 94 95 END DO 95 96 ! 96 !$OMP PARALLELDO schedule(static) private(jj, ji)97 !$OMP DO schedule(static) private(jj, ji) 97 98 DO jj = 2, jpjm1 ! - curl( curl) + grad( div ) 98 99 DO ji = fs_2, fs_jpim1 ! vector opt. … … 108 109 ! ! =============== 109 110 END DO ! End of slab 111 !$OMP END PARALLEL 110 112 ! ! =============== 111 113 CALL wrk_dealloc( jpi, jpj, zcur, zdiv ) … … 146 148 ENDIF 147 149 ! 150 !$OMP PARALLEL WORKSHARE 148 151 zulap(:,:,:) = 0._wp 149 152 zvlap(:,:,:) = 0._wp 153 !$OMP END PARALLEL WORKSHARE 150 154 ! 151 155 CALL dyn_ldf_lap( kt, pub, pvb, zulap, zvlap, 1 ) ! rotated laplacian applied to ptb (output in zlap) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynnxt.F90
r6748 r7037 115 115 ! Ensure below that barotropic velocities match time splitting estimate 116 116 ! Compute actual transport and replace it with ts estimate at "after" time step 117 !$OMP PARALLEL WORKSHARE 117 !$OMP PARALLEL 118 !$OMP WORKSHARE 118 119 zue(:,:) = e3u_a(:,:,1) * ua(:,:,1) * umask(:,:,1) 119 120 zve(:,:) = e3v_a(:,:,1) * va(:,:,1) * vmask(:,:,1) 120 !$OMP END PARALLELWORKSHARE121 !$OMP END WORKSHARE 121 122 DO jk = 2, jpkm1 122 zue(:,:) = zue(:,:) + e3u_a(:,:,jk) * ua(:,:,jk) * umask(:,:,jk) 123 zve(:,:) = zve(:,:) + e3v_a(:,:,jk) * va(:,:,jk) * vmask(:,:,jk) 123 !$OMP DO schedule(static) private(jj,ji) 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 zue(ji,jj) = zue(ji,jj) + e3u_a(ji,jj,jk) * ua(ji,jj,jk) * umask(ji,jj,jk) 127 zve(ji,jj) = zve(ji,jj) + e3v_a(ji,jj,jk) * va(ji,jj,jk) * vmask(ji,jj,jk) 128 END DO 129 END DO 124 130 END DO 125 !$OMP PARALLELDO schedule(static) private(jk)131 !$OMP DO schedule(static) private(jk) 126 132 DO jk = 1, jpkm1 127 133 ua(:,:,jk) = ( ua(:,:,jk) - zue(:,:) * r1_hu_a(:,:) + ua_b(:,:) ) * umask(:,:,jk) 128 134 va(:,:,jk) = ( va(:,:,jk) - zve(:,:) * r1_hv_a(:,:) + va_b(:,:) ) * vmask(:,:,jk) 129 135 END DO 136 !$OMP END DO NOWAIT 137 !$OMP END PARALLEL 130 138 ! 131 139 IF( .NOT.ln_bt_fw ) THEN … … 186 194 ! ------------------------------------------ 187 195 IF( neuler == 0 .AND. kt == nit000 ) THEN !* Euler at first time-step: only swap 188 !$OMP PARALLEL DO schedule(static) private(jk) 196 !$OMP PARALLEL 197 !$OMP DO schedule(static) private(jk) 189 198 DO jk = 1, jpkm1 190 199 un(:,:,jk) = ua(:,:,jk) ! 
un <-- ua 191 200 vn(:,:,jk) = va(:,:,jk) 192 201 END DO 202 !$OMP END DO NOWAIT 193 203 IF(.NOT.ln_linssh ) THEN 194 !$OMP PARALLELDO schedule(static) private(jk)204 !$OMP DO schedule(static) private(jk) 195 205 DO jk = 1, jpkm1 196 206 e3t_b(:,:,jk) = e3t_n(:,:,jk) … … 198 208 e3v_b(:,:,jk) = e3v_n(:,:,jk) 199 209 END DO 210 !$OMP END DO NOWAIT 200 211 ENDIF 212 !$OMP END PARALLEL 201 213 ELSE !* Leap-Frog : Asselin filter and swap 202 214 ! ! =============! … … 224 236 ! ---------------------------------------------------- 225 237 IF( ln_dynspg_ts .AND. ln_bt_fw ) THEN ! No asselin filtering on thicknesses if forward time splitting 226 e3t_b(:,:,1:jpkm1) = e3t_n(:,:,1:jpkm1) 238 !$OMP PARALLEL DO schedule(static) private(jj,ji) 239 DO jj = 1, jpj 240 DO ji = 1, jpi 241 e3t_b(ji,jj,1:jpkm1) = e3t_n(ji,jj,1:jpkm1) 242 END DO 243 END DO 227 244 ELSE 228 245 !$OMP PARALLEL DO schedule(static) private(jk) … … 234 251 zcoef = atfp * rdt * r1_rau0 235 252 IF ( .NOT. ln_isf ) THEN ! if no ice shelf melting 236 e3t_b(:,:,1) = e3t_b(:,:,1) - zcoef * ( emp_b(:,:) - emp(:,:) & 237 & - rnf_b(:,:) + rnf(:,:) ) * tmask(:,:,1) 253 !$OMP PARALLEL DO schedule(static) private(jj,ji) 254 DO jj = 1, jpj 255 DO ji = 1, jpi 256 e3t_b(ji,jj,1) = e3t_b(ji,jj,1) - zcoef * ( emp_b(ji,jj) - emp(ji,jj) & 257 & - rnf_b(ji,jj) + rnf(ji,jj) ) * tmask(ji,jj,1) 258 END DO 259 END DO 238 260 ELSE ! 
if ice shelf melting 261 !$OMP PARALLEL DO schedule(static) private(jj,ji,ikt) 239 262 DO jj = 1, jpj 240 263 DO ji = 1, jpi … … 273 296 CALL dom_vvl_interpol( e3t_b(:,:,:), ze3u_f, 'U' ) 274 297 CALL dom_vvl_interpol( e3t_b(:,:,:), ze3v_f, 'V' ) 275 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zue3a, zve3a, zue3n, zve3n, zue3b, zve3b, zuf, zvf) 298 !$OMP PARALLEL 299 !$OMP DO schedule(static) private(jk, jj, ji, zue3a, zve3a, zue3n, zve3n, zue3b, zve3b, zuf, zvf) 276 300 DO jk = 1, jpkm1 277 301 DO jj = 1, jpj … … 294 318 END DO 295 319 END DO 320 !$OMP WORKSHARE 296 321 e3u_b(:,:,1:jpkm1) = ze3u_f(:,:,1:jpkm1) ! e3u_b <-- filtered scale factor 297 322 e3v_b(:,:,1:jpkm1) = ze3v_f(:,:,1:jpkm1) 323 !$OMP END WORKSHARE NOWAIT 324 !$OMP END PARALLEL 298 325 ! 299 326 CALL wrk_dealloc( jpi,jpj,jpk, ze3u_f, ze3v_f ) … … 305 332 ! Revert "before" velocities to time split estimate 306 333 ! Doing it here also means that asselin filter contribution is removed 334 !$OMP PARALLEL 335 !$OMP WORKSHARE 307 336 zue(:,:) = e3u_b(:,:,1) * ub(:,:,1) * umask(:,:,1) 308 337 zve(:,:) = e3v_b(:,:,1) * vb(:,:,1) * vmask(:,:,1) 338 !$OMP END WORKSHARE 309 339 DO jk = 2, jpkm1 310 zue(:,:) = zue(:,:) + e3u_b(:,:,jk) * ub(:,:,jk) * umask(:,:,jk) 311 zve(:,:) = zve(:,:) + e3v_b(:,:,jk) * vb(:,:,jk) * vmask(:,:,jk) 312 END DO 313 !$OMP PARALLEL DO schedule(static) private(jk) 340 !$OMP DO schedule(static) private(jj, ji) 341 DO jj = 1, jpj 342 DO ji = 1, jpi 343 zue(ji,jj) = zue(ji,jj) + e3u_b(ji,jj,jk) * ub(ji,jj,jk) * umask(ji,jj,jk) 344 zve(ji,jj) = zve(ji,jj) + e3v_b(ji,jj,jk) * vb(ji,jj,jk) * vmask(ji,jj,jk) 345 END DO 346 END DO 347 END DO 348 !$OMP DO schedule(static) private(jk) 314 349 DO jk = 1, jpkm1 315 350 ub(:,:,jk) = ub(:,:,jk) - (zue(:,:) * r1_hu_n(:,:) - un_b(:,:)) * umask(:,:,jk) 316 351 vb(:,:,jk) = vb(:,:,jk) - (zve(:,:) * r1_hv_n(:,:) - vn_b(:,:)) * vmask(:,:,jk) 317 352 END DO 353 !$OMP END DO NOWAIT 354 !$OMP END PARALLEL 318 355 ENDIF 319 356 ! … … 326 363 ! 
327 364 IF(.NOT.ln_linssh ) THEN 365 !$OMP PARALLEL 366 !$OMP WORKSHARE 328 367 hu_b(:,:) = e3u_b(:,:,1) * umask(:,:,1) 329 368 hv_b(:,:) = e3v_b(:,:,1) * vmask(:,:,1) 369 !$OMP END WORKSHARE 330 370 DO jk = 2, jpkm1 331 hu_b(:,:) = hu_b(:,:) + e3u_b(:,:,jk) * umask(:,:,jk) 332 hv_b(:,:) = hv_b(:,:) + e3v_b(:,:,jk) * vmask(:,:,jk) 371 !$OMP DO schedule(static) private(jj, ji) 372 DO jj = 1, jpj 373 DO ji = 1, jpi 374 hu_b(ji,jj) = hu_b(ji,jj) + e3u_b(ji,jj,jk) * umask(ji,jj,jk) 375 hv_b(ji,jj) = hv_b(ji,jj) + e3v_b(ji,jj,jk) * vmask(ji,jj,jk) 376 END DO 377 END DO 378 !$OMP END DO 333 379 END DO 380 !$OMP WORKSHARE 334 381 r1_hu_b(:,:) = ssumask(:,:) / ( hu_b(:,:) + 1._wp - ssumask(:,:) ) 335 382 r1_hv_b(:,:) = ssvmask(:,:) / ( hv_b(:,:) + 1._wp - ssvmask(:,:) ) 336 ENDIF 337 ! 338 !$OMP PARALLEL WORKSHARE 383 !$OMP END WORKSHARE 384 !$OMP END PARALLEL 385 ENDIF 386 ! 387 !$OMP PARALLEL 388 !$OMP WORKSHARE 339 389 un_b(:,:) = e3u_a(:,:,1) * un(:,:,1) * umask(:,:,1) 340 390 ub_b(:,:) = e3u_b(:,:,1) * ub(:,:,1) * umask(:,:,1) 341 391 vn_b(:,:) = e3v_a(:,:,1) * vn(:,:,1) * vmask(:,:,1) 342 392 vb_b(:,:) = e3v_b(:,:,1) * vb(:,:,1) * vmask(:,:,1) 343 !$OMP END PARALLELWORKSHARE393 !$OMP END WORKSHARE 344 394 DO jk = 2, jpkm1 345 un_b(:,:) = un_b(:,:) + e3u_a(:,:,jk) * un(:,:,jk) * umask(:,:,jk) 346 ub_b(:,:) = ub_b(:,:) + e3u_b(:,:,jk) * ub(:,:,jk) * umask(:,:,jk) 347 vn_b(:,:) = vn_b(:,:) + e3v_a(:,:,jk) * vn(:,:,jk) * vmask(:,:,jk) 348 vb_b(:,:) = vb_b(:,:) + e3v_b(:,:,jk) * vb(:,:,jk) * vmask(:,:,jk) 395 !$OMP DO schedule(static) private(jj, ji) 396 DO jj = 1, jpj 397 DO ji = 1, jpi 398 un_b(ji,jj) = un_b(ji,jj) + e3u_a(ji,jj,jk) * un(ji,jj,jk) * umask(ji,jj,jk) 399 ub_b(ji,jj) = ub_b(ji,jj) + e3u_b(ji,jj,jk) * ub(ji,jj,jk) * umask(ji,jj,jk) 400 vn_b(ji,jj) = vn_b(ji,jj) + e3v_a(ji,jj,jk) * vn(ji,jj,jk) * vmask(ji,jj,jk) 401 vb_b(ji,jj) = vb_b(ji,jj) + e3v_b(ji,jj,jk) * vb(ji,jj,jk) * vmask(ji,jj,jk) 402 END DO 403 END DO 349 404 END DO 350 !$OMP PARALLELWORKSHARE405 
!$OMP WORKSHARE 351 406 un_b(:,:) = un_b(:,:) * r1_hu_a(:,:) 352 407 vn_b(:,:) = vn_b(:,:) * r1_hv_a(:,:) 353 408 ub_b(:,:) = ub_b(:,:) * r1_hu_b(:,:) 354 409 vb_b(:,:) = vb_b(:,:) * r1_hv_b(:,:) 355 !$OMP END PARALLEL WORKSHARE 410 !$OMP END WORKSHARE NOWAIT 411 !$OMP END PARALLEL 356 412 ! 357 413 IF( .NOT.ln_dynspg_ts ) THEN ! output the barotropic currents -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg.F90
r6748 r7037 133 133 zintp = REAL( MOD( kt-1, nn_fsbc ) ) / REAL( nn_fsbc ) 134 134 zgrau0r = - grav * r1_rau0 135 !$OMP PARALLEL 136 !$OMP WORKSHARE 135 137 zpice(:,:) = ( zintp * snwice_mass(:,:) + ( 1.- zintp ) * snwice_mass_b(:,:) ) * zgrau0r 136 !$OMP PARALLEL DO schedule(static) private(jj, ji) 138 !$OMP END WORKSHARE 139 !$OMP DO schedule(static) private(jj, ji) 137 140 DO jj = 2, jpjm1 138 141 DO ji = fs_2, fs_jpim1 ! vector opt. … … 141 144 END DO 142 145 END DO 146 !$OMP END DO NOWAIT 147 !$OMP END PARALLEL 143 148 ! 144 149 CALL wrk_dealloc( jpi,jpj, zpice ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynspg_ts.F90
r6748 r7037 72 72 REAL(wp), ALLOCATABLE, SAVE, DIMENSION(:,:) :: ftnw, ftne !: triad of coriolis parameter 73 73 REAL(wp), ALLOCATABLE, SAVE, DIMENSION(:,:) :: ftsw, ftse !: (only used with een vorticity scheme) 74 75 74 !! Time filtered arrays at baroclinic time step: 76 75 REAL(wp), PUBLIC, ALLOCATABLE, SAVE, DIMENSION(:,:) :: un_adv , vn_adv !: Advection vel. at "now" barocl. step … … 244 243 CALL lbc_lnk( zwz, 'F', 1._wp ) 245 244 ! 245 !$OMP PARALLEL 246 !$OMP WORKSHARE 246 247 ftne(1,:) = 0._wp ; ftnw(1,:) = 0._wp ; ftse(1,:) = 0._wp ; ftsw(1,:) = 0._wp 247 248 !$OMP PARALLEL DO schedule(static) private(jj, ji) 248 !$OMP END WORKSHARE 249 250 !$OMP DO schedule(static) private(jj, ji) 249 251 DO jj = 2, jpj 250 252 DO ji = 2, jpi … … 255 257 END DO 256 258 END DO 259 !$OMP END DO NOWAIT 260 !$OMP END PARALLEL 257 261 ! 258 262 ELSE !== all other schemes (ENE, ENS, MIX) … … 272 276 ! zhf(:,:) = gdepw_0(:,:,jk+1) 273 277 ELSE 278 !$OMP PARALLEL WORKSHARE 274 279 zhf(:,:) = hbatf(:,:) 280 !$OMP END PARALLEL WORKSHARE 275 281 END IF 276 282 277 !$OMP PARALLEL DO schedule(static) private(jj, ji) 283 !$OMP PARALLEL 284 !$OMP DO schedule(static) private(jj, ji) 278 285 DO jj = 1, jpjm1 279 286 DO ji = 1, jpi … … 283 290 284 291 DO jk = 1, jpkm1 285 !$OMP PARALLELDO schedule(static) private(jj, ji)292 !$OMP DO schedule(static) private(jj, ji) 286 293 DO jj = 1, jpjm1 287 294 DO ji = 1, jpi … … 289 296 END DO 290 297 END DO 291 END DO 298 !$OMP END DO NOWAIT 299 END DO 300 !$OMP END PARALLEL 292 301 CALL lbc_lnk( zhf, 'F', 1._wp ) 293 302 ! JC: TBC. 
hf should be greater than 0 294 !$OMP PARALLEL DO schedule(static) private(jj, ji) 303 !$OMP PARALLEL 304 !$OMP DO schedule(static) private(jj, ji) 295 305 DO jj = 1, jpj 296 306 DO ji = 1, jpi … … 298 308 END DO 299 309 END DO 300 !$OMP PARALLELWORKSHARE310 !$OMP WORKSHARE 301 311 zwz(:,:) = ff(:,:) * zwz(:,:) 302 !$OMP END PARALLEL WORKSHARE 312 !$OMP END WORKSHARE NOWAIT 313 !$OMP END PARALLEL 303 314 ENDIF 304 315 ENDIF … … 318 329 ! !* e3*d/dt(Ua) (Vertically integrated) 319 330 ! ! -------------------------------------------------- 320 !$OMP PARALLEL WORKSHARE 331 !$OMP PARALLEL 332 !$OMP WORKSHARE 321 333 zu_frc(:,:) = 0._wp 322 334 zv_frc(:,:) = 0._wp 323 !$OMP END PARALLELWORKSHARE335 !$OMP END WORKSHARE 324 336 ! 325 337 DO jk = 1, jpkm1 326 !$OMP PARALLELDO schedule(static) private(jj,ji)327 DO jj=1,jpj328 DO ji=1,jpi329 zu_frc(ji,jj) = zu_frc(ji,jj) + e3u_n(ji,jj,jk) * ua(ji,jj,jk) * umask(ji,jj,jk)330 zv_frc(ji,jj) = zv_frc(ji,jj) + e3v_n(ji,jj,jk) * va(ji,jj,jk) * vmask(ji,jj,jk)331 END DO332 END DO338 !$OMP DO schedule(static) private(jj,ji) 339 DO jj=1,jpj 340 DO ji=1,jpi 341 zu_frc(ji,jj) = zu_frc(ji,jj) + e3u_n(ji,jj,jk) * ua(ji,jj,jk) * umask(ji,jj,jk) 342 zv_frc(ji,jj) = zv_frc(ji,jj) + e3v_n(ji,jj,jk) * va(ji,jj,jk) * vmask(ji,jj,jk) 343 END DO 344 END DO 333 345 END DO 334 346 ! 335 !$OMP PARALLELWORKSHARE347 !$OMP WORKSHARE 336 348 zu_frc(:,:) = zu_frc(:,:) * r1_hu_n(:,:) 337 349 zv_frc(:,:) = zv_frc(:,:) * r1_hv_n(:,:) 338 !$OMP END PARALLELWORKSHARE350 !$OMP END WORKSHARE 339 351 ! 340 352 ! 341 353 ! !* baroclinic momentum trend (remove the vertical mean trend) 342 !$OMP PARALLELDO schedule(static) private(jk,jj,ji)354 !$OMP DO schedule(static) private(jk,jj,ji) 343 355 DO jk = 1, jpkm1 ! ----------------------------------------------------------- 344 356 DO jj = 2, jpjm1 … … 349 361 END DO 350 362 END DO 363 !$OMP END DO NOWAIT 351 364 ! !* barotropic Coriolis trends (vorticity scheme dependent) 352 365 ! ! 
-------------------------------------------------------- 353 !$OMP PARALLELWORKSHARE366 !$OMP WORKSHARE 354 367 zwx(:,:) = un_b(:,:) * hu_n(:,:) * e2u(:,:) ! now fluxes 355 368 zwy(:,:) = vn_b(:,:) * hv_n(:,:) * e1v(:,:) 356 !$OMP END PARALLEL WORKSHARE 369 !$OMP END WORKSHARE NOWAIT 370 !$OMP END PARALLEL 357 371 ! 358 372 IF( ln_dynvor_ene .OR. ln_dynvor_mix ) THEN ! energy conserving or mixed scheme … … 404 418 IF( .NOT.ln_linssh ) THEN ! Variable volume : remove surface pressure gradient 405 419 IF( ln_wd ) THEN ! Calculating and applying W/D gravity filters 420 !$OMP PARALLEL 421 !$OMP WORKSHARE 406 422 wduflt1(:,:) = 1.0_wp 407 423 wdvflt1(:,:) = 1.0_wp 408 !$OMP PARALLEL DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 424 !$OMP END WORKSHARE 425 !$OMP DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 409 426 DO jj = 2, jpjm1 410 427 DO ji = 2, jpim1 … … 443 460 END DO 444 461 END DO 462 !$OMP END DO NOWAIT 463 !$OMP END PARALLEL 445 464 446 465 CALL lbc_lnk( zcpx, 'U', 1._wp ) ; CALL lbc_lnk( zcpy, 'V', 1._wp ) … … 502 521 ! Note that the "unclipped" bottom friction parameter is used even with explicit drag 503 522 IF( ln_wd ) THEN 504 zu_frc(:,:) = zu_frc(:,:) + MAX(r1_hu_n(:,:) * bfrua(:,:),-1._wp / rdtbt) * zwx(:,:) 505 zv_frc(:,:) = zv_frc(:,:) + MAX(r1_hv_n(:,:) * bfrva(:,:),-1._wp / rdtbt) * zwy(:,:) 523 !$OMP PARALLEL DO schedule(static) private(jj,ji) 524 DO jj = 1, jpj 525 DO ji = 1, jpi ! vector opt. 526 zu_frc(ji,jj) = zu_frc(ji,jj) + MAX(r1_hu_n(ji,jj) * bfrua(ji,jj),-1._wp / rdtbt) * zwx(ji,jj) 527 zv_frc(ji,jj) = zv_frc(ji,jj) + MAX(r1_hv_n(ji,jj) * bfrva(ji,jj),-1._wp / rdtbt) * zwy(ji,jj) 528 END DO 529 END DO 506 530 ELSE 531 !$OMP PARALLEL WORKSHARE 507 532 zu_frc(:,:) = zu_frc(:,:) + r1_hu_n(:,:) * bfrua(:,:) * zwx(:,:) 508 533 zv_frc(:,:) = zv_frc(:,:) + r1_hv_n(:,:) * bfrva(:,:) * zwy(:,:) 534 !$OMP END PARALLEL WORKSHARE 509 535 END IF 510 536 ! … … 579 605 ! ! 
Surface net water flux and rivers 580 606 IF (ln_bt_fw) THEN 607 !$OMP PARALLEL WORKSHARE 581 608 zssh_frc(:,:) = zraur * ( emp(:,:) - rnf(:,:) + fwfisf(:,:) ) 609 !$OMP END PARALLEL WORKSHARE 582 610 ELSE 611 !$OMP PARALLEL WORKSHARE 583 612 zssh_frc(:,:) = zraur * z1_2 * ( emp(:,:) + emp_b(:,:) - rnf(:,:) - rnf_b(:,:) & 584 613 & + fwfisf(:,:) + fwfisf_b(:,:) ) 614 !$OMP END PARALLEL WORKSHARE 585 615 ENDIF 586 616 #if defined key_asminc 587 617 ! ! Include the IAU weighted SSH increment 588 618 IF( lk_asminc .AND. ln_sshinc .AND. ln_asmiau ) THEN 619 !$OMP PARALLEL WORKSHARE 589 620 zssh_frc(:,:) = zssh_frc(:,:) - ssh_iau(:,:) 621 !$OMP END PARALLEL WORKSHARE 590 622 ENDIF 591 623 #endif … … 617 649 IF( ln_wd ) THEN !preserve the positivity of water depth 618 650 !ssh[b,n,a] should have already been processed for this 619 !$OMP PARALLEL WORKSHARE 620 sshbb_e(:,:) = MAX(sshbb_e(:,:), rn_wdmin1 - bathy(:,:)) 621 sshb_e(:,:) = MAX(sshb_e(:,:) , rn_wdmin1 - bathy(:,:)) 622 !$OMP END PARALLEL WORKSHARE 651 !$OMP PARALLEL DO schedule(static) private(jj,ji) 652 DO jj = 1, jpj 653 DO ji = 1, jpi ! vector opt. 654 sshbb_e(ji,jj) = MAX(sshbb_e(ji,jj), rn_wdmin1 - bathy(ji,jj)) 655 sshb_e(ji,jj) = MAX(sshb_e(ji,jj) , rn_wdmin1 - bathy(ji,jj)) 656 END DO 657 END DO 623 658 ENDIF 624 659 ! … … 688 723 ! ! ------------------ 689 724 ! Extrapolate Sea Level at step jit+0.5: 725 !$OMP PARALLEL 726 !$OMP WORKSHARE 690 727 zsshp2_e(:,:) = za1 * sshn_e(:,:) + za2 * sshb_e(:,:) + za3 * sshbb_e(:,:) 728 !$OMP END WORKSHARE 691 729 ! 692 !$OMP PARALLELDO schedule(static) private(jj,ji)730 !$OMP DO schedule(static) private(jj,ji) 693 731 DO jj = 2, jpjm1 ! Sea Surface Height at u- & v-points 694 732 DO ji = 2, fs_jpim1 ! Vector opt. … … 701 739 END DO 702 740 END DO 741 !$OMP END DO NOWAIT 742 !$OMP END PARALLEL 703 743 CALL lbc_lnk_multi( zwx, 'U', 1._wp, zwy, 'V', 1._wp ) 704 744 ! 
… … 708 748 !$OMP END PARALLEL WORKSHARE 709 749 IF( ln_wd ) THEN 710 zhup2_e(:,:) = MAX(zhup2_e (:,:), rn_wdmin1) 711 zhvp2_e(:,:) = MAX(zhvp2_e (:,:), rn_wdmin1) 750 !$OMP PARALLEL DO schedule(static) private(jj,ji) 751 DO jj = 1, jpj 752 DO ji = 1, jpi ! vector opt. 753 zhup2_e(ji,jj) = MAX(zhup2_e (ji,jj), rn_wdmin1) 754 zhvp2_e(ji,jj) = MAX(zhvp2_e (ji,jj), rn_wdmin1) 755 END DO 756 END DO 712 757 END IF 713 758 ELSE … … 756 801 ! Sum over sub-time-steps to compute advective velocities 757 802 za2 = wgtbtp2(jn) 758 !$OMP PARALLEL WORKSHARE 803 !$OMP PARALLEL 804 !$OMP WORKSHARE 759 805 un_adv(:,:) = un_adv(:,:) + za2 * zwx(:,:) * r1_e2u(:,:) 760 806 vn_adv(:,:) = vn_adv(:,:) + za2 * zwy(:,:) * r1_e1v(:,:) 761 !$OMP END PARALLEL WORKSHARE807 !$OMP END WORKSHARE NOWAIT 762 808 ! 763 809 ! Set next sea level: 764 !$OMP PARALLELDO schedule(static) private(jj,ji)810 !$OMP DO schedule(static) private(jj,ji) 765 811 DO jj = 2, jpjm1 766 812 DO ji = fs_2, fs_jpim1 ! vector opt. … … 769 815 END DO 770 816 END DO 771 !$OMP PARALLELWORKSHARE817 !$OMP WORKSHARE 772 818 ssha_e(:,:) = ( sshn_e(:,:) - rdtbt * ( zssh_frc(:,:) + zhdiv(:,:) ) ) * ssmask(:,:) 773 !$OMP END PARALLEL WORKSHARE 774 IF( ln_wd ) ssha_e(:,:) = MAX(ssha_e(:,:), rn_wdmin1 - bathy(:,:)) 819 !$OMP END WORKSHARE NOWAIT 820 !$OMP END PARALLEL 821 IF( ln_wd ) THEN 822 !$OMP PARALLEL DO schedule(static) private(jj,ji) 823 DO jj = 1, jpj 824 DO ji = 1, jpi ! vector opt. 825 ssha_e(ji,jj) = MAX(ssha_e(ji,jj), rn_wdmin1 - bathy(ji,jj)) 826 END DO 827 END DO 828 END IF 775 829 CALL lbc_lnk( ssha_e, 'T', 1._wp ) 776 830 … … 821 875 & + za2 * sshb_e(:,:) + za3 * sshbb_e(:,:) 822 876 IF( ln_wd ) THEN ! 
Calculating and applying W/D gravity filters 877 !$OMP PARALLEL 878 !$OMP WORKSHARE 823 879 wduflt1(:,:) = 1._wp 824 880 wdvflt1(:,:) = 1._wp 825 !$OMP PARALLEL DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 881 !$OMP END WORKSHARE 882 !$OMP DO schedule(static) private(jj,ji,ll_tmp1,ll_tmp2) 826 883 DO jj = 2, jpjm1 827 884 DO ji = 2, jpim1 … … 859 916 END DO 860 917 END DO 918 !$OMP END DO NOWAIT 919 !$OMP END PARALLEL 861 920 CALL lbc_lnk( zcpx, 'U', 1._wp ) ; CALL lbc_lnk( zcpy, 'V', 1._wp ) 862 921 ENDIF … … 880 939 881 940 IF( ln_wd ) THEN 882 zhust_e(:,:) = MAX(zhust_e (:,:), rn_wdmin1 ) 883 zhvst_e(:,:) = MAX(zhvst_e (:,:), rn_wdmin1 ) 941 !$OMP PARALLEL DO schedule(static) private(jj,ji) 942 DO jj = 1, jpj 943 DO ji = 1, jpi ! vector opt. 944 zhust_e(ji,jj) = MAX(zhust_e (ji,jj), rn_wdmin1 ) 945 zhvst_e(ji,jj) = MAX(zhvst_e (ji,jj), rn_wdmin1 ) 946 END DO 947 END DO 884 948 END IF 885 949 … … 953 1017 zu_trd(:,:) = zu_trd(:,:) + bfrua(:,:) * un_e(:,:) * hur_e(:,:) 954 1018 zv_trd(:,:) = zv_trd(:,:) + bfrva(:,:) * vn_e(:,:) * hvr_e(:,:) 955 !$OMP END PARALLEL WORKSHARE956 1019 ! 957 1020 ! Add top stresses: 958 !$OMP PARALLEL WORKSHARE959 1021 zu_trd(:,:) = zu_trd(:,:) + tfrua(:,:) * un_e(:,:) * hur_e(:,:) 960 1022 zv_trd(:,:) = zv_trd(:,:) + tfrva(:,:) * vn_e(:,:) * hvr_e(:,:) … … 1039 1101 IF( .NOT.ln_linssh ) THEN !* Update ocean depth (variable volume case only) 1040 1102 IF( ln_wd ) THEN 1041 hu_e (:,:) = MAX(hu_0(:,:) + zsshu_a(:,:), rn_wdmin1) 1042 hv_e (:,:) = MAX(hv_0(:,:) + zsshv_a(:,:), rn_wdmin1) 1103 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1104 DO jj = 1, jpj 1105 DO ji = 1, jpi ! vector opt. 1106 hu_e (ji,jj) = MAX(hu_0(ji,jj) + zsshu_a(ji,jj), rn_wdmin1) 1107 hv_e (ji,jj) = MAX(hv_0(ji,jj) + zsshv_a(ji,jj), rn_wdmin1) 1108 END DO 1109 END DO 1043 1110 ELSE 1044 1111 !$OMP PARALLEL WORKSHARE … … 1150 1217 CALL lbc_lnk_multi( zsshu_a, 'U', 1._wp, zsshv_a, 'V', 1._wp ) ! Boundary conditions 1151 1218 ! 
1152 !$OMP PARALLEL DO schedule(static) private(jk) 1219 !$OMP PARALLEL 1220 !$OMP DO schedule(static) private(jk) 1153 1221 DO jk=1,jpkm1 1154 1222 ua(:,:,jk) = ua(:,:,jk) + r1_hu_n(:,:) * ( ua_b(:,:) - ub_b(:,:) * hu_b(:,:) ) * z1_2dt_b 1155 1223 va(:,:,jk) = va(:,:,jk) + r1_hv_n(:,:) * ( va_b(:,:) - vb_b(:,:) * hv_b(:,:) ) * z1_2dt_b 1156 1224 END DO 1225 !$OMP END DO NOWAIT 1157 1226 ! Save barotropic velocities not transport: 1158 !$OMP PARALLELWORKSHARE1227 !$OMP WORKSHARE 1159 1228 ua_b(:,:) = ua_b(:,:) / ( hu_0(:,:) + zsshu_a(:,:) + 1._wp - ssumask(:,:) ) 1160 1229 va_b(:,:) = va_b(:,:) / ( hv_0(:,:) + zsshv_a(:,:) + 1._wp - ssvmask(:,:) ) 1161 !$OMP END PARALLEL WORKSHARE 1230 !$OMP END WORKSHARE NOWAIT 1231 !$OMP END PARALLEL 1162 1232 ENDIF 1163 1233 ! … … 1179 1249 IF( .NOT.Agrif_Root() .AND. ln_bt_fw ) THEN 1180 1250 IF( Agrif_NbStepint() == 0 ) THEN 1251 !$OMP PARALLEL WORKSHARE 1181 1252 ub2_i_b(:,:) = 0._wp 1182 1253 vb2_i_b(:,:) = 0._wp 1254 !$OMP END PARALLEL WORKSHARE 1183 1255 END IF 1184 1256 ! 1185 1257 za1 = 1._wp / REAL(Agrif_rhot(), wp) 1258 !$OMP PARALLEL WORKSHARE 1186 1259 ub2_i_b(:,:) = ub2_i_b(:,:) + za1 * ub2_b(:,:) 1187 1260 vb2_i_b(:,:) = vb2_i_b(:,:) + za1 * vb2_b(:,:) 1261 !$OMP END PARALLEL WORKSHARE 1188 1262 ENDIF 1189 1263 #endif -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynvor.F90
r6748 r7037 322 322 323 323 IF( ln_sco ) THEN 324 !$OMP PARALLEL WORKSHARE 324 325 zwz(:,:) = zwz(:,:) / e3f_n(:,:,jk) 325 326 zwx(:,:) = e2u(:,:) * e3u_n(:,:,jk) * un(:,:,jk) 326 327 zwy(:,:) = e1v(:,:) * e3v_n(:,:,jk) * vn(:,:,jk) 328 !$OMP END PARALLEL WORKSHARE 327 329 ELSE 328 330 !$OMP PARALLEL WORKSHARE … … 521 523 SELECT CASE( nn_een_e3f ) ! == reciprocal of e3 at F-point 522 524 CASE ( 0 ) ! original formulation (masked averaging of e3t divided by 4) 525 !$OMP PARALLEL DO private(jj,ji,ze3) 523 526 DO jj = 1, jpjm1 524 527 DO ji = 1, fs_jpim1 ! vector opt. … … 531 534 END DO 532 535 CASE ( 1 ) ! new formulation (masked averaging of e3t divided by the sum of mask) 536 !$OMP PARALLEL DO private(jj,ji,ze3,zmsk) 533 537 DO jj = 1, jpjm1 534 538 DO ji = 1, fs_jpim1 ! vector opt. … … 546 550 SELECT CASE( kvor ) !== vorticity considered ==! 547 551 CASE ( np_COR ) !* Coriolis (planetary vorticity) 552 !$OMP PARALLEL DO private(jj,ji) 548 553 DO jj = 1, jpjm1 549 554 DO ji = 1, fs_jpim1 ! vector opt. … … 552 557 END DO 553 558 CASE ( np_RVO ) !* relative vorticity 559 !$OMP PARALLEL DO private(jj,ji) 554 560 DO jj = 1, jpjm1 555 561 DO ji = 1, fs_jpim1 ! vector opt. … … 560 566 END DO 561 567 CASE ( np_MET ) !* metric term 568 !$OMP PARALLEL DO private(jj,ji) 562 569 DO jj = 1, jpjm1 563 570 DO ji = 1, fs_jpim1 ! vector opt. … … 568 575 END DO 569 576 CASE ( np_CRV ) !* Coriolis + relative vorticity 577 !$OMP PARALLEL DO private(jj,ji) 570 578 DO jj = 1, jpjm1 571 579 DO ji = 1, fs_jpim1 ! vector opt. … … 576 584 END DO 577 585 CASE ( np_CME ) !* Coriolis + metric 586 !$OMP PARALLEL DO private(jj,ji) 578 587 DO jj = 1, jpjm1 579 588 DO ji = 1, fs_jpim1 ! vector opt. … … 589 598 ! 590 599 IF( ln_dynvor_msk ) THEN !== mask/unmask vorticity ==! 600 !$OMP PARALLEL DO private(jj,ji) 591 601 DO jj = 1, jpjm1 592 602 DO ji = 1, fs_jpim1 ! vector opt. … … 599 609 ! 600 610 ! !== horizontal fluxes ==! 
601 zwx(:,:) = e2u(:,:) * e3u_n(:,:,jk) * un(:,:,jk) 602 zwy(:,:) = e1v(:,:) * e3v_n(:,:,jk) * vn(:,:,jk) 611 !$OMP PARALLEL DO private(jj,ji) 612 DO jj = 1, jpj 613 DO ji = 1, jpi 614 zwx(ji,jj) = e2u(ji,jj) * e3u_n(ji,jj,jk) * un(ji,jj,jk) 615 zwy(ji,jj) = e1v(ji,jj) * e3v_n(ji,jj,jk) * vn(ji,jj,jk) 616 END DO 617 END DO 603 618 604 619 ! !== compute and add the vorticity term trend =! … … 611 626 ztsw(ji,jj) = zwz(ji ,jj-1) + zwz(ji-1,jj-1) + zwz(ji-1,jj ) 612 627 END DO 628 !$OMP PARALLEL 629 !$OMP DO private(jj,ji) 613 630 DO jj = 3, jpj 614 631 DO ji = fs_2, jpi ! vector opt. ok because we start at jj = 3 … … 619 636 END DO 620 637 END DO 638 !$OMP DO private(jj,ji,zua,zva) 621 639 DO jj = 2, jpjm1 622 640 DO ji = fs_2, fs_jpim1 ! vector opt. … … 628 646 pva(ji,jj,jk) = pva(ji,jj,jk) + zva 629 647 END DO 630 END DO 648 END DO 649 !$OMP END DO NOWAIT 650 !$OMP END PARALLEL 631 651 ! ! =============== 632 652 END DO ! End of slab … … 683 703 IF(lwp) WRITE(numout,*) ' namlbc: change fmask value in the angles (T) ln_vorlat = ', ln_vorlat 684 704 IF( ln_vorlat .AND. ( ln_dynvor_ene .OR. ln_dynvor_ens .OR. ln_dynvor_mix ) ) THEN 705 !$OMP PARALLEL DO private(jk,jj,ji) 685 706 DO jk = 1, jpk 686 707 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzad.F90
r6748 r7037 83 83 ENDIF 84 84 85 !$OMP PARALLEL 85 86 DO jk = 2, jpkm1 ! Vertical momentum advection at level w and u- and v- vertical 86 !$OMP PARALLELDO schedule(static) private(jj, ji)87 !$OMP DO schedule(static) private(jj, ji) 87 88 DO jj = 2, jpj ! vertical fluxes 88 89 DO ji = fs_2, jpi ! vector opt. … … 90 91 END DO 91 92 END DO 92 !$OMP PARALLELDO schedule(static) private(jj, ji)93 !$OMP DO schedule(static) private(jj, ji) 93 94 DO jj = 2, jpjm1 ! vertical momentum advection at w-point 94 95 DO ji = fs_2, fs_jpim1 ! vector opt. … … 98 99 END DO 99 100 END DO 101 !$OMP END PARALLEL 100 102 ! 101 103 ! Surface and bottom advective fluxes set to zero -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/dynzdf_imp.F90
r6748 r7037 92 92 ! 93 93 IF( ln_dynadv_vec .OR. ln_linssh ) THEN ! applied on velocity 94 !$OMP PARALLEL DO schedule(static) private(jk) 94 95 DO jk = 1, jpkm1 95 96 ua(:,:,jk) = ( ub(:,:,jk) + p2dt * ua(:,:,jk) ) * umask(:,:,jk) … … 97 98 END DO 98 99 ELSE ! applied on thickness weighted velocity 100 !$OMP PARALLEL DO schedule(static) private(jk) 99 101 DO jk = 1, jpkm1 100 102 ua(:,:,jk) = ( e3u_b(:,:,jk) * ub(:,:,jk) & … … 140 142 ! G. Madec : in linear free surface, e3u_a = e3u_n = e3u_0, so systematic use of e3u_a 141 143 IF( ln_bfrimp .AND. ln_dynspg_ts ) THEN 144 !$OMP PARALLEL 145 !$OMP DO schedule(static) private(jk) 142 146 DO jk = 1, jpkm1 ! remove barotropic velocities 143 147 ua(:,:,jk) = ( ua(:,:,jk) - ua_b(:,:) ) * umask(:,:,jk) 144 148 va(:,:,jk) = ( va(:,:,jk) - va_b(:,:) ) * vmask(:,:,jk) 145 149 END DO 150 !$OMP DO schedule(static) private(jj, ji, ikbu, ikbv, ze3ua, ze3va) 146 151 DO jj = 2, jpjm1 ! Add bottom/top stress due to barotropic component only 147 152 DO ji = fs_2, fs_jpim1 ! vector opt. … … 154 159 END DO 155 160 END DO 161 !$OMP END DO NOWAIT 162 !$OMP END PARALLEL 156 163 IF( ln_isfcav ) THEN ! Ocean cavities (ISF) 164 !$OMP PARALLEL DO schedule(static) private(jj, ji, ikbu, ikbv, ze3ua, ze3va) 157 165 DO jj = 2, jpjm1 158 166 DO ji = fs_2, fs_jpim1 ! vector opt. … … 174 182 ! non zero value at the ocean bottom depending on the bottom friction used. 175 183 ! 176 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ze3ua, zzwi, zzws) 184 !$OMP PARALLEL 185 !$OMP DO schedule(static) private(jk, jj, ji, ze3ua, zzwi, zzws) 177 186 DO jk = 1, jpkm1 ! Matrix 178 187 DO jj = 2, jpjm1 … … 187 196 END DO 188 197 END DO 198 !$OMP DO schedule(static) private(jj, ji) 189 199 DO jj = 2, jpjm1 ! Surface boundary conditions 190 200 DO ji = fs_2, fs_jpim1 ! vector opt. … … 210 220 ! 
211 221 DO jk = 2, jpkm1 !== First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 (increasing k) == 212 !$OMP PARALLELDO schedule(static) private(jj, ji)222 !$OMP DO schedule(static) private(jj, ji) 213 223 DO jj = 2, jpjm1 214 224 DO ji = fs_2, fs_jpim1 ! vector opt. … … 216 226 END DO 217 227 END DO 218 END DO 219 ! 220 !$OMP PARALLEL DO schedule(static) private(jj, ji, ze3ua) 228 !$OMP END DO NOWAIT 229 END DO 230 ! 231 !$OMP DO schedule(static) private(jj, ji, ze3ua) 221 232 DO jj = 2, jpjm1 !== second recurrence: SOLk = RHSk - Lk / Dk-1 Lk-1 ==! 222 233 DO ji = fs_2, fs_jpim1 ! vector opt. … … 227 238 END DO 228 239 DO jk = 2, jpkm1 229 !$OMP PARALLELDO schedule(static) private(jj, ji)240 !$OMP DO schedule(static) private(jj, ji) 230 241 DO jj = 2, jpjm1 231 242 DO ji = fs_2, fs_jpim1 … … 235 246 END DO 236 247 ! 237 !$OMP PARALLELDO schedule(static) private(jj, ji)248 !$OMP DO schedule(static) private(jj, ji) 238 249 DO jj = 2, jpjm1 !== thrid recurrence : SOLk = ( Lk - Uk * Ek+1 ) / Dk ==! 239 250 DO ji = fs_2, fs_jpim1 ! vector opt. … … 242 253 END DO 243 254 DO jk = jpk-2, 1, -1 244 !$OMP PARALLELDO schedule(static) private(jj, ji)255 !$OMP DO schedule(static) private(jj, ji) 245 256 DO jj = 2, jpjm1 246 257 DO ji = fs_2, fs_jpim1 … … 256 267 ! non zero value at the ocean bottom depending on the bottom friction used 257 268 ! 258 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, ze3va, zzwi, zzws)269 !$OMP DO schedule(static) private(jk, jj, ji, ze3va, zzwi, zzws) 259 270 DO jk = 1, jpkm1 ! Matrix 260 271 DO jj = 2, jpjm1 … … 269 280 END DO 270 281 END DO 271 !$OMP PARALLELDO schedule(static) private(jj, ji)282 !$OMP DO schedule(static) private(jj, ji) 272 283 DO jj = 2, jpjm1 ! Surface boundary conditions 273 284 DO ji = fs_2, fs_jpim1 ! vector opt. … … 293 304 ! 
294 305 DO jk = 2, jpkm1 !== First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 (increasing k) == 295 !$OMP PARALLELDO schedule(static) private(jj, ji)306 !$OMP DO schedule(static) private(jj, ji) 296 307 DO jj = 2, jpjm1 297 308 DO ji = fs_2, fs_jpim1 ! vector opt. … … 299 310 END DO 300 311 END DO 301 END DO 302 ! 303 !$OMP PARALLEL DO schedule(static) private(jj, ji, ze3va) 312 !$OMP END DO NOWAIT 313 END DO 314 ! 315 !$OMP DO schedule(static) private(jj, ji, ze3va) 304 316 DO jj = 2, jpjm1 !== second recurrence: SOLk = RHSk - Lk / Dk-1 Lk-1 ==! 305 317 DO ji = fs_2, fs_jpim1 ! vector opt. … … 310 322 END DO 311 323 DO jk = 2, jpkm1 312 !$OMP PARALLELDO schedule(static) private(jj, ji)324 !$OMP DO schedule(static) private(jj, ji) 313 325 DO jj = 2, jpjm1 314 326 DO ji = fs_2, fs_jpim1 ! vector opt. … … 318 330 END DO 319 331 ! 320 !$OMP PARALLELDO schedule(static) private(jj, ji)332 !$OMP DO schedule(static) private(jj, ji) 321 333 DO jj = 2, jpjm1 !== third recurrence : SOLk = ( Lk - Uk * SOLk+1 ) / Dk ==! 322 334 DO ji = fs_2, fs_jpim1 ! vector opt. … … 325 337 END DO 326 338 DO jk = jpk-2, 1, -1 327 !$OMP PARALLELDO schedule(static) private(jj, ji)339 !$OMP DO schedule(static) private(jj, ji) 328 340 DO jj = 2, jpjm1 329 341 DO ji = fs_2, fs_jpim1 … … 331 343 END DO 332 344 END DO 333 END DO 334 345 !$OMP END DO NOWAIT 346 END DO 347 !$OMP END PARALLEL 335 348 ! J. Chanut: Lines below are useless ? 336 349 !! restore bottom layer avmu(v) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/DYN/sshwzv.F90
r6748 r7037 96 96 ! ! After Sea Surface Height ! 97 97 ! !------------------------------! 98 !$OMP PARALLEL WORKSHARE 98 !$OMP PARALLEL 99 !$OMP WORKSHARE 99 100 zhdiv(:,:) = 0._wp 100 !$OMP END PARALLELWORKSHARE101 !$OMP END WORKSHARE 101 102 DO jk = 1, jpkm1 ! Horizontal divergence of barotropic transports 102 !$OMP PARALLELDO schedule(static) private(jj, ji)103 !$OMP DO schedule(static) private(jj, ji) 103 104 DO jj = 1, jpj 104 105 DO ji = 1, jpi ! vector opt. … … 107 108 END DO 108 109 END DO 110 !$OMP END PARALLEL 109 111 ! ! Sea surface elevation time stepping 110 112 ! In time-split case we need a first guess of the ssh after (using the baroclinic timestep) in order to -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/lbclnk.F90
r6493 r7037 163 163 !!---------------------------------------------------------------------- 164 164 ! 165 !$OMP PARALLEL DO schedule(static) private(jk, ztab) 165 166 DO jk = 1, jpk 166 167 ztab = pt3d(2,2,jk) … … 189 190 ! 190 191 ztab = pt2d(2,2) 192 !$OMP PARALLEL WORKSHARE 191 193 pt2d(:,:) = ztab 194 !$OMP END PARALLEL WORKSHARE 192 195 ! 193 196 END SUBROUTINE lbc_lnk_2d … … 313 316 ! 314 317 CASE ( 1 , 4 , 6 ) !** cyclic east-west 318 !$OMP PARALLEL WORKSHARE 315 319 pt3d( 1 ,:,:) = pt3d(jpim1,:,:) ! all points 316 320 pt3d(jpi,:,:) = pt3d( 2 ,:,:) 321 !$OMP END PARALLEL WORKSHARE 317 322 ! 318 323 CASE DEFAULT !** East closed -- West closed 319 324 SELECT CASE ( cd_type ) 320 325 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 326 !$OMP PARALLEL WORKSHARE 321 327 pt3d( 1 ,:,:) = zland 322 328 pt3d(jpi,:,:) = zland 329 !$OMP END PARALLEL WORKSHARE 323 330 CASE ( 'F' ) ! F-point 331 !$OMP PARALLEL WORKSHARE 324 332 pt3d(jpi,:,:) = zland 333 !$OMP END PARALLEL WORKSHARE 325 334 END SELECT 326 335 ! … … 333 342 SELECT CASE ( cd_type ) 334 343 CASE ( 'T' , 'U' , 'W' ) ! T-, U-, W-points 344 !$OMP PARALLEL WORKSHARE 335 345 pt3d(:, 1 ,:) = pt3d(:,3,:) 336 346 pt3d(:,jpj,:) = zland 347 !$OMP END PARALLEL WORKSHARE 337 348 CASE ( 'V' , 'F' ) ! V-, F-points 349 !$OMP PARALLEL WORKSHARE 338 350 pt3d(:, 1 ,:) = psgn * pt3d(:,2,:) 339 351 pt3d(:,jpj,:) = zland 352 !$OMP END PARALLEL WORKSHARE 340 353 END SELECT 341 354 ! … … 343 356 SELECT CASE ( cd_type ) ! South : closed 344 357 CASE ( 'T' , 'U' , 'V' , 'W' , 'I' ) ! all points except F-point 358 !$OMP PARALLEL WORKSHARE 345 359 pt3d(:, 1 ,:) = zland 360 !$OMP END PARALLEL WORKSHARE 346 361 END SELECT 347 362 ! ! North fold … … 351 366 SELECT CASE ( cd_type ) 352 367 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 368 !$OMP PARALLEL WORKSHARE 353 369 pt3d(:, 1 ,:) = zland 354 370 pt3d(:,jpj,:) = zland 371 !$OMP END PARALLEL WORKSHARE 355 372 CASE ( 'F' ) ! 
F-point 373 !$OMP PARALLEL WORKSHARE 356 374 pt3d(:,jpj,:) = zland 375 !$OMP END PARALLEL WORKSHARE 357 376 END SELECT 358 377 ! … … 589 608 ! 590 609 CASE ( 1 , 4 , 6 ) !** cyclic east-west 610 !$OMP PARALLEL WORKSHARE 591 611 pt3d(jpim1,:,:) = pt3d(jpim1,:,:) + pt3d( 1 ,:,:) 592 612 pt3d( 2 ,:,:) = pt3d( 2 ,:,:) + pt3d(jpi,:,:) 593 613 pt3d( 1 ,:,:) = 0.0_wp ! all points 594 614 pt3d(jpi,:,:) = 0.0_wp 615 !$OMP END PARALLEL WORKSHARE 595 616 ! 596 617 CASE DEFAULT !** East closed -- West closed 597 618 SELECT CASE ( cd_type ) 598 619 CASE ( 'T' , 'U' , 'V' , 'W' ) ! T-, U-, V-, W-points 620 !$OMP PARALLEL WORKSHARE 599 621 pt3d( 1 ,:,:) = zland 600 622 pt3d(jpi,:,:) = zland 623 !$OMP END PARALLEL WORKSHARE 601 624 CASE ( 'F' ) ! F-point 625 !$OMP PARALLEL WORKSHARE 602 626 pt3d(jpi,:,:) = zland 627 !$OMP END PARALLEL WORKSHARE 603 628 END SELECT 604 629 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r6748 r7037 400 400 ! !* Cyclic east-west 401 401 IF( nbondi == 2 .AND. (nperio == 1 .OR. nperio == 4 .OR. nperio == 6) ) THEN 402 !$OMP PARALLEL WORKSHARE 402 403 ptab( 1 ,:,:) = ptab(jpim1,:,:) 403 404 ptab(jpi,:,:) = ptab( 2 ,:,:) 405 !$OMP END PARALLEL WORKSHARE 404 406 ELSE !* closed 405 IF( .NOT. cd_type == 'F' ) ptab( 1 :jpreci,:,:) = zland ! south except F-point 407 IF( .NOT. cd_type == 'F' ) THEN 408 !$OMP PARALLEL WORKSHARE 409 ptab( 1 :jpreci,:,:) = zland ! south except F-point 410 !$OMP END PARALLEL WORKSHARE 411 END IF 412 !$OMP PARALLEL WORKSHARE 406 413 ptab(nlci-jpreci+1:jpi ,:,:) = zland ! north 414 !$OMP END PARALLEL WORKSHARE 407 415 ENDIF 408 416 ! ! North-South boundaries (always closed) 409 IF( .NOT. cd_type == 'F' ) ptab(:, 1 :jprecj,:) = zland ! south except F-point 417 IF( .NOT. cd_type == 'F' ) THEN 418 !$OMP PARALLEL WORKSHARE 419 ptab(:, 1 :jprecj,:) = zland ! south except F-point 420 !$OMP END PARALLEL WORKSHARE 421 END IF 422 !$OMP PARALLEL WORKSHARE 410 423 ptab(:,nlcj-jprecj+1:jpj ,:) = zland ! north 424 !$OMP END PARALLEL WORKSHARE 411 425 ! 412 426 ENDIF … … 419 433 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. 
close case) 420 434 iihom = nlci-nreci 421 DO jl = 1, jpreci 422 zt3ew(:,jl,:,1) = ptab(jpreci+jl,:,:) 423 zt3we(:,jl,:,1) = ptab(iihom +jl,:,:) 435 !$OMP PARALLEL DO schedule(static) private(jk, jj, jl) 436 DO jk = 1, jpk 437 DO jj = 1, jpj 438 DO jl = 1, jpreci 439 zt3ew(jj,jl,jk,1) = ptab(jpreci+jl,jj,jk) 440 zt3we(jj,jl,jk,1) = ptab(iihom +jl,jj,jk) 441 END DO 442 END DO 424 443 END DO 425 444 END SELECT … … 451 470 SELECT CASE ( nbondi ) 452 471 CASE ( -1 ) 453 DO jl = 1, jpreci 454 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 455 END DO 456 CASE ( 0 ) 457 DO jl = 1, jpreci 458 ptab(jl ,:,:) = zt3we(:,jl,:,2) 459 ptab(iihom+jl,:,:) = zt3ew(:,jl,:,2) 460 END DO 461 CASE ( 1 ) 462 DO jl = 1, jpreci 463 ptab(jl ,:,:) = zt3we(:,jl,:,2) 472 !$OMP PARALLEL DO schedule(static) private(jk, jj, jl) 473 DO jk = 1, jpk 474 DO jl = 1, jpreci 475 DO jj = 1, jpj 476 ptab(iihom+jl,jj,jk) = zt3ew(jj,jl,jk,2) 477 END DO 478 END DO 479 END DO 480 CASE ( 0 ) 481 !$OMP PARALLEL DO schedule(static) private(jk, jj, jl) 482 DO jk = 1, jpk 483 DO jl = 1, jpreci 484 DO jj = 1, jpj 485 ptab(jl ,jj,jk) = zt3we(jj,jl,jk,2) 486 ptab(iihom+jl,jj,jk) = zt3ew(jj,jl,jk,2) 487 END DO 488 END DO 489 END DO 490 CASE ( 1 ) 491 !$OMP PARALLEL DO schedule(static) private(jk, jj, jl) 492 DO jk = 1, jpk 493 DO jl = 1, jpreci 494 DO jj = 1, jpj 495 ptab(jl ,jj,jk) = zt3we(jj,jl,jk,2) 496 END DO 497 END DO 464 498 END DO 465 499 END SELECT … … 471 505 IF( nbondj /= 2 ) THEN ! 
Read Dirichlet lateral conditions 472 506 ijhom = nlcj-nrecj 473 DO jl = 1, jprecj 474 zt3sn(:,jl,:,1) = ptab(:,ijhom +jl,:) 475 zt3ns(:,jl,:,1) = ptab(:,jprecj+jl,:) 507 !$OMP PARALLEL DO schedule(static) private(jk, ji, jl) 508 DO jk = 1, jpk 509 DO jl = 1, jprecj 510 DO ji = 1, jpi 511 zt3sn(ji,jl,jk,1) = ptab(ji,ijhom +jl,jk) 512 zt3ns(ji,jl,jk,1) = ptab(ji,jprecj+jl,jk) 513 END DO 514 END DO 476 515 END DO 477 516 ENDIF … … 503 542 SELECT CASE ( nbondj ) 504 543 CASE ( -1 ) 505 DO jl = 1, jprecj 506 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 507 END DO 508 CASE ( 0 ) 509 DO jl = 1, jprecj 510 ptab(:,jl ,:) = zt3sn(:,jl,:,2) 511 ptab(:,ijhom+jl,:) = zt3ns(:,jl,:,2) 512 END DO 513 CASE ( 1 ) 514 DO jl = 1, jprecj 515 ptab(:,jl,:) = zt3sn(:,jl,:,2) 544 !$OMP PARALLEL DO schedule(static) private(jk, ji, jl) 545 DO jk = 1, jpk 546 DO jl = 1, jprecj 547 DO ji = 1, jpi 548 ptab(ji,ijhom+jl,jk) = zt3ns(ji,jl,jk,2) 549 END DO 550 END DO 551 END DO 552 CASE ( 0 ) 553 !$OMP PARALLEL DO schedule(static) private(jk, ji, jl) 554 DO jk = 1, jpk 555 DO jl = 1, jprecj 556 DO ji = 1, jpi 557 ptab(ji,jl ,jk) = zt3sn(ji,jl,jk,2) 558 ptab(ji,ijhom+jl,jk) = zt3ns(ji,jl,jk,2) 559 END DO 560 END DO 561 END DO 562 CASE ( 1 ) 563 !$OMP PARALLEL DO schedule(static) private(jk, ji, jl) 564 DO jk = 1, jpk 565 DO jl = 1, jprecj 566 DO ji = 1, jpi 567 ptab(ji,jl,jk) = zt3sn(ji,jl,jk,2) 568 END DO 569 END DO 516 570 END DO 517 571 END SELECT … … 902 956 CASE ( -1, 0, 1 ) ! all exept 2 (i.e. 
close case) 903 957 iihom = nlci-nreci 904 DO jl = 1, jpreci 905 zt2ew(:,jl,1) = pt2d(jpreci+jl,:) 906 zt2we(:,jl,1) = pt2d(iihom +jl,:) 958 !$OMP PARALLEL DO schedule(static) private(jj,jl) 959 DO jj = 1, jpj 960 DO jl = 1, jpreci 961 zt2ew(jj,jl,1) = pt2d(jpreci+jl,jj) 962 zt2we(jj,jl,1) = pt2d(iihom +jl,jj) 963 END DO 907 964 END DO 908 965 END SELECT … … 934 991 SELECT CASE ( nbondi ) 935 992 CASE ( -1 ) 993 !$OMP PARALLEL DO schedule(static) private(jj,jl) 936 994 DO jl = 1, jpreci 937 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 938 END DO 939 CASE ( 0 ) 995 DO jj = 1, jpj 996 pt2d(iihom+jl,jj) = zt2ew(jj,jl,2) 997 END DO 998 END DO 999 CASE ( 0 ) 1000 !$OMP PARALLEL DO schedule(static) private(jj,jl) 940 1001 DO jl = 1, jpreci 941 pt2d(jl ,:) = zt2we(:,jl,2) 942 pt2d(iihom+jl,:) = zt2ew(:,jl,2) 943 END DO 944 CASE ( 1 ) 1002 DO jj = 1, jpj 1003 pt2d(jl ,jj) = zt2we(jj,jl,2) 1004 pt2d(iihom+jl,jj) = zt2ew(jj,jl,2) 1005 END DO 1006 END DO 1007 CASE ( 1 ) 1008 !$OMP PARALLEL DO schedule(static) private(jj,jl) 945 1009 DO jl = 1, jpreci 946 pt2d(jl ,:) = zt2we(:,jl,2) 1010 DO jj = 1, jpj 1011 pt2d(jl ,jj) = zt2we(jj,jl,2) 1012 END DO 947 1013 END DO 948 1014 END SELECT … … 955 1021 IF( nbondj /= 2 ) THEN ! 
Read Dirichlet lateral conditions 956 1022 ijhom = nlcj-nrecj 1023 !$OMP PARALLEL DO schedule(static) private(ji,jl) 957 1024 DO jl = 1, jprecj 958 zt2sn(:,jl,1) = pt2d(:,ijhom +jl) 959 zt2ns(:,jl,1) = pt2d(:,jprecj+jl) 1025 DO ji = 1, jpi 1026 zt2sn(ji,jl,1) = pt2d(ji,ijhom +jl) 1027 zt2ns(ji,jl,1) = pt2d(ji,jprecj+jl) 1028 END DO 960 1029 END DO 961 1030 ENDIF … … 987 1056 SELECT CASE ( nbondj ) 988 1057 CASE ( -1 ) 1058 !$OMP PARALLEL DO schedule(static) private(ji,jl) 989 1059 DO jl = 1, jprecj 990 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 991 END DO 992 CASE ( 0 ) 1060 DO ji = 1, jpi 1061 pt2d(ji,ijhom+jl) = zt2ns(ji,jl,2) 1062 END DO 1063 END DO 1064 CASE ( 0 ) 1065 !$OMP PARALLEL DO schedule(static) private(ji,jl) 993 1066 DO jl = 1, jprecj 994 pt2d(:,jl ) = zt2sn(:,jl,2) 995 pt2d(:,ijhom+jl) = zt2ns(:,jl,2) 996 END DO 997 CASE ( 1 ) 1067 DO ji = 1, jpi 1068 pt2d(ji,jl ) = zt2sn(ji,jl,2) 1069 pt2d(ji,ijhom+jl) = zt2ns(ji,jl,2) 1070 END DO 1071 END DO 1072 CASE ( 1 ) 1073 !$OMP PARALLEL DO schedule(static) private(ji,jl) 998 1074 DO jl = 1, jprecj 999 pt2d(:,jl ) = zt2sn(:,jl,2) 1075 DO ji = 1, jpi 1076 pt2d(ji,jl ) = zt2sn(ji,jl,2) 1077 END DO 1000 1078 END DO 1001 1079 END SELECT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LBC/mppini.F90
r6748 r7037 155 155 ! The last line of blocks (west) will have fewer points 156 156 157 !$OMP PARALLEL DO schedule(static) private(jj,ji)158 157 DO jj = 1, jpnj 159 158 DO ji=1, jpni-1 … … 165 164 #else 166 165 167 !$OMP PARALLEL DO schedule(static) private(jj,ji)168 166 DO jj = 1, jpnj 169 167 DO ji = 1, iresti … … 176 174 177 175 #endif 178 !$OMP PARALLEL WORKSHARE179 176 nfilcit(:,:) = ilcit(:,:) 180 !$OMP END PARALLEL WORKSHARE181 177 IF( irestj == 0 ) irestj = jpnj 182 178 … … 206 202 ! ------------------------------- 207 203 208 !$OMP PARALLEL WORKSHARE209 204 iimppt(:,:) = 1 210 205 ijmppt(:,:) = 1 211 !$OMP END PARALLEL WORKSHARE212 206 213 207 IF( jpni > 1 ) THEN 214 !$OMP PARALLEL DO schedule(static) private(jj,ji)215 208 DO jj = 1, jpnj 216 209 DO ji = 2, jpni … … 219 212 END DO 220 213 ENDIF 221 !$OMP PARALLEL WORKSHARE222 214 nfiimpp(:,:)=iimppt(:,:) 223 !$OMP END PARALLEL WORKSHARE224 215 225 216 IF( jpnj > 1 ) THEN 226 !$OMP PARALLEL DO schedule(static) private(jj,ji)227 217 DO jj = 2, jpnj 228 218 DO ji = 1, jpni -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfc1d_c2d.F90
r6140 r7037 141 141 IF(lwp) WRITE(numout,*) ' momentum laplacian coeffcients = rn_aht0/e_equ * max(e1,e2)' 142 142 za00 = pah0 / zd_max 143 !$OMP PARALLEL DO schedule(static) private(jj,ji,zemax1,zemax2) 143 144 DO jj = 1, jpj 144 145 DO ji = 1, jpi … … 152 153 IF(lwp) WRITE(numout,*) ' momentum bilaplacian coeffcients = rn_bht0/e_equ * max(e1,e2)**3' 153 154 za00 = pah0 / ( zd_max * zd_max * zd_max ) 155 !$OMP PARALLEL DO schedule(static) private(jj,ji,zemax1,zemax2) 154 156 DO jj = 1, jpj 155 157 DO ji = 1, jpi … … 164 166 ENDIF 165 167 ! ! deeper values (LAP and BLP cases) 168 !$OMP PARALLEL DO schedule(static) private(jk) 166 169 DO jk = 2, jpk 167 170 pah1(:,:,jk) = pah1(:,:,1) * tmask(:,:,jk) … … 173 176 IF(lwp) WRITE(numout,*) ' tracer laplacian coeffcients = rn_aht0/e_equ * max(e1,e2)' 174 177 za00 = pah0 / zd_max 178 !$OMP PARALLEL DO schedule(static) private(jj,ji,zemax1,zemax2) 175 179 DO jj = 1, jpj 176 180 DO ji = 1, jpi … … 184 188 IF(lwp) WRITE(numout,*) ' tracer bilaplacian coeffcients = rn_bht0/e_equ * max(e1,e2)**3' 185 189 za00 = pah0 / ( zd_max * zd_max * zd_max ) 190 !$OMP PARALLEL DO schedule(static) private(jj,ji,zemax1,zemax2) 186 191 DO jj = 1, jpj 187 192 DO ji = 1, jpi … … 196 201 ENDIF 197 202 ! ! deeper values (LAP and BLP cases) 203 !$OMP PARALLEL DO schedule(static) private(jk) 198 204 DO jk = 2, jpk 199 205 pah1(:,:,jk) = pah1(:,:,1) * umask(:,:,jk) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldfslp.F90
r6748 r7037 135 135 z1_slpmax = 1._wp / rn_slpmax 136 136 ! 137 138 !$OMP PARALLEL WORKSHARE 137 !$OMP PARALLEL 138 !$OMP WORKSHARE 139 139 140 zww(:,:,:) = 0._wp 140 141 zwz(:,:,:) = 0._wp 141 !$OMP END PARALLEL WORKSHARE142 ! 143 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)142 !$OMP END WORKSHARE NOWAIT 143 ! 144 !$OMP DO schedule(static) private(jk, jj, ji) 144 145 DO jk = 1, jpk !== i- & j-gradient of density ==! 145 146 DO jj = 1, jpjm1 … … 150 151 END DO 151 152 END DO 153 !$OMP END DO NOWAIT 154 !$OMP END PARALLEL 152 155 IF( ln_zps ) THEN ! partial steps correction at the bottom ocean level 153 156 !$OMP PARALLEL DO schedule(static) private(jj, ji) … … 169 172 ENDIF 170 173 ! 174 !$OMP PARALLEL 175 !$OMP WORKSHARE 171 176 zdzr(:,:,1) = 0._wp !== Local vertical density gradient at T-point == ! (evaluated from N^2) 172 !$OMP PARALLEL DO schedule(static) private(jk) 177 !$OMP END WORKSHARE 178 !$OMP DO schedule(static) private(jk) 173 179 DO jk = 2, jpkm1 174 180 ! ! zdzr = d/dz(prd)= - ( prd ) / grav * mk(pn2) -- at t point … … 180 186 & * ( pn2(:,:,jk) + pn2(:,:,jk+1) ) * ( 1._wp - 0.5_wp * tmask(:,:,jk+1) ) 181 187 END DO 188 !$OMP END DO NOWAIT 189 !$OMP END PARALLEL 182 190 ! 183 191 ! !== Slopes just below the mixed layer ==! … … 248 256 ! 249 257 ! !* horizontal Shapiro filter 250 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 258 !$OMP PARALLEL 259 !$OMP DO schedule(static) private(jk, jj, ji) 251 260 DO jk = 2, jpkm1 252 261 DO jj = 2, jpjm1, MAX(1, jpj-3) ! rows jj=2 and =jpjm1 only … … 288 297 END DO 289 298 END DO 299 !$OMP END DO 290 300 291 301 … … 293 303 ! =========================== | wslpj = mij( d/dj( prd ) / d/dz( prd ) 294 304 ! 
295 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, zbw, zfk, zck, zbi, zbj, zai, zaj, zci, zcj)305 !$OMP DO schedule(static) private(jk, jj, ji, zbw, zfk, zck, zbi, zbj, zai, zaj, zci, zcj) 296 306 DO jk = 2, jpkm1 297 307 DO jj = 2, jpjm1 … … 329 339 END DO 330 340 END DO 341 !$OMP END DO NOWAIT 342 !$OMP END PARALLEL 331 343 CALL lbc_lnk( zwz, 'T', -1. ) ; CALL lbc_lnk( zww, 'T', -1. ) ! lateral boundary conditions 332 344 ! … … 441 453 ! 442 454 ip = jl ; jp = jl ! guaranteed nonzero gradients ( absolute value larger than repsln) 443 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zdit,zdis,zdjt,zdjs,zdxrho_raw,zdyrho_raw)444 455 DO jk = 1, jpkm1 ! done each pair of triad 445 456 DO jj = 1, jpjm1 ! NB: not masked ==> a minimum value is set … … 458 469 ! 459 470 IF( ln_zps .AND. l_grad_zps ) THEN ! partial steps: correction of i- & j-grad on bottom 460 !$OMP PARALLEL DO schedule(static) private(jj,ji,iku,zdit,zdis,zdxrho_raw,zdyrho_raw)461 471 DO jj = 1, jpjm1 462 472 DO ji = 1, jpim1 … … 690 700 ! 691 701 ! !== surface mixed layer mask ! 692 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ik) 702 !$OMP PARALLEL 703 !$OMP DO schedule(static) private(jk, jj, ji, ik) 693 704 DO jk = 1, jpk ! =1 inside the mixed layer, =0 otherwise 694 705 DO jj = 1, jpj … … 701 712 END DO 702 713 END DO 714 !$OMP END DO NOWAIT 703 715 704 716 … … 713 725 !----------------------------------------------------------------------- 714 726 ! 715 !$OMP PARALLELDO schedule(static) private(jj, ji, iku, ikv, zbu, zbv, zau, zav, ik, ikm1, zbw, zci, zcj, zai, zaj, zbi, zbj)727 !$OMP DO schedule(static) private(jj, ji, iku, ikv, zbu, zbv, zau, zav, ik, ikm1, zbw, zci, zcj, zai, zaj, zbi, zbj) 716 728 DO jj = 2, jpjm1 717 729 DO ji = 2, jpim1 … … 758 770 END DO 759 771 END DO 772 !$OMP END DO NOWAIT 773 !$OMP END PARALLEL 760 774 !!gm this lbc_lnk should be useless.... 761 775 CALL lbc_lnk( uslpml , 'U', -1. ) ; CALL lbc_lnk( vslpml , 'V', -1. ) ! lateral boundary cond. 
(sign change) … … 808 822 ! ------------------------------ 809 823 810 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 824 !$OMP PARALLEL 825 !$OMP DO schedule(static) private(jk, jj, ji) 811 826 DO jk = 1, jpk 812 827 DO jj = 1, jpj … … 819 834 END DO 820 835 END DO 821 !$OMP PARALLEL DO schedule(static) private(jj, ji) 836 !$OMP END DO NOWAIT 837 !$OMP DO schedule(static) private(jj, ji) 822 838 DO jj = 1, jpj 823 839 DO ji = 1, jpi … … 828 844 END DO 829 845 END DO 846 !$OMP END DO NOWAIT 847 !$OMP END PARALLEL 830 848 831 849 !!gm I no longer understand this..... -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/LDF/ldftra.F90
r6748 r7037 276 276 ! 277 277 IF( ln_traldf_blp .AND. .NOT. l_ldftra_time ) THEN 278 !$OMP PARALLEL WORKSHARE279 278 ahtu(:,:,:) = SQRT( ahtu(:,:,:) ) 280 279 ahtv(:,:,:) = SQRT( ahtv(:,:,:) ) 281 !$OMP END PARALLEL WORKSHARE282 280 ENDIF 283 281 ! … … 335 333 z1_f20 = 1._wp / ( 2._wp * omega * SIN( rad * 20._wp ) ) ! 1 / ff(20 degrees) 336 334 zaht_min = 0.2_wp * rn_aht_0 ! minimum value for aht 335 !$OMP PARALLEL 336 !$OMP DO schedule(static) private(jj,ji,zaht) 337 337 DO jj = 1, jpj 338 338 DO ji = 1, jpi … … 342 342 END DO 343 343 END DO 344 !$OMP DO schedule(static) private(jk) 344 345 DO jk = 2, jpkm1 ! deeper value = surface value 345 346 ahtu(:,:,jk) = ahtu(:,:,1) * umask(:,:,jk) 346 347 ahtv(:,:,jk) = ahtv(:,:,1) * vmask(:,:,jk) 347 348 END DO 349 !$OMP END DO NOWAIT 350 !$OMP END PARALLEL 348 351 ! 349 352 CASE( 31 ) !== time varying 3D field ==! = F( local velocity ) 350 353 IF( ln_traldf_lap ) THEN ! laplacian operator |u| e /12 354 !$OMP PARALLEL DO schedule(static) private(jk) 351 355 DO jk = 1, jpkm1 352 356 ahtu(:,:,jk) = ABS( ub(:,:,jk) ) * e1u(:,:) * r1_12 … … 354 358 END DO 355 359 ELSEIF( ln_traldf_blp ) THEN ! bilaplacian operator sqrt( |u| e^3 /12 ) = sqrt( |u| e /12 ) * e 360 !$OMP PARALLEL DO schedule(static) private(jk) 356 361 DO jk = 1, jpkm1 357 362 ahtu(:,:,jk) = SQRT( ABS( ub(:,:,jk) ) * e1u(:,:) * r1_12 ) * e1u(:,:) … … 516 521 CALL wrk_alloc( jpi,jpj, zn, zah, zhw, zross, zaeiw ) 517 522 ! 523 !$OMP PARALLEL WORKSHARE 518 524 zn (:,:) = 0._wp ! Local initialization 519 525 zhw (:,:) = 5._wp 520 526 zah (:,:) = 0._wp 521 527 zross(:,:) = 0._wp 528 !$OMP END PARALLEL WORKSHARE 522 529 ! ! 
Compute lateral diffusive coefficient at T-point 523 530 IF( ln_traldf_triad ) THEN 524 531 DO jk = 1, jpk 532 !$OMP PARALLEL DO schedule(static) private(jj,ji,zn2,ze3w) 525 533 DO jj = 2, jpjm1 526 534 DO ji = 2, jpim1 … … 541 549 ELSE 542 550 DO jk = 1, jpk 551 !$OMP PARALLEL DO schedule(static) private(jj,ji,zn2,ze3w) 543 552 DO jj = 2, jpjm1 544 553 DO ji = 2, jpim1 … … 560 569 END IF 561 570 571 !$OMP PARALLEL 572 !$OMP DO schedule(static) private(jj,ji,zfw) 562 573 DO jj = 2, jpjm1 563 574 DO ji = fs_2, fs_jpim1 ! vector opt. … … 581 592 ! !== Bound on eiv coeff. ==! 582 593 z1_f20 = 1._wp / ( 2._wp * omega * sin( rad * 20._wp ) ) 594 !$OMP DO schedule(static) private(jj,ji,zzaei) 583 595 DO jj = 2, jpjm1 584 596 DO ji = fs_2, fs_jpim1 ! vector opt. … … 587 599 END DO 588 600 END DO 601 !$OMP END DO NOWAIT 602 !$OMP END PARALLEL 589 603 CALL lbc_lnk( zaeiw(:,:), 'W', 1. ) ! lateral boundary condition 590 604 ! 605 !$OMP PARALLEL DO schedule(static) private(jj,ji,zfw) 591 606 DO jj = 2, jpjm1 !== aei at u- and v-points ==! 592 607 DO ji = fs_2, fs_jpim1 ! vector opt. … … 597 612 CALL lbc_lnk( paeiu(:,:,1), 'U', 1. ) ; CALL lbc_lnk( paeiv(:,:,1), 'V', 1. ) ! lateral boundary condition 598 613 614 !$OMP PARALLEL DO schedule(static) private(jk) 599 615 DO jk = 2, jpkm1 !== deeper values equal the surface one ==! 600 616 paeiu(:,:,jk) = paeiu(:,:,1) * umask(:,:,jk) … … 651 667 652 668 669 !$OMP PARALLEL 670 !$OMP WORKSHARE 653 671 zpsi_uw(:,:, 1 ) = 0._wp ; zpsi_vw(:,:, 1 ) = 0._wp 654 672 zpsi_uw(:,:,jpk) = 0._wp ; zpsi_vw(:,:,jpk) = 0._wp 655 ! 673 !$OMP END WORKSHARE NOWAIT 674 ! 675 !$OMP DO schedule(static) private(jk,jj,ji) 656 676 DO jk = 2, jpkm1 657 677 DO jj = 1, jpjm1 … … 665 685 END DO 666 686 ! 
687 !$OMP DO schedule(static) private(jk,jj,ji) 667 688 DO jk = 1, jpkm1 668 689 DO jj = 1, jpjm1 … … 673 694 END DO 674 695 END DO 696 !$OMP END DO NOWAIT 697 !$OMP DO schedule(static) private(jk,jj,ji) 675 698 DO jk = 1, jpkm1 676 699 DO jj = 2, jpjm1 … … 681 704 END DO 682 705 END DO 706 !$OMP END DO NOWAIT 707 !$OMP END PARALLEL 683 708 ! 684 709 ! ! diagnose the eddy induced velocity and associated heat transport … … 722 747 CALL wrk_alloc( jpi,jpj,jpk, zw3d ) 723 748 ! 749 !$OMP PARALLEL 750 !$OMP WORKSHARE 724 751 zw3d(:,:,jpk) = 0._wp ! bottom value always 0 725 ! 752 !$OMP END WORKSHARE NOWAIT 753 ! 754 !$OMP DO schedule(static) private(jk) 726 755 DO jk = 1, jpkm1 ! e2u e3u u_eiv = -dk[psi_uw] 727 756 zw3d(:,:,jk) = ( psi_uw(:,:,jk+1) - psi_uw(:,:,jk) ) / ( e2u(:,:) * e3u_n(:,:,jk) ) 728 757 END DO 758 !$OMP END DO NOWAIT 759 !$OMP END PARALLEL 729 760 CALL iom_put( "uoce_eiv", zw3d ) 730 761 ! 762 !$OMP PARALLEL DO schedule(static) private(jk) 731 763 DO jk = 1, jpkm1 ! e1v e3v v_eiv = -dk[psi_vw] 732 764 zw3d(:,:,jk) = ( psi_vw(:,:,jk+1) - psi_vw(:,:,jk) ) / ( e1v(:,:) * e3v_n(:,:,jk) ) … … 734 766 CALL iom_put( "voce_eiv", zw3d ) 735 767 ! 768 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 736 769 DO jk = 1, jpkm1 ! e1 e2 w_eiv = dk[psix] + dk[psix] 737 770 DO jj = 2, jpjm1 … … 752 785 ! 753 786 zztmp = 0.5_wp * rau0 * rcp 754 zw2d(:,:) = 0._wp 787 !$OMP PARALLEL 788 !$OMP WORKSHARE 789 zw2d(:,:) = 0._wp 790 !$OMP END WORKSHARE 755 791 DO jk = 1, jpkm1 792 !$OMP DO schedule(static) private(jj,ji) 756 793 DO jj = 2, jpjm1 757 794 DO ji = fs_2, fs_jpim1 ! vector opt. … … 760 797 END DO 761 798 END DO 762 END DO 799 !$OMP END DO NOWAIT 800 END DO 801 !$OMP END PARALLEL 763 802 CALL lbc_lnk( zw2d, 'U', -1. ) 764 803 CALL iom_put( "ueiv_heattr", zw2d ) ! 
heat transport in i-direction 804 !$OMP PARALLEL 805 !$OMP WORKSHARE 765 806 zw2d(:,:) = 0._wp 807 !$OMP END WORKSHARE 766 808 DO jk = 1, jpkm1 809 !$OMP DO schedule(static) private(jj,ji) 767 810 DO jj = 2, jpjm1 768 811 DO ji = fs_2, fs_jpim1 ! vector opt. … … 771 814 END DO 772 815 END DO 773 END DO 816 !$OMP END DO NOWAIT 817 END DO 818 !$OMP END PARALLEL 774 819 CALL lbc_lnk( zw2d, 'V', -1. ) 775 820 CALL iom_put( "veiv_heattr", zw2d ) ! heat transport in i-direction -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/albedo.F90
r6416 r7037 126 126 ELSE WHERE ; zalb_it = 0.1 + 3.6 * ph_ice 127 127 END WHERE 128 128 !$OMP PARALLEL 129 !$OMP DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 129 130 DO jl = 1, ijpl 130 131 DO jj = 1, jpj … … 156 157 END DO 157 158 159 !$OMP WORKSHARE 158 160 pa_ice_os(:,:,:) = pa_ice_cs(:,:,:) + rcloud ! Oberhuber correction for overcast sky 161 !$OMP END WORKSHARE NOWAIT 162 !$OMP END PARALLEL 159 163 160 164 !------------------------------------------ … … 193 197 z1_c2 = 1. / 0.03 194 198 ! Computation of the snow/ice albedo 199 !$OMP PARALLEL DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 195 200 DO jl = 1, ijpl 196 201 DO jj = 1, jpj … … 233 238 ! 234 239 zcoef = 0.05 / ( 1.1 * rmue**1.4 + 0.15 ) ! Parameterization of Briegled and Ramanathan, 1982 240 !$OMP PARALLEL WORKSHARE 235 241 pa_oce_cs(:,:) = zcoef 236 242 pa_oce_os(:,:) = 0.06 ! Parameterization of Kondratyev, 1969 and Payne, 1972 243 !$OMP END PARALLEL WORKSHARE 237 244 ! 238 245 END SUBROUTINE albedo_oce -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/geo2ocean.F90
r6140 r7037 157 157 ! (computation done on the north stereographic polar plane) 158 158 ! 159 !$OMP PARALLEL 160 !$OMP DO schedule(static) private(jj,ji,zlam,zphi,zxnpt,zynpt,znnpt,zxnpu,zynpu,znnpu,zxnpv,zynpv,znnpv,zxnpf,zynpf,znnpf,zlan,zphh,zxvvt,zyvvt,znvvt,zxffu,zyffu,znffu,zxffv,zyffv,znffv,zxuuf,zyuuf,znuuf) 159 161 DO jj = 2, jpjm1 160 162 DO ji = fs_2, jpi ! vector opt. … … 248 250 ! =============== ! 249 251 252 !$OMP DO schedule(static) private(jj,ji) 250 253 DO jj = 2, jpjm1 251 254 DO ji = fs_2, jpi ! vector opt. … … 268 271 END DO 269 272 END DO 273 !$OMP END DO NOWAIT 274 !$OMP END PARALLEL 270 275 271 276 ! =========================== ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcana.F90
r6748 r7037 269 269 ztau_sais = 0.015 270 270 ztaun = ztau - ztau_sais * COS( (ztime - ztimemax) / (ztimemin - ztimemax) * rpi ) 271 !$OMP PARALLEL DO schedule(static) private(jj, ji) 271 ! module of wind stress and wind speed at T-point 272 zcoef = 1. / ( zrhoa * zcdrag ) 273 !$OMP PARALLEL 274 !$OMP DO schedule(static) private(jj, ji) 272 275 DO jj = 1, jpj 273 276 DO ji = 1, jpi … … 279 282 END DO 280 283 281 ! module of wind stress and wind speed at T-point 282 zcoef = 1. / ( zrhoa * zcdrag ) 283 !$OMP PARALLEL DO schedule(static) private(jj, ji, ztx, zty, zmod) 284 !$OMP DO schedule(static) private(jj, ji, ztx, zty, zmod) 284 285 DO jj = 2, jpjm1 285 286 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 291 292 END DO 292 293 END DO 294 !$OMP END DO NOWAIT 295 !$OMP END PARALLEL 293 296 CALL lbc_lnk( taum(:,:), 'T', 1. ) ; CALL lbc_lnk( wndm(:,:), 'T', 1. ) 294 297 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk_core.F90
r6416 r7037 266 266 ! local scalars ( place there for vector optimisation purposes) 267 267 zcoef_qsatw = 0.98 * 640380. / rhoa 268 268 269 !$OMP PARALLEL WORKSHARE 269 270 zst(:,:) = pst(:,:) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 270 271 … … 276 277 zwnd_i(:,:) = 0.e0 277 278 zwnd_j(:,:) = 0.e0 279 !$OMP END PARALLEL WORKSHARE 278 280 #if defined key_cyclone 279 281 CALL wnd_cyc( kt, zwnd_i, zwnd_j ) ! add analytical tropical cyclone (Vincent et al. JGR 2012) 282 !$OMP PARALLEL DO schedule(static) private(jj, ji) 280 283 DO jj = 2, jpjm1 281 284 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 285 288 END DO 286 289 #endif 290 !$OMP PARALLEL DO schedule(static) private(jj, ji) 287 291 DO jj = 2, jpjm1 288 292 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 294 298 CALL lbc_lnk( zwnd_j(:,:) , 'T', -1. ) 295 299 ! ... scalar wind ( = | U10m - U_oce | ) at T-point (masked) 296 wndm(:,:) = SQRT( zwnd_i(:,:) * zwnd_i(:,:) & 297 & + zwnd_j(:,:) * zwnd_j(:,:) ) * tmask(:,:,1) 298 300 !$OMP PARALLEL DO schedule(static) private(jj, ji) 301 DO jj = 1, jpj 302 DO ji = 1, jpi 303 wndm(ji,jj) = SQRT( zwnd_i(ji,jj) * zwnd_i(ji,jj) & 304 & + zwnd_j(ji,jj) * zwnd_j(ji,jj) ) * tmask(ji,jj,1) 305 306 END DO 307 END DO 299 308 ! ----------------------------------------------------------------------------- ! 300 309 ! I Radiative FLUXES ! … … 307 316 ENDIF 308 317 309 zqlw(:,:) = ( sf(jp_qlw)%fnow(:,:,1) - Stef * zst(:,:)*zst(:,:)*zst(:,:)*zst(:,:) ) * tmask(:,:,1) ! Long Wave 318 !$OMP PARALLEL DO schedule(static) private(jj, ji) 319 DO jj = 1, jpj 320 DO ji = 1, jpi 321 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave 310 322 ! ----------------------------------------------------------------------------- ! 311 323 ! II Turbulent FLUXES ! … … 313 325 314 326 ! ... 
specific humidity at SST and IST 315 zqsatw(:,:) = zcoef_qsatw * EXP( -5107.4 / zst(:,:) ) 316 327 zqsatw(ji,jj) = zcoef_qsatw * EXP( -5107.4 / zst(ji,jj) ) 328 329 END DO 330 END DO 317 331 ! ... NCAR Bulk formulae, computation of Cd, Ch, Ce at T-point : 318 332 CALL turb_core_2z( rn_zqt, rn_zu, zst, sf(jp_tair)%fnow, zqsatw, sf(jp_humi)%fnow, wndm, & … … 320 334 321 335 ! ... tau module, i and j component 336 !$OMP PARALLEL DO schedule(static) private(jj, ji,zztmp) 322 337 DO jj = 1, jpj 323 338 DO ji = 1, jpi … … 338 353 ! Note the use of 0.5*(2-umask) in order to unmask the stress along coastlines 339 354 ! Note the use of MAX(tmask(i,j),tmask(i+1,j) is to mask tau over ice shelves 355 !$OMP PARALLEL DO schedule(static) private(jj, ji) 340 356 DO jj = 1, jpjm1 341 357 DO ji = 1, fs_jpim1 … … 352 368 ! Turbulent fluxes over ocean 353 369 ! ----------------------------- 370 !$OMP PARALLEL DO schedule(static) private(jj, ji) 371 DO jj = 1, jpj 372 DO ji = 1, jpi 354 373 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 355 374 !! q_air and t_air are (or "are almost") given at 10m (wind reference height) 356 zevap( :,:) = rn_efac*MAX( 0._wp, rhoa*Ce(:,:)*( zqsatw(:,:) - sf(jp_humi)%fnow(:,:,1) )*wndm(:,:) ) ! Evaporation357 zqsb ( :,:) = cpa*rhoa*Ch(:,:)*( zst (:,:) - sf(jp_tair)%fnow(:,:,1) )*wndm(:,:) ! Sensible Heat375 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation 376 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat 358 377 ELSE 359 378 !! q_air and t_air are not given at 10m (wind reference height) 360 379 ! Values of temp. and hum. adjusted to height of wind during bulk algorithm iteration must be used!!! 361 zevap(:,:) = rn_efac*MAX( 0._wp, rhoa*Ce(:,:)*( zqsatw(:,:) - zq_zu(:,:) )*wndm(:,:) ) ! Evaporation 362 zqsb (:,:) = cpa*rhoa*Ch(:,:)*( zst (:,:) - zt_zu(:,:) )*wndm(:,:) ! 
Sensible Heat 363 ENDIF 364 zqla (:,:) = Lv * zevap(:,:) ! Latent Heat 365 380 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation 381 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat 382 ENDIF 383 zqla (ji,jj) = Lv * zevap(ji,jj) ! Latent Heat 384 385 END DO 386 END DO 366 387 IF(ln_ctl) THEN 367 388 CALL prt_ctl( tab2d_1=zqla , clinfo1=' blk_oce_core: zqla : ', tab2d_2=Ce , clinfo2=' Ce : ' ) … … 379 400 ! ----------------------------------------------------------------------------- ! 380 401 ! 381 emp (:,:) = ( zevap(:,:) & ! mass flux (evap. - precip.) 382 & - sf(jp_prec)%fnow(:,:,1) * rn_pfac ) * tmask(:,:,1) 383 ! 384 qns(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) & ! Downward Non Solar 385 & - sf(jp_snow)%fnow(:,:,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 386 & - zevap(:,:) * pst(:,:) * rcp & ! remove evap heat content at SST 387 & + ( sf(jp_prec)%fnow(:,:,1) - sf(jp_snow)%fnow(:,:,1) ) * rn_pfac & ! add liquid precip heat content at Tair 388 & * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & 389 & + sf(jp_snow)%fnow(:,:,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 390 & * ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) 402 !$OMP PARALLEL DO schedule(static) private(jj, ji) 403 DO jj = 1, jpj 404 DO ji = 1, jpi 405 emp (ji,jj) = ( zevap(ji,jj) & ! mass flux (evap. - precip.) 406 & - sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ) * tmask(ji,jj,1) 407 ! 408 qns(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) & ! Downward Non Solar 409 & - sf(jp_snow)%fnow(ji,jj,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 410 & - zevap(ji,jj) * pst(ji,jj) * rcp & ! remove evap heat content at SST 411 & + ( sf(jp_prec)%fnow(ji,jj,1) - sf(jp_snow)%fnow(ji,jj,1) ) * rn_pfac & ! 
add liquid precip heat content at Tair 412 & * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp & 413 & + sf(jp_snow)%fnow(ji,jj,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 414 & * ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) 415 END DO 416 END DO 391 417 ! 392 418 #if defined key_lim3 419 !$OMP PARALLEL WORKSHARE 393 420 qns_oce(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) ! non solar without emp (only needed by LIM3) 394 421 qsr_oce(:,:) = qsr(:,:) 422 !$OMP END PARALLEL WORKSHARE 395 423 #endif 396 424 ! … … 449 477 450 478 !!gm brutal.... 479 !$OMP PARALLEL WORKSHARE 451 480 utau_ice (:,:) = 0._wp 452 481 vtau_ice (:,:) = 0._wp 453 482 wndm_ice (:,:) = 0._wp 483 !$OMP END PARALLEL WORKSHARE 454 484 !!gm end 455 485 … … 460 490 CASE( 'I' ) ! B-grid ice dynamics : I-point (i.e. F-point with sea-ice indexation) 461 491 ! and scalar wind at T-point ( = | U10m - U_ice | ) (masked) 492 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_f,zwndj_f,zwnorm_f,zwndi_t,zwndj_t) 462 493 DO jj = 2, jpjm1 463 494 DO ji = 2, jpim1 ! B grid : NO vector opt … … 484 515 ! 485 516 CASE( 'C' ) ! C-grid ice dynamics : U & V-points (same as ocean) 517 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_t,zwndj_t) 486 518 DO jj = 2, jpj 487 519 DO ji = fs_2, jpi ! vect. opt. … … 491 523 END DO 492 524 END DO 525 !$OMP PARALLEL DO schedule(static) private(jj,ji) 493 526 DO jj = 2, jpjm1 494 527 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 553 586 zztmp = 1. / ( 1. - albo ) 554 587 ! ! ========================== ! 588 !$OMP PARALLEL 589 !$OMP DO schedule(static) private(jl,jj,ji,zst2,zst3) 555 590 DO jl = 1, jpl ! Loop over ice categories ! 556 591 ! ! ========================== ! … … 602 637 END DO 603 638 ! 639 !$OMP WORKSHARE 604 640 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! total precipitation [kg/m2/s] 605 641 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! 
solid precipitation [kg/m2/s] 642 !$OMP END WORKSHARE 643 !$OMP END PARALLEL 606 644 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow precipitation 607 645 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 612 650 ! --- evaporation --- ! 613 651 z1_lsub = 1._wp / Lsub 652 !$OMP PARALLEL WORKSHARE 614 653 evap_ice (:,:,:) = rn_efac * qla_ice (:,:,:) * z1_lsub ! sublimation 615 654 devap_ice(:,:,:) = rn_efac * dqla_ice(:,:,:) * z1_lsub ! d(sublimation)/dT … … 618 657 ! --- evaporation minus precipitation --- ! 619 658 zsnw(:,:) = 0._wp 659 !$OMP END PARALLEL WORKSHARE 620 660 CALL lim_thd_snwblow( pfrld, zsnw ) ! snow distribution over ice after wind blowing 621 661 emp_oce(:,:) = pfrld(:,:) * zevap(:,:) - ( tprecip(:,:) - sprecip(:,:) ) - sprecip(:,:) * (1._wp - zsnw ) … … 639 679 640 680 ! --- heat content of evap over ice in W/m2 (to be used in 1D-thermo) --- ! 681 !$OMP PARALLEL DO schedule(static) private(jl) 641 682 DO jl = 1, jpl 642 683 qevap_ice(:,:,jl) = 0._wp ! should be -evap_ice(:,:,jl)*( ( Tice - rt0 ) * cpic * tmask(:,:,1) ) … … 652 693 ! ( Maykut and Untersteiner, 1971 ; Ebert and Curry, 1993 ) 653 694 ! 695 !$OMP PARALLEL WORKSHARE 654 696 fr1_i0(:,:) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 655 697 fr2_i0(:,:) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 698 !$OMP END PARALLEL WORKSHARE 656 699 ! 657 700 ! … … 744 787 !! Neutral coefficients at 10m: 745 788 IF( ln_cdgw ) THEN ! wave drag case 789 !$OMP PARALLEL WORKSHARE 746 790 cdn_wave(:,:) = cdn_wave(:,:) + rsmall * ( 1._wp - tmask(:,:,1) ) 747 791 ztmp0 (:,:) = cdn_wave(:,:) 792 !$OMP END PARALLEL WORKSHARE 748 793 ELSE 749 794 ztmp0 = cd_neutral_10m( U_zu ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcice_lim_2.F90
r6140 r7037 131 131 SELECT CASE( cp_ice_msh ) 132 132 CASE( 'I' ) !== B-grid ice dynamics : I-point (i.e. F-point with sea-ice indexation) 133 !$OMP PARALLEL DO schedule(static) private(jj, ji) 133 134 DO jj = 2, jpj 134 135 DO ji = 2, jpi ! NO vector opt. possible … … 143 144 ! 144 145 CASE( 'C' ) !== C-grid ice dynamics : U & V-points (same as ocean) 146 !$OMP PARALLEL WORKSHARE 145 147 u_oce(:,:) = ssu_m(:,:) * umask(:,:,1) ! mean surface ocean current at ice velocity point 146 148 v_oce(:,:) = ssv_m(:,:) * vmask(:,:,1) 149 !$OMP END PARALLEL WORKSHARE 147 150 ! 148 151 END SELECT … … 150 153 ! ... masked sea surface freezing temperature [Kelvin] (set to rt0 over land) 151 154 CALL eos_fzp( sss_m(:,:), tfu(:,:) ) 155 !$OMP PARALLEL WORKSHARE 152 156 tfu(:,:) = tfu(:,:) + rt0 153 157 154 158 zsist (:,:,1) = sist (:,:) + rt0 * ( 1. - tmask(:,:,1) ) 159 !$OMP END PARALLEL WORKSHARE 155 160 156 161 ! Ice albedo … … 164 169 165 170 ! albedo depends on cloud fraction because of non-linear spectral effects 171 !$OMP PARALLEL WORKSHARE 166 172 zalb_ice(:,:,:) = ( 1. - cldf_ice ) * zalb_cs(:,:,:) + cldf_ice * zalb_os(:,:,:) 173 !$OMP END PARALLEL WORKSHARE 167 174 ! In CLIO the cloud fraction is read in the climatology and the all-sky albedo 168 175 ! (zalb_ice) is computed within the bulk routine … … 203 210 IF( ln_mixcpl) THEN 204 211 CALL sbc_cpl_ice_tau( zutau_ice , zvtau_ice ) 212 !$OMP PARALLEL WORKSHARE 205 213 utau_ice(:,:) = utau_ice(:,:) * xcplmask(:,:,0) + zutau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 206 214 vtau_ice(:,:) = vtau_ice(:,:) * xcplmask(:,:,0) + zvtau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 215 !$OMP END PARALLEL WORKSHARE 207 216 ENDIF 208 217 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcrnf.F90
r6460 r7037 124 124 ! ! set temperature & salinity content of runoffs 125 125 IF( ln_rnf_tem ) THEN ! use runoffs temperature data 126 !$OMP PARALLEL WORKSHARE 126 127 rnf_tsc(:,:,jp_tem) = ( sf_t_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 128 !$OMP END PARALLEL WORKSHARE 127 129 CALL eos_fzp( sss_m(:,:), ztfrz(:,:) ) 130 !$OMP PARALLEL WORKSHARE 128 131 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -999._wp ) ! if missing data value use SST as runoffs temperature 129 132 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 … … 132 135 rnf_tsc(:,:,jp_tem) = ztfrz(:,:) * rnf(:,:) * r1_rau0 - rnf(:,:) * rlfusisf * r1_rau0_rcp 133 136 END WHERE 137 !$OMP END PARALLEL WORKSHARE 134 138 ELSE ! use SST as runoffs temperature 139 !$OMP PARALLEL WORKSHARE 135 140 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 141 !$OMP END PARALLEL WORKSHARE 136 142 ENDIF 137 143 ! ! use runoffs salinity data 138 IF( ln_rnf_sal ) rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 144 IF( ln_rnf_sal ) THEN 145 !$OMP PARALLEL WORKSHARE 146 rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 147 !$OMP END PARALLEL WORKSHARE 148 END IF 139 149 ! ! else use S=0 for runoffs (done one for all in the init) 140 150 CALL iom_put( "runoffs", rnf ) ! output runoffs arrays … … 152 162 ELSE !* no restart: set from nit000 values 153 163 IF(lwp) WRITE(numout,*) ' nit000-1 runoff forcing fields set to nit000' 164 !$OMP PARALLEL WORKSHARE 154 165 rnf_b (:,: ) = rnf (:,: ) 155 166 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 167 !$OMP END PARALLEL WORKSHARE 156 168 ENDIF 157 169 ENDIF … … 197 209 DO jj = 1, jpj 198 210 DO ji = 1, jpi 211 !$OMP PARALLEL DO schedule(static) private(jk) 199 212 DO jk = 1, nk_rnf(ji,jj) 200 213 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) … … 203 216 END DO 204 217 ELSE !* variable volume case 218 !$OMP PARALLEL 205 219 DO jj = 1, jpj ! 
update the depth over which runoffs are distributed 206 220 DO ji = 1, jpi 207 221 h_rnf(ji,jj) = 0._wp 222 !$OMP DO schedule(static) private(jk) 208 223 DO jk = 1, nk_rnf(ji,jj) ! recalculates h_rnf to be the depth in metres 209 224 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) ! to the bottom of the relevant grid box 210 225 END DO 211 226 ! ! apply the runoff input flow 227 !$OMP DO schedule(static) private(jk) 212 228 DO jk = 1, nk_rnf(ji,jj) 213 229 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) 214 230 END DO 231 !$OMP END DO NOWAIT 215 232 END DO 216 233 END DO 234 !$OMP END PARALLEL 217 235 ENDIF 218 236 ELSE !== runoff put only at the surface ==! 237 !$OMP PARALLEL WORKSHARE 219 238 h_rnf (:,:) = e3t_n (:,:,1) ! update h_rnf to be depth of top box 220 239 phdivn(:,:,1) = phdivn(:,:,1) - ( rnf(:,:) + rnf_b(:,:) ) * zfact * r1_rau0 / e3t_n(:,:,1) 240 !$OMP END PARALLEL WORKSHARE 221 241 ENDIF 222 242 ! … … 256 276 ln_rnf_mouth = .FALSE. ! default definition needed for example by sbc_ssr or by tra_adv_muscl 257 277 nkrnf = 0 278 !$OMP PARALLEL WORKSHARE 258 279 rnf (:,:) = 0.0_wp 259 280 rnf_b (:,:) = 0.0_wp 260 281 rnfmsk (:,:) = 0.0_wp 261 282 rnfmsk_z(:) = 0.0_wp 283 !$OMP END PARALLEL WORKSHARE 262 284 RETURN 263 285 ENDIF … … 357 379 DO ji = 1, jpi 358 380 h_rnf(ji,jj) = 0._wp 381 !$OMP PARALLEL DO schedule(static) private(jk) 359 382 DO jk = 1, nk_rnf(ji,jj) 360 383 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) … … 415 438 DO ji = 1, jpi 416 439 h_rnf(ji,jj) = 0._wp 440 !$OMP PARALLEL DO schedule(static) private(jk) 417 441 DO jk = 1, nk_rnf(ji,jj) 418 442 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) … … 432 456 ENDIF 433 457 ! 458 !$OMP PARALLEL WORKSHARE 434 459 rnf(:,:) = 0._wp ! runoff initialisation 435 460 rnf_tsc(:,:,:) = 0._wp ! runoffs temperature & salinty contents initilisation 461 !$OMP END PARALLEL WORKSHARE 436 462 ! 437 463 ! ! ======================== -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssm.F90
r6748 r7037 73 73 ssv_m(:,:) = vb(:,:,1) 74 74 !$OMP END PARALLEL WORKSHARE 75 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 76 ELSE ; sst_m(:,:) = zts(:,:,jp_tem) 75 IF( l_useCT ) THEN 76 !$OMP PARALLEL WORKSHARE 77 sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 78 !$OMP END PARALLEL WORKSHARE 79 ELSE 80 !$OMP PARALLEL WORKSHARE 81 sst_m(:,:) = zts(:,:,jp_tem) 82 !$OMP END PARALLEL WORKSHARE 77 83 ENDIF 78 84 !$OMP PARALLEL WORKSHARE … … 82 88 IF( ln_apr_dyn ) THEN 83 89 !$OMP PARALLEL WORKSHARE 84 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) )90 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 85 91 !$OMP END PARALLEL WORKSHARE 86 92 ELSE 87 93 !$OMP PARALLEL WORKSHARE 88 ssh_m(:,:) = sshn(:,:)94 ssh_m(:,:) = sshn(:,:) 89 95 !$OMP END PARALLEL WORKSHARE 90 96 ENDIF … … 107 113 ssv_m(:,:) = zcoef * vb(:,:,1) 108 114 !$OMP END PARALLEL WORKSHARE 109 IF( l_useCT ) THEN ; sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 110 ELSE ; sst_m(:,:) = zcoef * zts(:,:,jp_tem) 115 IF( l_useCT ) THEN 116 !$OMP PARALLEL WORKSHARE 117 sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 118 !$OMP END PARALLEL WORKSHARE 119 ELSE 120 !$OMP PARALLEL WORKSHARE 121 sst_m(:,:) = zcoef * zts(:,:,jp_tem) 122 !$OMP END PARALLEL WORKSHARE 111 123 ENDIF 112 124 !$OMP PARALLEL WORKSHARE … … 116 128 IF( ln_apr_dyn ) THEN 117 129 !$OMP PARALLEL WORKSHARE 118 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) )130 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 119 131 !$OMP END PARALLEL WORKSHARE 120 132 ELSE 121 133 !$OMP PARALLEL WORKSHARE 122 ssh_m(:,:) = zcoef * sshn(:,:)134 ssh_m(:,:) = zcoef * sshn(:,:) 123 135 !$OMP END PARALLEL WORKSHARE 124 136 ENDIF … … 149 161 ssv_m(:,:) = ssv_m(:,:) + vb(:,:,1) 150 162 !$OMP END PARALLEL WORKSHARE 151 IF( l_useCT ) THEN ; sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( 
zts(:,:,jp_tem), zts(:,:,jp_sal) ) 152 ELSE ; sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 163 IF( l_useCT ) THEN 164 !$OMP PARALLEL WORKSHARE 165 sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 166 !$OMP END PARALLEL WORKSHARE 167 ELSE 168 !$OMP PARALLEL WORKSHARE 169 sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 170 !$OMP END PARALLEL WORKSHARE 153 171 ENDIF 154 172 !$OMP PARALLEL WORKSHARE … … 158 176 IF( ln_apr_dyn ) THEN 159 177 !$OMP PARALLEL WORKSHARE 160 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) )178 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 161 179 !$OMP END PARALLEL WORKSHARE 162 180 ELSE 163 181 !$OMP PARALLEL WORKSHARE 164 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:)182 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 165 183 !$OMP END PARALLEL WORKSHARE 166 184 ENDIF … … 257 275 CALL iom_get( numror, jpdom_autoglo, 'frq_m' , frq_m ) 258 276 ELSE 277 !$OMP PARALLEL WORKSHARE 259 278 frq_m(:,:) = 1._wp ! default definition 279 !$OMP END PARALLEL WORKSHARE 260 280 ENDIF 261 281 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssr.F90
r6140 r7037 93 93 ! 94 94 IF( nn_sstr == 1 ) THEN !* Temperature restoring term 95 !$OMP PARALLEL DO schedule(static) private(jj, ji, zqrp) 95 96 DO jj = 1, jpj 96 97 DO ji = 1, jpi … … 105 106 IF( nn_sssr == 1 ) THEN !* Salinity damping term (salt flux only (sfx)) 106 107 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 108 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 107 109 DO jj = 1, jpj 108 110 DO ji = 1, jpi … … 118 120 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 119 121 zerp_bnd = rn_sssr_bnd / rday ! - - 122 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 120 123 DO jj = 1, jpj 121 124 DO ji = 1, jpi -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/eosbn2.F90
r6748 r7037 504 504 CASE( np_teos10, np_eos80 ) !== polynomial TEOS-10 / EOS-80 ==! 505 505 ! 506 !$OMP PARALLEL DO schedule(static) private(jj, ji, zh, zt, zs, zn3, zn2, zn1, zn0, zn) 506 507 DO jj = 1, jpjm1 507 508 DO ji = 1, fs_jpim1 ! vector opt. … … 543 544 CASE( np_seos ) !== simplified EOS ==! 544 545 ! 546 !$OMP PARALLEL DO schedule(static) private(jj, ji, zh, zt, zs, zn) 545 547 DO jj = 1, jpjm1 546 548 DO ji = 1, fs_jpim1 ! vector opt. … … 705 707 IF( nn_timing == 1 ) CALL timing_start('rab_2d') 706 708 ! 709 !$OMP PARALLEL WORKSHARE 707 710 pab(:,:,:) = 0._wp 711 !$OMP END PARALLEL WORKSHARE 708 712 ! 709 713 SELECT CASE ( neos ) … … 711 715 CASE( np_teos10, np_eos80 ) !== polynomial TEOS-10 / EOS-80 ==! 712 716 ! 717 !$OMP PARALLEL DO schedule(static) private(jj, ji, zh, zt, zs, zn3, zn2, zn1, zn0, zn) 713 718 DO jj = 1, jpjm1 714 719 DO ji = 1, fs_jpim1 ! vector opt. … … 769 774 CASE( np_seos ) !== simplified EOS ==! 770 775 ! 776 !$OMP PARALLEL DO schedule(static) private(jj, ji, zh, zt, zs, zn) 771 777 DO jj = 1, jpjm1 772 778 DO ji = 1, fs_jpim1 ! vector opt. … … 977 983 z1_T0 = 1._wp/40._wp 978 984 ! 985 !$OMP PARALLEL DO schedule(static) private(jj, ji, zt, zs, ztm, zn,zd) 979 986 DO jj = 1, jpj 980 987 DO ji = 1, jpi … … 1032 1039 ! 1033 1040 z1_S0 = 1._wp / 35.16504_wp 1041 !$OMP PARALLEL 1042 !$OMP DO schedule(static) private(jj, ji, zs) 1034 1043 DO jj = 1, jpj 1035 1044 DO ji = 1, jpi … … 1039 1048 END DO 1040 1049 END DO 1050 !$OMP WORKSHARE 1041 1051 ptf(:,:) = ptf(:,:) * psal(:,:) 1042 ! 1043 IF( PRESENT( pdep ) ) ptf(:,:) = ptf(:,:) - 7.53e-4 * pdep(:,:) 1052 !$OMP END WORKSHARE NOWAIT 1053 !$OMP END PARALLEL 1054 ! 1055 IF( PRESENT( pdep ) ) THEN 1056 !$OMP DO schedule(static) private(jj, ji) 1057 DO jj = 1, jpj 1058 DO ji = 1, jpi 1059 ptf(ji,jj) = ptf(ji,jj) - 7.53e-4 * pdep(ji,jj) 1060 END DO 1061 END DO 1062 END IF 1044 1063 ! 1045 1064 CASE ( np_eos80 ) !== PT,SP (UNESCO formulation) ==! 1046 1065 ! 
1047 ptf(:,:) = ( - 0.0575_wp + 1.710523e-3_wp * SQRT( psal(:,:) ) & 1048 & - 2.154996e-4_wp * psal(:,:) ) * psal(:,:) 1066 !$OMP PARALLEL DO schedule(static) private(jj, ji) 1067 DO jj = 1, jpj 1068 DO ji = 1, jpi 1069 ptf(ji,jj) = ( - 0.0575_wp + 1.710523e-3_wp * SQRT( psal(ji,jj) ) & 1070 & - 2.154996e-4_wp * psal(ji,jj) ) * psal(ji,jj) 1071 END DO 1072 END DO 1049 1073 ! 1050 IF( PRESENT( pdep ) ) ptf(:,:) = ptf(:,:) - 7.53e-4 * pdep(:,:) 1074 IF( PRESENT( pdep ) ) THEN 1075 !$OMP PARALLEL WORKSHARE 1076 ptf(:,:) = ptf(:,:) - 7.53e-4 * pdep(:,:) 1077 !$OMP END PARALLEL WORKSHARE 1078 END IF 1051 1079 ! 1052 1080 CASE DEFAULT -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv.F90
r6748 r7037 108 108 ! 109 109 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! add z-tilde and/or vvl corrections 110 !$OMP PARALLEL WORKSHARE 110 111 zun(:,:,:) = zun(:,:,:) + un_td(:,:,:) 111 112 zvn(:,:,:) = zvn(:,:,:) + vn_td(:,:,:) 112 ENDIF 113 ! 113 !$OMP END PARALLEL WORKSHARE 114 ENDIF 115 ! 116 !$OMP PARALLEL WORKSHARE 114 117 zun(:,:,jpk) = 0._wp ! no transport trough the bottom 115 118 zvn(:,:,jpk) = 0._wp 116 119 zwn(:,:,jpk) = 0._wp 120 !$OMP END PARALLEL WORKSHARE 117 121 ! 118 122 IF( ln_ldfeiv .AND. .NOT. ln_traldf_triad ) & -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_fct.F90
r6748 r7037 96 96 IF( ( cdtype == 'TRA' .AND. l_trdtra ) .OR. ( cdtype == 'TRC' .AND. l_trdtrc ) ) l_trd = .TRUE. 97 97 ! 98 IF( l_trd ) THEN99 CALL wrk_alloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz )100 !$OMP PARALLEL WORKSHARE101 ztrdx(:,:,:) = 0._wp ; ztrdy(:,:,:) = 0._wp ; ztrdz(:,:,:) = 0._wp102 !$OMP END PARALLEL WORKSHARE103 ENDIF104 !105 98 ! ! surface & bottom value : flux set to zero one for all 106 99 !$OMP PARALLEL WORKSHARE 107 100 zwz(:,:, 1 ) = 0._wp 108 zwx(:,:,jpk) = 0._wp ; zwy(:,:,jpk) = 0._wp ; zwz(:,:,jpk) = 0._wp 109 ! 101 zwx(:,:,jpk) = 0._wp 102 zwy(:,:,jpk) = 0._wp 103 zwz(:,:,jpk) = 0._wp 110 104 zwi(:,:,:) = 0._wp 111 105 !$OMP END PARALLEL WORKSHARE … … 115 109 ! !== upstream advection with initial mass fluxes & intermediate update ==! 116 110 ! !* upstream tracer flux in the i and j direction 117 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zfp_vj, zfm_vj, zfp_ui,zfm_ui) 111 !$OMP PARALLEL 112 !$OMP DO schedule(static) private(jk, jj, ji, zfp_vj, zfm_vj, zfp_ui,zfm_ui) 118 113 DO jk = 1, jpkm1 119 114 DO jj = 1, jpjm1 … … 129 124 END DO 130 125 END DO 126 !$OMP END DO NOWAIT 131 127 ! !* upstream tracer flux in the k direction *! 132 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, zfp_wk, zfm_wk)128 !$OMP DO schedule(static) private(jk, jj, ji, zfp_wk, zfm_wk) 133 129 DO jk = 2, jpkm1 ! Interior value ( multiplied by wmask) 134 130 DO jj = 1, jpj … … 140 136 END DO 141 137 END DO 138 !$OMP END DO NOWAIT 139 !$OMP END PARALLEL 142 140 IF( ln_linssh ) THEN ! top ocean value (only in linear free surface as zwz has been w-masked) 143 141 IF( ln_isfcav ) THEN ! top of the ice-shelf cavities and at the ocean surface 142 !$OMP PARALLEL DO schedule(static) private(jj, ji) 144 143 DO jj = 1, jpj 145 144 DO ji = 1, jpi … … 148 147 END DO 149 148 ELSE ! 
no cavities: only at the ocean surface 149 !$OMP PARALLEL WORKSHARE 150 150 zwz(:,:,1) = pwn(:,:,1) * ptb(:,:,1,jn) 151 !$OMP END PARALLEL WORKSHARE 151 152 ENDIF 152 153 ENDIF … … 170 171 ! 171 172 IF( l_trd ) THEN ! trend diagnostics (contribution of upstream fluxes) 172 !$OMP PARALLEL WORKSHARE 173 ztrdx(:,:,:) = zwx(:,:,:) ; ztrdy(:,:,:) = zwy(:,:,:) ; ztrdz(:,:,:) = zwz(:,:,:) 174 !$OMP END PARALLEL WORKSHARE 173 CALL wrk_alloc( jpi, jpj, jpk, ztrdx, ztrdy, ztrdz ) 174 !$OMP PARALLEL 175 !$OMP WORKSHARE 176 ztrdx(:,:,:) = 0._wp 177 ztrdy(:,:,:) = 0._wp 178 ztrdz(:,:,:) = 0._wp 179 !$OMP END WORKSHARE 180 !$OMP WORKSHARE 181 ztrdx(:,:,:) = zwx(:,:,:) 182 ztrdy(:,:,:) = zwy(:,:,:) 183 ztrdz(:,:,:) = zwz(:,:,:) 184 !$OMP END WORKSHARE 185 !$OMP END PARALLEL 175 186 END IF 176 187 ! ! "Poleward" heat and salt transports (contribution of upstream fluxes) … … 196 207 ! 197 208 CASE( 4 ) !- 4th order centered 198 !$OMP PARALLEL WORKSHARE 209 !$OMP PARALLEL 210 !$OMP WORKSHARE 199 211 zltu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 200 212 zltv(:,:,jpk) = 0._wp 201 !$OMP END PARALLELWORKSHARE202 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)213 !$OMP END WORKSHARE 214 !$OMP DO schedule(static) private(jk, jj, ji) 203 215 DO jk = 1, jpkm1 ! Laplacian 204 216 DO jj = 1, jpjm1 ! 1st derivative (gradient) … … 215 227 END DO 216 228 END DO 229 !$OMP END DO NOWAIT 230 !$OMP END PARALLEL 217 231 CALL lbc_lnk( zltu, 'T', 1. ) ; CALL lbc_lnk( zltv, 'T', 1. ) ! Lateral boundary cond. (unchanged sgn) 218 232 ! … … 231 245 ! 232 246 CASE( 41 ) !- 4th order centered ==>> !!gm coding attempt need to be tested 233 !$OMP PARALLEL WORKSHARE 247 !$OMP PARALLEL 248 !$OMP WORKSHARE 234 249 ztu(:,:,jpk) = 0._wp ! Bottom value : flux set to zero 235 250 ztv(:,:,jpk) = 0._wp 236 !$OMP END PARALLELWORKSHARE237 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)251 !$OMP END WORKSHARE 252 !$OMP DO schedule(static) private(jk, jj, ji) 238 253 DO jk = 1, jpkm1 ! 
1st derivative (gradient) 239 254 DO jj = 1, jpjm1 … … 244 259 END DO 245 260 END DO 261 !$OMP END DO NOWAIT 262 !$OMP END PARALLEL 246 263 CALL lbc_lnk( ztu, 'U', -1. ) ; CALL lbc_lnk( ztv, 'V', -1. ) ! Lateral boundary cond. (unchanged sgn) 247 264 ! … … 290 307 END SELECT 291 308 IF( ln_linssh ) THEN ! top ocean value: high order = upstream ==>> zwz=0 309 !$OMP PARALLEL WORKSHARE 292 310 zwz(:,:,1) = 0._wp ! only ocean surface as interior zwz values have been w-masked 311 !$OMP END PARALLEL WORKSHARE 293 312 ENDIF 294 313 ! … … 649 668 zbig = 1.e+40_wp 650 669 zrtrn = 1.e-15_wp 651 !$OMP PARALLEL WORKSHARE652 zbetup(:,:,:) = 0._wp ; zbetdo(:,:,:) = 0._wp653 !$OMP END PARALLEL WORKSHARE654 670 655 671 ! Search local extrema … … 661 677 & paft * tmask + zbig * ( 1._wp - tmask ) ) 662 678 663 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, ikm1, zup, zdo, zpos, zneg, zbt) 679 !$OMP PARALLEL 680 !$OMP WORKSHARE 681 zbetup(:,:,:) = 0._wp 682 zbetdo(:,:,:) = 0._wp 683 !$OMP END WORKSHARE 684 !$OMP DO schedule(static) private(jk, jj, ji, ikm1, zup, zdo, zpos, zneg, zbt) 664 685 DO jk = 1, jpkm1 665 686 ikm1 = MAX(jk-1,1) … … 696 717 END DO 697 718 END DO 719 !$OMP END PARALLEL 698 720 CALL lbc_lnk( zbetup, 'T', 1. ) ; CALL lbc_lnk( zbetdo, 'T', 1. ) ! lateral boundary cond. (unchanged sign) 699 721 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traadv_mus.F90
r6140 r7037 101 101 ! 102 102 ALLOCATE( xind(jpi,jpj,jpk), STAT=ierr ) 103 !$OMP PARALLEL WORKSHARE 103 104 xind(:,:,:) = 1._wp ! set equal to 1 where up-stream is not needed 105 !$OMP END PARALLEL WORKSHARE 104 106 ! 105 107 IF( ld_msc_ups ) THEN ! define the upstream indicator (if asked) 106 108 ALLOCATE( upsmsk(jpi,jpj), STAT=ierr ) 109 !$OMP PARALLEL 110 !$OMP WORKSHARE 107 111 upsmsk(:,:) = 0._wp ! not upstream by default 112 !$OMP END WORKSHARE 108 113 ! 114 !$OMP DO schedule(static) private(jk) 109 115 DO jk = 1, jpkm1 110 116 xind(:,:,jk) = 1._wp & ! =>1 where up-stream is not needed … … 112 118 & upsmsk(:,:) ) * tmask(:,:,jk) ! =>0 in some user defined area 113 119 END DO 120 !$OMP END DO NOWAIT 121 !$OMP END PARALLEL 114 122 ENDIF 115 123 ! … … 121 129 ! 122 130 ! !-- first guess of the slopes 131 !$OMP PARALLEL 132 !$OMP WORKSHARE 123 133 zwx(:,:,jpk) = 0._wp ! bottom values 124 zwy(:,:,jpk) = 0._wp 134 zwy(:,:,jpk) = 0._wp 135 !$OMP END WORKSHARE 136 !$OMP DO schedule(static) private(jk, jj, ji) 125 137 DO jk = 1, jpkm1 ! interior values 126 138 DO jj = 1, jpjm1 … … 131 143 END DO 132 144 END DO 145 !$OMP END DO NOWAIT 146 !$OMP END PARALLEL 133 147 CALL lbc_lnk( zwx, 'U', -1. ) ! lateral boundary conditions (changed sign) 134 148 CALL lbc_lnk( zwy, 'V', -1. ) 135 149 ! !-- Slopes of tracer 150 !$OMP PARALLEL 151 !$OMP WORKSHARE 136 152 zslpx(:,:,jpk) = 0._wp ! bottom values 137 153 zslpy(:,:,jpk) = 0._wp 154 !$OMP END WORKSHARE 155 !$OMP DO schedule(static) private(jk, jj, ji) 138 156 DO jk = 1, jpkm1 ! interior values 139 157 DO jj = 2, jpj … … 147 165 END DO 148 166 ! 167 !$OMP DO schedule(static) private(jk, jj, ji) 149 168 DO jk = 1, jpkm1 !-- Slopes limitation 150 169 DO jj = 2, jpj … … 160 179 END DO 161 180 ! 
181 !$OMP DO schedule(static) private(jk, jj, ji, z0u, zalpha, zu, zv, zzwx, zzwy, z0v) 162 182 DO jk = 1, jpkm1 !-- MUSCL horizontal advective fluxes 163 183 DO jj = 2, jpjm1 … … 180 200 END DO 181 201 END DO 202 !$OMP END DO NOWAIT 203 !$OMP END PARALLEL 182 204 CALL lbc_lnk( zwx, 'U', -1. ) ; CALL lbc_lnk( zwy, 'V', -1. ) ! lateral boundary conditions (changed sign) 183 205 ! 206 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 184 207 DO jk = 1, jpkm1 !-- Tracer advective trend 185 208 DO jj = 2, jpjm1 … … 206 229 ! 207 230 ! !-- first guess of the slopes 231 !$OMP PARALLEL 232 !$OMP WORKSHARE 208 233 zwx(:,:, 1 ) = 0._wp ! surface & bottom boundary conditions 209 234 zwx(:,:,jpk) = 0._wp 235 !$OMP END WORKSHARE 236 !$OMP DO schedule(static) private(jk) 210 237 DO jk = 2, jpkm1 ! interior values 211 238 zwx(:,:,jk) = tmask(:,:,jk) * ( ptb(:,:,jk-1,jn) - ptb(:,:,jk,jn) ) 212 239 END DO 213 240 ! !-- Slopes of tracer 241 !$OMP END DO NOWAIT 242 !$OMP WORKSHARE 214 243 zslpx(:,:,1) = 0._wp ! surface values 244 !$OMP END WORKSHARE 245 !$OMP DO schedule(static) private(jk, jj, ji) 215 246 DO jk = 2, jpkm1 ! interior value 216 247 DO jj = 1, jpj … … 221 252 END DO 222 253 END DO 254 !$OMP DO schedule(static) private(jk, jj, ji) 223 255 DO jk = 2, jpkm1 !-- Slopes limitation 224 256 DO jj = 1, jpj ! interior values … … 230 262 END DO 231 263 END DO 264 !$OMP DO schedule(static) private(jk, jj, ji, z0w, zalpha, zw, zzwx, zzwy) 232 265 DO jk = 1, jpk-2 !-- vertical advective flux 233 266 DO jj = 2, jpjm1 … … 242 275 END DO 243 276 END DO 277 !$OMP END DO NOWAIT 278 !$OMP END PARALLEL 244 279 IF( ln_linssh ) THEN ! top values, linear free surface only 245 280 IF( ln_isfcav ) THEN ! ice-shelf cavities (top of the ocean) 281 !$OMP PARALLEL DO schedule(static) private(jj, ji) 246 282 DO jj = 1, jpj 247 283 DO ji = 1, jpi … … 250 286 END DO 251 287 ELSE ! 
no cavities: only at the ocean surface 288 !$OMP PARALLEL WORKSHARE 252 289 zwx(:,:,1) = pwn(:,:,1) * ptb(:,:,1,jn) 290 !$OMP END PARALLEL WORKSHARE 253 291 ENDIF 254 292 ENDIF 255 293 ! 294 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 256 295 DO jk = 1, jpkm1 !-- vertical advective trend 257 296 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trabbc.F90
r6140 r7037 84 84 IF( l_trdtra ) THEN ! Save the input temperature trend 85 85 CALL wrk_alloc( jpi,jpj,jpk, ztrdt ) 86 !$OMP PARALLEL WORKSHARE 86 87 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) 88 !$OMP END PARALLEL WORKSHARE 87 89 ENDIF 88 90 ! ! Add the geothermal trend on temperature 91 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 92 DO jj = 2, jpjm1 90 93 DO ji = 2, jpim1 … … 96 99 ! 97 100 IF( l_trdtra ) THEN ! Send the trend for diagnostics 101 !$OMP PARALLEL WORKSHARE 98 102 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 103 !$OMP END PARALLEL WORKSHARE 99 104 CALL trd_tra( kt, 'TRA', jp_tem, jptra_bbc, ztrdt ) 100 105 CALL wrk_dealloc( jpi,jpj,jpk, ztrdt ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trabbl.F90
r6140 r7037 146 146 147 147 IF( l_trdtra ) THEN ! send the trends for further diagnostics 148 !$OMP PARALLEL WORKSHARE 148 149 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 149 150 ztrds(:,:,:) = tsa(:,:,:,jp_sal) - ztrds(:,:,:) 151 !$OMP END PARALLEL WORKSHARE 150 152 CALL trd_tra( kt, 'TRA', jp_tem, jptra_bbl, ztrdt ) 151 153 CALL trd_tra( kt, 'TRA', jp_sal, jptra_bbl, ztrds ) … … 195 197 DO jn = 1, kjpt ! tracer loop 196 198 ! ! =========== 199 !$OMP PARALLEL DO schedule(static) private(jj,ji,ik) 197 200 DO jj = 1, jpj 198 201 DO ji = 1, jpi … … 202 205 END DO 203 206 ! 207 !$OMP PARALLEL DO schedule(static) private(jj,ji,ik) 204 208 DO jj = 2, jpjm1 ! Compute the trend 205 209 DO ji = 2, jpim1 … … 357 361 ENDIF 358 362 ! !* bottom variables (T, S, alpha, beta, depth, velocity) 363 !$OMP PARALLEL DO schedule(static) private(jj,ji,ik) 359 364 DO jj = 1, jpj 360 365 DO ji = 1, jpi … … 374 379 IF( nn_bbl_ldf == 1 ) THEN ! diffusive bbl ! 375 380 ! !-------------------! 381 !$OMP PARALLEL DO schedule(static) private(jj,ji,za,zb,zgdrho,zsign) 376 382 DO jj = 1, jpjm1 ! (criteria for non zero flux: grad(rho).grad(h) < 0 ) 377 383 DO ji = 1, fs_jpim1 ! vector opt. … … 406 412 ! 407 413 CASE( 1 ) != use of upper velocity 414 !$OMP PARALLEL DO schedule(static) private(jj,ji,za,zb,zgdrho,zsign,zsigna) 408 415 DO jj = 1, jpjm1 ! criteria: grad(rho).grad(h)<0 and grad(rho).grad(h)<0 409 416 DO ji = 1, fs_jpim1 ! vector opt. … … 437 444 CASE( 2 ) != bbl velocity = F( delta rho ) 438 445 zgbbl = grav * rn_gambbl 446 !$OMP PARALLEL DO schedule(static) private(jj,ji,iid,iis,ikud,ikus,za,zb,zgdrho,ijd,ijs,ikvd,ikvs) 439 447 DO jj = 1, jpjm1 ! criteria: rho_up > rho_down 440 448 DO ji = 1, fs_jpim1 ! vector opt. … … 533 541 534 542 ! !* vertical index of "deep" bottom u- and v-points 543 !$OMP PARALLEL DO schedule(static) private(jj,ji) 535 544 DO jj = 1, jpjm1 ! 
(the "shelf" bottom k-indices are mbku and mbkv) 536 545 DO ji = 1, jpim1 … … 547 556 !* sign of grad(H) at u- and v-points 548 557 mgrhu(jpi,:) = 0 ; mgrhu(:,jpj) = 0 ; mgrhv(jpi,:) = 0 ; mgrhv(:,jpj) = 0 558 !$OMP PARALLEL DO schedule(static) private(jj,ji) 549 559 DO jj = 1, jpjm1 550 560 DO ji = 1, jpim1 … … 554 564 END DO 555 565 566 !$OMP PARALLEL DO schedule(static) private(jj,ji) 556 567 DO jj = 1, jpjm1 !* bbl thickness at u- (v-) point 557 568 DO ji = 1, jpim1 ! minimum of top & bottom e3u_0 (e3v_0) … … 563 574 564 575 ! !* masked diffusive flux coefficients 576 !$OMP PARALLEL WORKSHARE 565 577 ahu_bbl_0(:,:) = rn_ahtbbl * e2_e1u(:,:) * e3u_bbl_0(:,:) * umask(:,:,1) 566 578 ahv_bbl_0(:,:) = rn_ahtbbl * e1_e2v(:,:) * e3v_bbl_0(:,:) * vmask(:,:,1) 579 !$OMP END PARALLEL WORKSHARE 567 580 568 581 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/tradmp.F90
r6140 r7037 102 102 IF( l_trdtra ) THEN !* Save ta and sa trends 103 103 CALL wrk_alloc( jpi,jpj,jpk,jpts, ztrdts ) 104 !$OMP PARALLEL WORKSHARE 104 105 ztrdts(:,:,:,:) = tsa(:,:,:,:) 106 !$OMP END PARALLEL WORKSHARE 105 107 ENDIF 106 108 ! !== input T-S data at kt ==! … … 111 113 CASE( 0 ) !* newtonian damping throughout the water column *! 112 114 DO jn = 1, jpts 115 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 113 116 DO jk = 1, jpkm1 114 117 DO jj = 2, jpjm1 … … 121 124 ! 122 125 CASE ( 1 ) !* no damping in the turbocline (avt > 5 cm2/s) *! 126 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 123 127 DO jk = 1, jpkm1 124 128 DO jj = 2, jpjm1 … … 135 139 ! 136 140 CASE ( 2 ) !* no damping in the mixed layer *! 141 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 137 142 DO jk = 1, jpkm1 138 143 DO jj = 2, jpjm1 … … 151 156 ! 152 157 IF( l_trdtra ) THEN ! trend diagnostic 158 !$OMP PARALLEL WORKSHARE 153 159 ztrdts(:,:,:,:) = tsa(:,:,:,:) - ztrdts(:,:,:,:) 160 !$OMP END PARALLEL WORKSHARE 154 161 CALL trd_tra( kt, 'TRA', jp_tem, jptra_dmp, ztrdts(:,:,:,jp_tem) ) 155 162 CALL trd_tra( kt, 'TRA', jp_sal, jptra_dmp, ztrdts(:,:,:,jp_sal) ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traldf_iso.F90
r6748 r7037 124 124 IF(lwp) WRITE(numout,*) '~~~~~~~~~~~' 125 125 ! 126 !$OMP PARALLEL WORKSHARE 126 127 akz (:,:,:) = 0._wp 127 128 ah_wslp2(:,:,:) = 0._wp 129 !$OMP END PARALLEL WORKSHARE 128 130 ENDIF 129 131 ! ! set time step size (Euler/Leapfrog) … … 216 218 !!---------------------------------------------------------------------- 217 219 !!gm : bug.... why (x,:,:)? (1,jpj,:) and (jpi,1,:) should be sufficient.... 218 !$OMP PARALLEL WORKSHARE 220 !$OMP PARALLEL 221 !$OMP WORKSHARE 219 222 zdit (1,:,:) = 0._wp ; zdit (jpi,:,:) = 0._wp 220 223 zdjt (1,:,:) = 0._wp ; zdjt (jpi,:,:) = 0._wp 221 !$OMP END PARALLELWORKSHARE224 !$OMP END WORKSHARE 222 225 !!end 223 226 224 227 ! Horizontal tracer gradient 225 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)228 !$OMP DO schedule(static) private(jk, jj, ji) 226 229 DO jk = 1, jpkm1 227 230 DO jj = 1, jpjm1 … … 232 235 END DO 233 236 END DO 237 !$OMP END DO NOWAIT 238 !$OMP END PARALLEL 234 239 IF( ln_zps ) THEN ! botton and surface ocean correction of the horizontal gradient 235 240 !$OMP PARALLEL DO schedule(static) private(jj, ji) … … 254 259 !! II - horizontal trend (full) 255 260 !!---------------------------------------------------------------------- 256 !$OMP PARALLEL DO schedule(static) private(jj, ji) 261 !$OMP PARALLEL 262 !$OMP DO schedule(static) private(jj, ji) 257 263 DO jj = 1 , jpj !== Horizontal fluxes 258 264 DO ji = 1, jpi ! vector opt. … … 261 267 END DO 262 268 END DO 263 !$OMP PARALLELDO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2)269 !$OMP DO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2) 264 270 DO jj = 1 , jpjm1 !== Horizontal fluxes 265 271 DO ji = 1, fs_jpim1 ! vector opt. … … 285 291 END DO 286 292 ! 287 !$OMP PARALLELDO schedule(static) private(jj, ji)293 !$OMP DO schedule(static) private(jj, ji) 288 294 DO jj = 2 , jpjm1 !== horizontal divergence and add to pta 289 295 DO ji = fs_2, fs_jpim1 ! vector opt. 
… … 293 299 END DO 294 300 END DO 301 !$OMP END DO NOWAIT 295 302 DO jk = 2, jpkm1 296 !$OMP PARALLELDO schedule(static) private(jj, ji)303 !$OMP DO schedule(static) private(jj, ji) 297 304 DO jj = 1 , jpj !== Horizontal fluxes 298 305 DO ji = 1, jpi ! vector opt. … … 301 308 END DO 302 309 END DO 303 !$OMP PARALLELDO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2)310 !$OMP DO schedule(static) private(jj, ji, zmsku, zmskv, zabe1, zabe2, zcof1, zcof2) 304 311 DO jj = 1 , jpjm1 !== Horizontal fluxes 305 312 DO ji = 1, fs_jpim1 ! vector opt. … … 325 332 END DO 326 333 ! 327 !$OMP PARALLELDO schedule(static) private(jj, ji)334 !$OMP DO schedule(static) private(jj, ji) 328 335 DO jj = 2 , jpjm1 !== horizontal divergence and add to pta 329 336 DO ji = fs_2, fs_jpim1 ! vector opt. … … 333 340 END DO 334 341 END DO 335 END DO 342 !$OMP END DO NOWAIT 343 END DO 344 !$OMP END PARALLEL 336 345 337 346 … … 340 349 !!---------------------------------------------------------------------- 341 350 ! 342 !$OMP PARALLEL WORKSHARE 351 !$OMP PARALLEL 352 !$OMP WORKSHARE 343 353 ztfw(1,:,:) = 0._wp ; ztfw(jpi,:,:) = 0._wp 344 354 ! … … 347 357 ! ! Surface and bottom vertical fluxes set to zero 348 358 ztfw(:,:, 1 ) = 0._wp ; ztfw(:,:,jpk) = 0._wp 349 !$OMP END PARALLELWORKSHARE359 !$OMP END WORKSHARE 350 360 351 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w, zcoef3, zcoef4)361 !$OMP DO schedule(static) private(jk, jj, ji, zmsku, zmskv, zahu_w, zahv_w, zcoef3, zcoef4) 352 362 DO jk = 2, jpkm1 ! interior (2=<jk=<jpk-1) 353 363 DO jj = 2, jpjm1 … … 374 384 END DO 375 385 END DO 386 !$OMP END DO NOWAIT 387 !$OMP END PARALLEL 376 388 ! !== add the vertical 33 flux ==! 377 389 IF( ln_traldf_lap ) THEN ! laplacian case: eddy coef = ah_wslp2 - akz … … 437 449 ! 438 450 IF( cdtype == 'TRA' .AND. 
jn == jp_tem ) THEN 451 !$OMP PARALLEL 452 !$OMP WORKSHARE 439 453 z2d(:,:) = zftu(ji,jj,1) 440 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 454 !$OMP END WORKSHARE 455 !$OMP DO schedule(static) private(jk, jj, ji) 441 456 DO jk = 2, jpkm1 442 457 DO jj = 2, jpjm1 … … 448 463 !!gm CAUTION I think there is an error of sign when using BLP operator.... 449 464 !!gm a multiplication by zsign is required (to be checked twice !) 450 !$OMP PARALLELWORKSHARE465 !$OMP WORKSHARE 451 466 z2d(:,:) = - rau0_rcp * z2d(:,:) ! note sign is reversed to give down-gradient diffusive transports (#1043) 452 !$OMP END PARALLEL WORKSHARE 467 !$OMP END WORKSHARE NOWAIT 468 !$OMP END PARALLEL 453 469 CALL lbc_lnk( z2d, 'U', -1. ) 454 470 CALL iom_put( "udiff_heattr", z2d ) ! heat transport in i-direction 455 471 ! 456 !$OMP PARALLEL WORKSHARE 472 !$OMP PARALLEL 473 !$OMP WORKSHARE 457 474 z2d(:,:) = zftv(ji,jj,1) 458 !$OMP END PARALLELWORKSHARE459 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)475 !$OMP END WORKSHARE 476 !$OMP DO schedule(static) private(jk, jj, ji) 460 477 DO jk = 2, jpkm1 461 478 DO jj = 2, jpjm1 … … 465 482 END DO 466 483 END DO 467 !$OMP PARALLELWORKSHARE484 !$OMP WORKSHARE 468 485 z2d(:,:) = - rau0_rcp * z2d(:,:) ! note sign is reversed to give down-gradient diffusive transports (#1043) 469 !$OMP END PARALLEL WORKSHARE 486 !$OMP END WORKSHARE NOWAIT 487 !$OMP END PARALLEL 470 488 CALL lbc_lnk( z2d, 'V', -1. ) 471 489 CALL iom_put( "vdiff_heattr", z2d ) ! heat transport in i-direction -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/tranxt.F90
r6748 r7037 208 208 DO jn = 1, kjpt 209 209 ! 210 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztn,ztd) 210 211 DO jk = 1, jpkm1 211 212 DO jj = 2, jpjm1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/traqsr.F90
r6790 r7037 144 144 ELSE ! No restart or restart not found: Euler forward time stepping 145 145 z1_2 = 1._wp 146 !$OMP PARALLEL WORKSHARE 146 147 qsr_hc_b(:,:,:) = 0._wp 148 !$OMP END PARALLEL WORKSHARE 147 149 ENDIF 148 150 ELSE !== Swap of qsr heat content ==! … … 203 205 ! 204 206 zcoef = ( 1. - rn_abs ) / 3._wp !* surface equi-partition in R-G-B 207 !$OMP PARALLEL 208 !$OMP DO schedule(static) private(jj,ji) 205 209 DO jj = 2, jpjm1 206 210 DO ji = fs_2, fs_jpim1 … … 212 216 END DO 213 217 END DO 218 !$OMP END DO NOWAIT 214 219 ! 215 220 DO jk = 2, nksr+1 !* interior equi-partition in R-G-B depending of vertical profile of Chl 221 !$OMP DO schedule(static) private(jj,ji,zchl,irgb) 216 222 DO jj = 2, jpjm1 217 223 DO ji = fs_2, fs_jpim1 … … 224 230 END DO 225 231 232 !$OMP DO schedule(static) private(jj,ji,zc0,zc1,zc2,zc3) 226 233 DO jj = 2, jpjm1 227 234 DO ji = fs_2, fs_jpim1 … … 239 246 END DO 240 247 ! 241 !$OMP PARALLELDO schedule(static) private(jk,jj,ji)248 !$OMP DO schedule(static) private(jk,jj,ji) 242 249 DO jk = 1, nksr !* now qsr induced heat content 243 250 DO jj = 2, jpjm1 … … 247 254 END DO 248 255 END DO 256 !$OMP END DO NOWAIT 257 !$OMP END PARALLEL 249 258 ! 250 259 CALL wrk_dealloc( jpi,jpj, zekb, zekg, zekr ) … … 280 289 ! 281 290 IF( ln_qsr_ice ) THEN ! sea-ice: store the 1st ocean level attenuation coefficient 291 !$OMP PARALLEL DO schedule(static) private(jj,ji) 282 292 DO jj = 2, jpjm1 283 293 DO ji = fs_2, fs_jpim1 ! vector opt. … … 294 304 CALL wrk_alloc( jpi,jpj,jpk, zetot ) 295 305 ! 306 !$OMP PARALLEL 307 !$OMP WORKSHARE 296 308 zetot(:,:,nksr+1:jpk) = 0._wp ! below ~400m set to zero 309 !$OMP END WORKSHARE 297 310 DO jk = nksr, 1, -1 298 zetot(:,:,jk) = zetot(:,:,jk+1) + qsr_hc(:,:,jk) / r1_rau0_rcp 311 !$OMP DO schedule(static) private(jj,ji) 312 DO jj = 1, jpj 313 DO ji = 1, jpi ! vector opt. 
314 zetot(ji,jj,jk) = zetot(ji,jj,jk+1) + qsr_hc(ji,jj,jk) / r1_rau0_rcp 315 END DO 316 END DO 317 !$OMP END DO NOWAIT 299 318 END DO 319 !$OMP END PARALLEL 300 320 CALL iom_put( 'qsr3d', zetot ) ! 3D distribution of shortwave Radiation 301 321 ! … … 309 329 ! 310 330 IF( l_trdtra ) THEN ! qsr tracers trends saved for diagnostics 331 !$OMP PARALLEL WORKSHARE 311 332 ztrdt(:,:,:) = tsa(:,:,:,jp_tem) - ztrdt(:,:,:) 333 !$OMP END PARALLEL WORKSHARE 312 334 CALL trd_tra( kt, 'TRA', jp_tem, jptra_qsr, ztrdt ) 313 335 CALL wrk_dealloc( jpi,jpj,jpk, ztrdt ) … … 444 466 CALL iom_get( numror, jpdom_autoglo, 'fraqsr_1lev' , fraqsr_1lev ) 445 467 ELSE 468 !$OMP PARALLEL WORKSHARE 446 469 fraqsr_1lev(:,:) = 1._wp ! default : no penetration 470 !$OMP END PARALLEL WORKSHARE 447 471 ENDIF 448 472 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf.F90
r6748 r7037 86 86 ! JMM avoid negative salinities near river outlet ! Ugly fix 87 87 ! JMM : restore negative salinities to small salinities: 88 !$OMP PARALLEL WORKSHARE 88 89 WHERE( tsa(:,:,:,jp_sal) < 0._wp ) tsa(:,:,:,jp_sal) = 0.1_wp 90 !$OMP END PARALLEL WORKSHARE 89 91 !!gm 90 92 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/trazdf_imp.F90
r6748 r7037 106 106 ! 107 107 ! vertical mixing coef.: avt for temperature, avs for salinity and passive tracers 108 IF( cdtype == 'TRA' .AND. jn == jp_tem ) THEN ; zwt(:,:,2:jpk) = avt (:,:,2:jpk) 109 ELSE ; zwt(:,:,2:jpk) = fsavs(:,:,2:jpk) 108 IF( cdtype == 'TRA' .AND. jn == jp_tem ) THEN 109 !$OMP PARALLEL WORKSHARE 110 zwt(:,:,2:jpk) = avt (:,:,2:jpk) 111 !$OMP END PARALLEL WORKSHARE 112 ELSE 113 !$OMP PARALLEL WORKSHARE 114 zwt(:,:,2:jpk) = fsavs(:,:,2:jpk) 115 !$OMP END PARALLEL WORKSHARE 110 116 ENDIF 111 117 !$OMP PARALLEL WORKSHARE … … 136 142 ! 137 143 ! Diagonal, lower (i), upper (s) (including the bottom boundary condition since avt is masked) 138 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 144 !$OMP PARALLEL 145 !$OMP DO schedule(static) private(jk, jj, ji) 139 146 DO jk = 1, jpkm1 140 147 DO jj = 2, jpjm1 … … 167 174 ! used as a work space array: its value is modified. 168 175 ! 169 !$OMP PARALLELDO schedule(static) private(jj, ji)176 !$OMP DO schedule(static) private(jj, ji) 170 177 DO jj = 2, jpjm1 !* 1st recurrence: Tk = Dk - Ik Sk-1 / Tk-1 (increasing k) 171 178 DO ji = fs_2, fs_jpim1 ! done one for all passive tracers (so included in the IF instruction) … … 173 180 END DO 174 181 END DO 182 !$OMP END DO NOWAIT 175 183 DO jk = 2, jpkm1 176 !$OMP PARALLELDO schedule(static) private(jj, ji)184 !$OMP DO schedule(static) private(jj, ji) 177 185 DO jj = 2, jpjm1 178 186 DO ji = fs_2, fs_jpim1 … … 180 188 END DO 181 189 END DO 182 END DO 190 !$OMP END DO NOWAIT 191 END DO 192 !$OMP END PARALLEL 183 193 ! 184 194 ENDIF 185 195 ! 
186 !$OMP PARALLEL DO schedule(static) private(jj, ji) 196 !$OMP PARALLEL 197 !$OMP DO schedule(static) private(jj, ji) 187 198 DO jj = 2, jpjm1 !* 2nd recurrence: Zk = Yk - Ik / Tk-1 Zk-1 188 199 DO ji = fs_2, fs_jpim1 … … 191 202 END DO 192 203 DO jk = 2, jpkm1 193 !$OMP PARALLELDO schedule(static) private(jj, ji, zrhs)204 !$OMP DO schedule(static) private(jj, ji, zrhs) 194 205 DO jj = 2, jpjm1 195 206 DO ji = fs_2, fs_jpim1 … … 200 211 END DO 201 212 ! 202 !$OMP PARALLELDO schedule(static) private(jj, ji)213 !$OMP DO schedule(static) private(jj, ji) 203 214 DO jj = 2, jpjm1 !* 3d recurrence: Xk = (Zk - Sk Xk+1 ) / Tk (result is the after tracer) 204 215 DO ji = fs_2, fs_jpim1 … … 207 218 END DO 208 219 DO jk = jpk-2, 1, -1 209 !$OMP PARALLELDO schedule(static) private(jj, ji)220 !$OMP DO schedule(static) private(jj, ji) 210 221 DO jj = 2, jpjm1 211 222 DO ji = fs_2, fs_jpim1 … … 214 225 END DO 215 226 END DO 216 END DO 227 !$OMP END DO NOWAIT 228 END DO 229 !$OMP END PARALLEL 217 230 ! ! ================= ! 218 231 END DO ! end tracer loop ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRA/zpshde.F90
r6140 r7037 101 101 IF( nn_timing == 1 ) CALL timing_start( 'zps_hde') 102 102 ! 103 !$OMP PARALLEL 104 !$OMP WORKSHARE 103 105 pgtu(:,:,:)=0._wp ; zti (:,:,:)=0._wp ; zhi (:,: )=0._wp 104 106 pgtv(:,:,:)=0._wp ; ztj (:,:,:)=0._wp ; zhj (:,: )=0._wp 107 !$OMP END WORKSHARE 105 108 ! 106 109 DO jn = 1, kjpt !== Interpolation of tracers at the last ocean level ==! 107 110 ! 111 !$OMP DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv,zmaxu,zmaxv) 108 112 DO jj = 1, jpjm1 109 113 DO ji = 1, jpim1 … … 145 149 END DO 146 150 END DO 151 !$OMP END DO 152 !$OMP SINGLE 147 153 CALL lbc_lnk( pgtu(:,:,jn), 'U', -1. ) ; CALL lbc_lnk( pgtv(:,:,jn), 'V', -1. ) ! Lateral boundary cond. 154 !$OMP END SINGLE 148 155 ! 149 156 END DO 157 !$OMP END PARALLEL 150 158 ! 151 159 IF( PRESENT( prd ) ) THEN !== horizontal derivative of density anomalies (rd) ==! (optional part) 160 !$OMP PARALLEL 161 !$OMP WORKSHARE 152 162 pgru(:,:) = 0._wp 153 163 pgrv(:,:) = 0._wp ! depth of the partial step level 164 !$OMP END WORKSHARE NOWAIT 165 !$OMP DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv) 154 166 DO jj = 1, jpjm1 155 167 DO ji = 1, jpim1 … … 166 178 END DO 167 179 END DO 180 !$OMP END DO NOWAIT 181 !$OMP END PARALLEL 168 182 ! 169 183 CALL eos( zti, zhi, zri ) ! interpolated density from zti, ztj 170 184 CALL eos( ztj, zhj, zrj ) ! at the partial step depth output in zri, zrj 171 185 ! 186 !$OMP PARALLEL DO schedule(static) private(jj,ji,iku,ikv,ze3wu,ze3wv) 172 187 DO jj = 1, jpjm1 ! Gradient of density at the last level 173 188 DO ji = 1, jpim1 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/TRD/trdtra.F90
r6140 r7037 99 99 CASE( jptra_yad ) ; CALL trd_tra_adv( ptrd, pun, ptra, 'Y', trdty ) 100 100 CASE( jptra_zad ) ; CALL trd_tra_adv( ptrd, pun, ptra, 'Z', trdt ) 101 CASE( jptra_bbc, & ! qsr, bbc: on temperature only, send to trd_tra_mng 102 & jptra_qsr ) ; trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 103 ztrds(:,:,:) = 0._wp 104 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 101 CASE( jptra_bbc, jptra_qsr ) ! qsr, bbc: on temperature only, send to trd_tra_mng 102 !$OMP PARALLEL WORKSHARE 103 trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 104 ztrds(:,:,:) = 0._wp 105 !$OMP END PARALLEL WORKSHARE 106 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 105 107 CASE DEFAULT ! other trends: masked trends 108 !$OMP PARALLEL WORKSHARE 106 109 trdt(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) ! mask & store 110 !$OMP END PARALLEL WORKSHARE 107 111 END SELECT 108 112 ! … … 124 128 CALL wrk_alloc( jpi, jpj, jpk, zwt, zws, ztrdt ) 125 129 ! 130 !$OMP PARALLEL WORKSHARE 126 131 zwt(:,:, 1 ) = 0._wp ; zws(:,:, 1 ) = 0._wp ! vertical diffusive fluxes 127 132 zwt(:,:,jpk) = 0._wp ; zws(:,:,jpk) = 0._wp 133 !$OMP END PARALLEL WORKSHARE 134 !$OMP PARALLEL DO schedule(static) private(jk) 128 135 DO jk = 2, jpk 129 136 zwt(:,:,jk) = avt(:,:,jk) * ( tsa(:,:,jk-1,jp_tem) - tsa(:,:,jk,jp_tem) ) / e3w_n(:,:,jk) * tmask(:,:,jk) … … 131 138 END DO 132 139 ! 140 !$OMP PARALLEL WORKSHARE 133 141 ztrdt(:,:,jpk) = 0._wp ; ztrds(:,:,jpk) = 0._wp 142 !$OMP END PARALLEL WORKSHARE 143 !$OMP PARALLEL DO schedule(static) private(jk) 134 144 DO jk = 1, jpkm1 135 145 ztrdt(:,:,jk) = ( zwt(:,:,jk) - zwt(:,:,jk+1) ) / e3t_n(:,:,jk) … … 141 151 ! 142 152 CASE DEFAULT ! other trends: mask and send T & S trends to trd_tra_mng 153 !$OMP PARALLEL WORKSHARE 143 154 ztrds(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 155 !$OMP END PARALLEL WORKSHARE 144 156 CALL trd_tra_mng( trdt, ztrds, ktrd, kt ) 145 157 END SELECT … … 154 166 CASE( jptra_zad ) ; CALL trd_tra_adv( ptrd , pun , ptra, 'Z', ztrds ) 155 167 CASE DEFAULT ! 
other trends: just masked 168 !$OMP PARALLEL WORKSHARE 156 169 ztrds(:,:,:) = ptrd(:,:,:) * tmask(:,:,:) 170 !$OMP END PARALLEL WORKSHARE 157 171 END SELECT 158 172 ! ! send trend to trd_trc -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfddm.F90
r6497 r7037 112 112 ! Define the mask 113 113 ! --------------- 114 !$OMP PARALLEL 115 !$OMP DO schedule(static) private(jj,ji,zrw,zaw,zbw,zdt,zds) 114 116 DO jj = 1, jpj ! R=zrau = (alpha / beta) (dk[t] / dk[s]) 115 117 DO ji = 1, jpi … … 128 130 END DO 129 131 END DO 130 132 !$OMP END DO NOWAIT 133 134 !$OMP DO schedule(static) private(jj,ji) 131 135 DO jj = 1, jpj ! indicators: 132 136 DO ji = 1, jpi … … 155 159 END DO 156 160 ! mask zmsk in order to have avt and avs masked 157 zmsks(:,:) = zmsks(:,:) * wmask(:,:,jk) 158 161 162 !$OMP DO schedule(static) private(jj,ji) 163 DO jj = 1, jpj 164 DO ji = 1, jpi 165 zmsks(ji,jj) = zmsks(ji,jj) * wmask(ji,jj,jk) 166 END DO 167 END DO 159 168 160 169 ! Update avt and avs 161 170 ! ------------------ 162 171 ! Constant eddy coefficient: reset to the background value 172 !$OMP DO schedule(static) private(jj,ji,zinr,zrr,zavfs,zavft,zavdt,zavds) 163 173 DO jj = 1, jpj 164 174 DO ji = 1, jpi … … 189 199 ! -------------------------------- 190 200 !!gm to be changed following the definition of avm. 201 !$OMP DO schedule(static) private(jj,ji) 191 202 DO jj = 1, jpjm1 192 203 DO ji = 1, fs_jpim1 ! vector opt. … … 199 210 END DO 200 211 END DO 212 !$OMP END DO NOWAIT 213 !$OMP END PARALLEL 201 214 ! ! =============== 202 215 END DO ! End of slab … … 257 270 IF( zdf_ddm_alloc() /= 0 ) CALL ctl_stop( 'STOP', 'zdf_ddm_init : unable to allocate arrays' ) 258 271 ! ! initialization to masked Kz 272 !$OMP PARALLEL WORKSHARE 259 273 avs(:,:,:) = rn_avt0 * wmask(:,:,:) 274 !$OMP END PARALLEL WORKSHARE 260 275 ! 261 276 END SUBROUTINE zdf_ddm_init -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfevd.F90
r6748 r7037 68 68 CALL wrk_alloc( jpi,jpj,jpk, zavt_evd, zavm_evd ) 69 69 ! 70 !$OMP PARALLEL WORKSHARE 70 71 zavt_evd(:,:,:) = avt(:,:,:) ! set avt prior to evd application 72 !$OMP END PARALLEL WORKSHARE 71 73 ! 72 74 SELECT CASE ( nn_evdm ) … … 74 76 CASE ( 1 ) ! enhance vertical eddy viscosity and diffusivity (if rn2<-1.e-12) 75 77 ! 78 !$OMP PARALLEL 79 !$OMP WORKSHARE 76 80 zavm_evd(:,:,:) = avm(:,:,:) ! set avm prior to evd application 81 !$OMP END WORKSHARE 77 82 ! 78 !$OMP PARALLELDO schedule(static) private(jk, jj, ji)83 !$OMP DO schedule(static) private(jk, jj, ji) 79 84 DO jk = 1, jpkm1 80 85 DO jj = 2, jpj ! no vector opt. … … 91 96 END DO 92 97 END DO 98 !$OMP END DO NOWAIT 99 !$OMP END PARALLEL 93 100 CALL lbc_lnk( avt , 'W', 1. ) ; CALL lbc_lnk( avm , 'W', 1. ) ! Lateral boundary conditions 94 101 CALL lbc_lnk( avmu, 'U', 1. ) ; CALL lbc_lnk( avmv, 'V', 1. ) 95 102 ! 103 !$OMP PARALLEL WORKSHARE 96 104 zavm_evd(:,:,:) = avm(:,:,:) - zavm_evd(:,:,:) ! change in avm due to evd 105 !$OMP END PARALLEL WORKSHARE 97 106 CALL iom_put( "avm_evd", zavm_evd ) ! output this change 98 107 ! … … 111 120 END SELECT 112 121 122 !$OMP PARALLEL WORKSHARE 113 123 zavt_evd(:,:,:) = avt(:,:,:) - zavt_evd(:,:,:) ! change in avt due to evd 124 !$OMP END PARALLEL WORKSHARE 114 125 CALL iom_put( "avt_evd", zavt_evd ) ! output this change 115 126 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdfmxl.F90
r6748 r7037 95 95 ENDIF 96 96 97 zN2_c = grav * rho_c * r1_rau0 ! convert density criteria into N^2 criteria 98 97 99 ! w-level of the mixing and mixed layers 98 !$OMP PARALLEL WORKSHARE 100 !$OMP PARALLEL 101 !$OMP WORKSHARE 99 102 nmln(:,:) = nlb10 ! Initialization to the number of w ocean point 100 103 hmlp(:,:) = 0._wp ! here hmlp used as a dummy variable, integrating vertically N^2 101 !$OMP END PARALLEL WORKSHARE 102 zN2_c = grav * rho_c * r1_rau0 ! convert density criteria into N^2 criteria 104 !$OMP END WORKSHARE 103 105 DO jk = nlb10, jpkm1 106 !$OMP DO schedule(static) private(jj, ji, ikt) 104 107 DO jj = 1, jpj ! Mixed layer level: w-level 105 108 DO ji = 1, jpi … … 112 115 ! 113 116 ! w-level of the turbocline and mixing layer (iom_use) 114 !$OMP PARALLELWORKSHARE117 !$OMP WORKSHARE 115 118 imld(:,:) = mbkt(:,:) + 1 ! Initialization to the number of w ocean point 116 !$OMP END PARALLELWORKSHARE119 !$OMP END WORKSHARE 117 120 118 121 DO jk = jpkm1, nlb10, -1 ! from the bottom to nlb10 122 !$OMP DO schedule(static) private(jj, ji) 119 123 DO jj = 1, jpj 120 124 DO ji = 1, jpi … … 124 128 END DO 125 129 ! depth of the mixing and mixed layers 126 !$OMP PARALLELDO schedule(static) private(jj, ji, iiki, iikn)130 !$OMP DO schedule(static) private(jj, ji, iiki, iikn) 127 131 DO jj = 1, jpj 128 132 DO ji = 1, jpi … … 134 138 END DO 135 139 END DO 140 !$OMP END DO NOWAIT 141 !$OMP END PARALLEL 136 142 ! no need to output in offline mode 137 143 IF( .NOT.lk_offline ) THEN -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftke.F90
r6748 r7037 299 299 ! 300 300 ! !* total energy produce by LC : cumulative sum over jk 301 zpelc(:,:,1) = MAX( rn2b(:,:,1), 0._wp ) * gdepw_n(:,:,1) * e3w_n(:,:,1) 301 !$OMP PARALLEL 302 !$OMP DO schedule(static) private(jj, ji) 303 DO jj =1, jpj 304 DO ji=1, jpi 305 zpelc(ji,jj,1) = MAX( rn2b(ji,jj,1), 0._wp ) * gdepw_n(ji,jj,1) * e3w_n(ji,jj,1) 306 END DO 307 END DO 302 308 DO jk = 2, jpk 303 !$OMP PARALLELDO schedule(static) private(jj, ji)309 !$OMP DO schedule(static) private(jj, ji) 304 310 DO jj =1, jpj 305 311 DO ji=1, jpi … … 310 316 ! !* finite Langmuir Circulation depth 311 317 zcof = 0.5 * 0.016 * 0.016 / ( zrhoa * zcdrag ) 318 !$OMP WORKSHARE 312 319 imlc(:,:) = mbkt(:,:) + 1 ! Initialization to the number of w ocean point (=2 over land) 320 !$OMP END WORKSHARE 313 321 DO jk = jpkm1, 2, -1 322 !$OMP DO schedule(static) private(jj, ji, zus) 314 323 DO jj = 1, jpj ! Last w-level at which zpelc>=0.5*us*us 315 324 DO ji = 1, jpi ! with us=0.016*wind(starting from jpk-1) … … 320 329 END DO 321 330 ! ! finite LC depth 322 !$OMP PARALLELDO schedule(static) private(jj, ji)331 !$OMP DO schedule(static) private(jj, ji) 323 332 DO jj = 1, jpj 324 333 DO ji = 1, jpi … … 327 336 END DO 328 337 zcof = 0.016 / SQRT( zrhoa * zcdrag ) 329 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, zus, zind, zwlc)338 !$OMP DO schedule(static) private(jk, jj, ji, zus, zind, zwlc) 330 339 DO jk = 2, jpkm1 !* TKE Langmuir circulation source term added to en 331 340 DO jj = 2, jpjm1 … … 341 350 END DO 342 351 END DO 352 !$OMP END DO NOWAIT 353 !$OMP END PARALLEL 343 354 ! 344 355 ENDIF … … 387 398 ENDIF 388 399 ! 389 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zcof, zzd_up, zzd_lw, zesh2) 400 !$OMP PARALLEL 401 !$OMP DO schedule(static) private(jk, jj, ji, zcof, zzd_up, zzd_lw, zesh2) 390 402 DO jk = 2, jpkm1 !* Matrix and right hand side in en 391 403 DO jj = 2, jpjm1 … … 421 433 ! !* Matrix inversion from level 2 (tke prescribed at level 1) 422 434 DO jk = 3, jpkm1 ! 
First recurrence : Dk = Dk - Lk * Uk-1 / Dk-1 423 !$OMP PARALLELDO schedule(static) private(jj, ji)435 !$OMP DO schedule(static) private(jj, ji) 424 436 DO jj = 2, jpjm1 425 437 DO ji = fs_2, fs_jpim1 ! vector opt. … … 428 440 END DO 429 441 END DO 430 !$OMP PARALLELDO schedule(static) private(jj, ji)442 !$OMP DO schedule(static) private(jj, ji) 431 443 DO jj = 2, jpjm1 ! Second recurrence : Lk = RHSk - Lk / Dk-1 * Lk-1 432 444 DO ji = fs_2, fs_jpim1 ! vector opt. … … 435 447 END DO 436 448 DO jk = 3, jpkm1 437 !$OMP PARALLELDO schedule(static) private(jj, ji)449 !$OMP DO schedule(static) private(jj, ji) 438 450 DO jj = 2, jpjm1 439 451 DO ji = fs_2, fs_jpim1 ! vector opt. … … 442 454 END DO 443 455 END DO 444 !$OMP PARALLELDO schedule(static) private(jj, ji)456 !$OMP DO schedule(static) private(jj, ji) 445 457 DO jj = 2, jpjm1 ! thrid recurrence : Ek = ( Lk - Uk * Ek+1 ) / Dk 446 458 DO ji = fs_2, fs_jpim1 ! vector opt. … … 449 461 END DO 450 462 DO jk = jpk-2, 2, -1 451 !$OMP PARALLELDO schedule(static) private(jj, ji)463 !$OMP DO schedule(static) private(jj, ji) 452 464 DO jj = 2, jpjm1 453 465 DO ji = fs_2, fs_jpim1 ! vector opt. … … 456 468 END DO 457 469 END DO 458 !$OMP PARALLELDO schedule(static) private(jk,jj, ji)470 !$OMP DO schedule(static) private(jk,jj, ji) 459 471 DO jk = 2, jpkm1 ! set the minimum value of tke 460 472 DO jj = 2, jpjm1 … … 464 476 END DO 465 477 END DO 478 !$OMP END DO NOWAIT 479 !$OMP END PARALLEL 466 480 467 481 ! !<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< … … 589 603 ENDIF 590 604 ! 591 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zrn2) 605 !$OMP PARALLEL 606 !$OMP DO schedule(static) private(jk, jj, ji, zrn2) 592 607 DO jk = 2, jpkm1 ! interior value : l=sqrt(2*e/n^2) 593 608 DO jj = 2, jpjm1 … … 601 616 ! !* Physical limits for the mixing length 602 617 ! 603 !$OMP PARALLELWORKSHARE618 !$OMP WORKSHARE 604 619 zmxld(:,:, 1 ) = zmxlm(:,:,1) ! surface set to the minimum value 605 620 zmxld(:,:,jpk) = rmxl_min ! 
last level set to the minimum value 606 !$OMP END PARALLEL WORKSHARE 621 !$OMP END WORKSHARE NOWAIT 622 !$OMP END PARALLEL 607 623 ! 608 624 SELECT CASE ( nn_mxl ) … … 637 653 ! 638 654 CASE ( 2 ) ! |dk[xml]| bounded by e3t : 655 !$OMP PARALLEL 639 656 DO jk = 2, jpkm1 ! from the surface to the bottom : 640 !$OMP PARALLELDO schedule(static) private(jj, ji)657 !$OMP DO schedule(static) private(jj, ji) 641 658 DO jj = 2, jpjm1 642 659 DO ji = fs_2, fs_jpim1 ! vector opt. … … 646 663 END DO 647 664 DO jk = jpkm1, 2, -1 ! from the bottom to the surface : 648 !$OMP PARALLELDO schedule(static) private(jj, ji, zemxl)665 !$OMP DO schedule(static) private(jj, ji, zemxl) 649 666 DO jj = 2, jpjm1 650 667 DO ji = fs_2, fs_jpim1 ! vector opt. … … 655 672 END DO 656 673 END DO 674 !$OMP END PARALLEL 657 675 ! 658 676 CASE ( 3 ) ! lup and ldown, |dk[xml]| bounded by e3t : 677 !$OMP PARALLEL 659 678 DO jk = 2, jpkm1 ! from the surface to the bottom : lup 660 !$OMP PARALLELDO schedule(static) private(jj, ji)679 !$OMP DO schedule(static) private(jj, ji) 661 680 DO jj = 2, jpjm1 662 681 DO ji = fs_2, fs_jpim1 ! vector opt. … … 666 685 END DO 667 686 DO jk = jpkm1, 2, -1 ! from the bottom to the surface : ldown 668 !$OMP PARALLELDO schedule(static) private(jj, ji)687 !$OMP DO schedule(static) private(jj, ji) 669 688 DO jj = 2, jpjm1 670 689 DO ji = fs_2, fs_jpim1 ! vector opt. … … 673 692 END DO 674 693 END DO 675 !$OMP PARALLELDO schedule(static) private(jk, jj, ji, zemlm, zemlp)694 !$OMP DO schedule(static) private(jk, jj, ji, zemlm, zemlp) 676 695 DO jk = 2, jpkm1 677 696 DO jj = 2, jpjm1 … … 684 703 END DO 685 704 END DO 705 !$OMP END PARALLEL 686 706 ! 687 707 END SELECT … … 849 869 ENDIF 850 870 ! !* set vertical eddy coef. 
to the background value 851 !$OMP PARALLEL DO schedule(static) private(jk) 871 !$OMP PARALLEL 872 !$OMP DO schedule(static) private(jk) 852 873 DO jk = 1, jpk 853 874 avt (:,:,jk) = avtb(jk) * wmask (:,:,jk) … … 856 877 avmv(:,:,jk) = avmb(jk) * wvmask(:,:,jk) 857 878 END DO 879 !$OMP END DO NOWAIT 880 !$OMP WORKSHARE 858 881 dissl(:,:,:) = 1.e-12_wp 882 !$OMP END WORKSHARE 883 !$OMP END PARALLEL 859 884 ! 860 885 CALL tke_rst( nit000, 'READ' ) !* read or initialize all required files … … 918 943 ENDIF 919 944 ELSE !* Start from rest 920 !$OMP PARALLEL WORKSHARE 945 !$OMP PARALLEL 946 !$OMP WORKSHARE 921 947 en(:,:,:) = rn_emin * tmask(:,:,:) 922 !$OMP END PARALLEL WORKSHARE923 !$OMP PARALLELDO schedule(static) private(jk)948 !$OMP END WORKSHARE NOWAIT 949 !$OMP DO schedule(static) private(jk) 924 950 DO jk = 1, jpk ! set the Kz to the background value 925 951 avt (:,:,jk) = avtb(jk) * wmask (:,:,jk) … … 928 954 avmv(:,:,jk) = avmb(jk) * wvmask(:,:,jk) 929 955 END DO 956 !$OMP END DO NOWAIT 957 !$OMP END PARALLEL 930 958 ENDIF 931 959 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/ZDF/zdftmx.F90
r6497 r7037 121 121 ! ! ----------------------- ! 122 122 ! !* First estimation (with n2 bound by rn_n2min) bounded by 60 cm2/s 123 zav_tide(:,:,:) = MIN( 60.e-4, az_tmx(:,:,:) / MAX( rn_n2min, rn2(:,:,:) ) ) 124 123 !$OMP PARALLEL 124 !$OMP DO schedule(static) private(jk,jj,ji) 125 DO jk = 1, jpk 126 DO jj = 1, jpj 127 DO ji = 1, jpi 128 zav_tide(ji,jj,jk) = MIN( 60.e-4, az_tmx(ji,jj,jk) / MAX( rn_n2min, rn2(ji,jj,jk) ) ) 129 END DO 130 END DO 131 END DO 132 !$OMP END DO NOWAIT 133 134 !$OMP WORKSHARE 125 135 zkz(:,:) = 0.e0 !* Associated potential energy consummed over the whole water column 136 !$OMP END WORKSHARE 126 137 DO jk = 2, jpkm1 127 zkz(:,:) = zkz(:,:) + e3w_n(:,:,jk) * MAX( 0.e0, rn2(:,:,jk) ) * rau0 * zav_tide(:,:,jk) * wmask(:,:,jk) 128 END DO 129 138 !$OMP DO schedule(static) private(jj, ji) 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 zkz(ji,jj) = zkz(ji,jj) + e3w_n(ji,jj,jk) * MAX( 0.e0, rn2(ji,jj,jk) ) * rau0 * zav_tide(ji,jj,jk) * wmask(ji,jj,jk) 142 END DO 143 END DO 144 END DO 145 146 !$OMP DO schedule(static) private(jj, ji) 130 147 DO jj = 1, jpj !* Here zkz should be equal to en_tmx ==> multiply by en_tmx/zkz to recover en_tmx 131 148 DO ji = 1, jpi … … 135 152 136 153 DO jk = 2, jpkm1 !* Mutiply by zkz to recover en_tmx, BUT bound by 30/6 ==> zav_tide bound by 300 cm2/s 137 zav_tide(:,:,jk) = zav_tide(:,:,jk) * MIN( zkz(:,:), 30./6. ) * wmask(:,:,jk) !kz max = 300 cm2/s 138 END DO 154 !$OMP DO schedule(static) private(jj, ji) 155 DO jj = 1, jpj 156 DO ji = 1, jpi 157 zav_tide(ji,jj,jk) = zav_tide(ji,jj,jk) * MIN( zkz(ji,jj), 30./6. ) * wmask(ji,jj,jk) !kz max = 300 cm2/s 158 END DO 159 END DO 160 !$OMP END DO NOWAIT 161 END DO 162 !$OMP END PARALLEL 139 163 140 164 IF( kt == nit000 ) THEN !* check at first time-step: diagnose the energy consumed by zav_tide 141 165 ztpc = 0._wp 166 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztpc) 142 167 DO jk= 1, jpk 143 168 DO jj= 1, jpj … … 162 187 ! ! Update mixing coefs ! 163 188 ! ! 
----------------------- ! 189 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 164 190 DO jk = 2, jpkm1 !* update momentum & tracer diffusivity with tidal mixing 165 avt(:,:,jk) = avt(:,:,jk) + zav_tide(:,:,jk) * wmask(:,:,jk) 166 avm(:,:,jk) = avm(:,:,jk) + zav_tide(:,:,jk) * wmask(:,:,jk) 191 DO jj = 1, jpj 192 DO ji = 1, jpi ! vector opt. 193 avt(ji,jj,jk) = avt(ji,jj,jk) + zav_tide(ji,jj,jk) * wmask(ji,jj,jk) 194 avm(ji,jj,jk) = avm(ji,jj,jk) + zav_tide(ji,jj,jk) * wmask(ji,jj,jk) 195 END DO 196 END DO 167 197 DO jj = 2, jpjm1 168 198 DO ji = fs_2, fs_jpim1 ! vector opt. … … 225 255 226 256 ! ! compute the form function using N2 at each time step 257 !$OMP PARALLEL 258 !$OMP WORKSHARE 227 259 zempba_3d_1(:,:,jpk) = 0.e0 228 260 zempba_3d_2(:,:,jpk) = 0.e0 261 !$OMP END WORKSHARE 262 !$OMP DO schedule(static) private(jk) 229 263 DO jk = 1, jpkm1 230 264 zdn2dz (:,:,jk) = rn2(:,:,jk) - rn2(:,:,jk+1) ! Vertical profile of dN2/dz … … 232 266 zempba_3d_2(:,:,jk) = MAX( 0.e0, rn2(:,:,jk) ) ! - - of N^2 233 267 END DO 234 ! 268 !$OMP END DO NOWAIT 269 ! 
270 !$OMP WORKSHARE 235 271 zsum (:,:) = 0.e0 236 272 zsum1(:,:) = 0.e0 237 273 zsum2(:,:) = 0.e0 274 !$OMP END WORKSHARE 238 275 DO jk= 2, jpk 239 zsum1(:,:) = zsum1(:,:) + zempba_3d_1(:,:,jk) * e3w_n(:,:,jk) * wmask(:,:,jk) 240 zsum2(:,:) = zsum2(:,:) + zempba_3d_2(:,:,jk) * e3w_n(:,:,jk) * wmask(:,:,jk) 241 END DO 276 !$OMP DO schedule(static) private(jj,ji) 277 DO jj= 1, jpj 278 DO ji= 1, jpi 279 zsum1(ji,jj) = zsum1(ji,jj) + zempba_3d_1(ji,jj,jk) * e3w_n(ji,jj,jk) * wmask(ji,jj,jk) 280 zsum2(ji,jj) = zsum2(ji,jj) + zempba_3d_2(ji,jj,jk) * e3w_n(ji,jj,jk) * wmask(ji,jj,jk) 281 END DO 282 END DO 283 END DO 284 !$OMP DO schedule(static) private(jj,ji) 242 285 DO jj = 1, jpj 243 286 DO ji = 1, jpi … … 248 291 249 292 DO jk= 1, jpk 293 !$OMP DO schedule(static) private(jj,ji,zcoef,ztpc) 250 294 DO jj = 1, jpj 251 295 DO ji = 1, jpi … … 259 303 END DO 260 304 END DO 305 !$OMP DO schedule(static) private(jj,ji) 261 306 DO jj = 1, jpj 262 307 DO ji = 1, jpi … … 267 312 ! ! first estimation bounded by 10 cm2/s (with n2 bounded by rn_n2min) 268 313 zcoef = rn_tfe_itf / ( rn_tfe * rau0 ) 314 !$OMP DO schedule(static) private(jk,jj,ji) 269 315 DO jk = 1, jpk 270 zavt_itf(:,:,jk) = MIN( 10.e-4, zcoef * en_tmx(:,:) * zsum(:,:) * zempba_3d(:,:,jk) & 271 & / MAX( rn_n2min, rn2(:,:,jk) ) * tmask(:,:,jk) ) 272 END DO 273 316 DO jj = 1, jpj 317 DO ji = 1, jpi 318 zavt_itf(ji,jj,jk) = MIN( 10.e-4, zcoef * en_tmx(ji,jj) * zsum(ji,jj) * zempba_3d(ji,jj,jk) & 319 & / MAX( rn_n2min, rn2(ji,jj,jk) ) * tmask(ji,jj,jk) ) 320 END DO 321 END DO 322 END DO 323 324 !$OMP WORKSHARE 274 325 zkz(:,:) = 0.e0 ! 
Associated potential energy consummed over the whole water column 326 !$OMP END WORKSHARE 275 327 DO jk = 2, jpkm1 276 zkz(:,:) = zkz(:,:) + e3w_n(:,:,jk) * MAX( 0.e0, rn2(:,:,jk) ) * rau0 * zavt_itf(:,:,jk) * wmask(:,:,jk) 277 END DO 278 328 !$OMP DO schedule(static) private(jj,ji) 329 DO jj = 1, jpj 330 DO ji = 1, jpi 331 zkz(ji,jj) = zkz(ji,jj) + e3w_n(ji,jj,jk) * MAX( 0.e0, rn2(ji,jj,jk) ) * rau0 * zavt_itf(ji,jj,jk) * wmask(ji,jj,jk) 332 END DO 333 END DO 334 END DO 335 336 !$OMP DO schedule(static) private(jj,ji) 279 337 DO jj = 1, jpj ! Here zkz should be equal to en_tmx ==> multiply by en_tmx/zkz to recover en_tmx 280 338 DO ji = 1, jpi … … 283 341 END DO 284 342 343 !$OMP DO schedule(static) private(jk,jj,ji) 285 344 DO jk = 2, jpkm1 ! Mutiply by zkz to recover en_tmx, BUT bound by 30/6 ==> zavt_itf bound by 300 cm2/s 286 zavt_itf(:,:,jk) = zavt_itf(:,:,jk) * MIN( zkz(:,:), 120./10. ) * wmask(:,:,jk) ! kz max = 120 cm2/s 287 END DO 345 DO jj = 1, jpj 346 DO ji = 1, jpi 347 zavt_itf(ji,jj,jk) = zavt_itf(ji,jj,jk) * MIN( zkz(ji,jj), 120./10. ) * wmask(ji,jj,jk) ! kz max = 120 cm2/s 348 END DO 349 END DO 350 END DO 351 !$OMP END DO NOWAIT 352 !$OMP END PARALLEL 288 353 289 354 IF( kt == nit000 ) THEN ! diagnose the nergy consumed by zavt_itf 290 355 ztpc = 0.e0 356 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztpc) 291 357 DO jk= 1, jpk 292 358 DO jj= 1, jpj … … 303 369 304 370 ! ! Update pav with the ITF mixing coefficient 371 !$OMP PARALLEL DO schedule(static) private(jk) 305 372 DO jk = 2, jpkm1 306 373 pav(:,:,jk) = pav (:,:,jk) * ( 1.e0 - mask_itf(:,:) ) & … … 409 476 ! ! only the energy available for mixing is taken into account, 410 477 ! ! (mixing efficiency tidal dissipation efficiency) 478 !$OMP PARALLEL 479 480 !$OMP WORKSHARE 411 481 en_tmx(:,:) = - rn_tfe * rn_me * ( zem2(:,:) * 1.25 + zek1(:,:) ) * ssmask(:,:) 482 !$OMP END WORKSHARE 412 483 413 484 !============ … … 416 487 !! 
the error is thus ~1% which I feel comfortable with, compared to uncertainties in tidal energy dissipation. 417 488 ! ! Vertical structure (az_tmx) 489 !$OMP DO schedule(static) private(jj, ji) 418 490 DO jj = 1, jpj ! part independent of the level 419 491 DO ji = 1, jpi … … 423 495 END DO 424 496 END DO 497 !$OMP DO schedule(static) private(jk, jj, ji) 425 498 DO jk= 1, jpk ! complete with the level-dependent part 426 499 DO jj = 1, jpj … … 430 503 END DO 431 504 END DO 505 !$OMP END DO NOWAIT 506 !$OMP END PARALLEL 432 507 !=========== 433 508 ! … … 436 511 ! Total power consumption due to vertical mixing 437 512 ! zpc = rau0 * 1/rn_me * rn2 * zav_tide 513 ztpc = 0._wp 514 !$OMP PARALLEL 515 !$OMP WORKSHARE 438 516 zav_tide(:,:,:) = 0.e0 517 !$OMP END WORKSHARE 518 !$OMP DO schedule(static) private(jk) 439 519 DO jk = 2, jpkm1 440 520 zav_tide(:,:,jk) = az_tmx(:,:,jk) / MAX( rn_n2min, rn2(:,:,jk) ) 441 521 END DO 442 522 ! 443 ztpc = 0._wp 444 zpc(:,:,:) = MAX(rn_n2min,rn2(:,:,:)) * zav_tide(:,:,:) 523 !$OMP DO schedule(static) private(jk, jj, ji) 524 DO jk= 1, jpk 525 DO jj = 1, jpj 526 DO ji = 1, jpi 527 zpc(ji,jj,jk) = MAX(rn_n2min,rn2(ji,jj,jk)) * zav_tide(ji,jj,jk) 528 END DO 529 END DO 530 END DO 531 !$OMP DO schedule(static) private(jk, jj, ji, ztpc) 445 532 DO jk= 2, jpkm1 446 533 DO jj = 1, jpj … … 450 537 END DO 451 538 END DO 539 !$OMP END DO NOWAIT 540 !$OMP END PARALLEL 452 541 IF( lk_mpp ) CALL mpp_sum( ztpc ) 453 542 ztpc= rau0 * 1/(rn_tfe * rn_me) * ztpc … … 457 546 ! 458 547 ! 
control print 2 459 zav_tide(:,:,:) = MIN( zav_tide(:,:,:), 60.e-4 ) 460 zkz(:,:) = 0._wp 548 !$OMP PARALLEL 549 !$OMP DO schedule(static) private(jk, jj, ji) 550 DO jk= 1, jpk 551 DO jj = 1, jpj 552 DO ji = 1, jpi 553 zav_tide(ji,jj,jk) = MIN( zav_tide(ji,jj,jk), 60.e-4 ) 554 zkz(ji,jj) = 0._wp 555 END DO 556 END DO 557 END DO 558 461 559 DO jk = 2, jpkm1 462 zkz(:,:) = zkz(:,:) + e3w_n(:,:,jk) * MAX(0.e0, rn2(:,:,jk)) * rau0 * zav_tide(:,:,jk) * wmask(:,:,jk) 560 !$OMP DO schedule(static) private(jj, ji) 561 DO jj = 1, jpj 562 DO ji = 1, jpi 563 zkz(ji,jj) = zkz(ji,jj) + e3w_n(ji,jj,jk) * MAX(0.e0, rn2(ji,jj,jk)) * rau0 * zav_tide(ji,jj,jk) * wmask(ji,jj,jk) 564 END DO 565 END DO 463 566 END DO 464 567 ! Here zkz should be equal to en_tmx ==> multiply by en_tmx/zkz 568 !$OMP DO schedule(static) private(jj, ji) 465 569 DO jj = 1, jpj 466 570 DO ji = 1, jpi … … 471 575 END DO 472 576 ztpc = 1.e50 577 !$OMP DO schedule(static) private(jj, ji, ztpc) 473 578 DO jj = 1, jpj 474 579 DO ji = 1, jpi … … 478 583 END DO 479 584 END DO 585 !$OMP SINGLE 480 586 WRITE(numout,*) ' Min de zkz ', ztpc, ' Max = ', maxval(zkz(:,:) ) 587 !$OMP END SINGLE 481 588 ! 589 !$OMP DO schedule(static) private(jk) 482 590 DO jk = 2, jpkm1 483 591 zav_tide(:,:,jk) = zav_tide(:,:,jk) * MIN( zkz(:,:), 30./6. 
) * wmask(:,:,jk) !kz max = 300 cm2/s 484 592 END DO 485 593 ztpc = 0._wp 486 zpc(:,:,:) = Max(0.e0,rn2(:,:,:)) * zav_tide(:,:,:) 594 !$OMP DO schedule(static) private(jk, jj, ji) 595 DO jk= 1, jpk 596 DO jj = 1, jpj 597 DO ji = 1, jpi 598 zpc(ji,jj,jk) = Max(0.e0,rn2(ji,jj,jk)) * zav_tide(ji,jj,jk) 599 END DO 600 END DO 601 END DO 602 !$OMP DO schedule(static) private(jk, jj, ji, ztpc) 487 603 DO jk= 1, jpk 488 604 DO jj = 1, jpj … … 492 608 END DO 493 609 END DO 610 !$OMP END DO NOWAIT 611 !$OMP END PARALLEL 494 612 IF( lk_mpp ) CALL mpp_sum( ztpc ) 495 613 ztpc= rau0 * 1/(rn_tfe * rn_me) * ztpc … … 500 618 & / MAX( 1.e-20, SUM( e1e2t(:,:) * wmask (:,:,jk) * tmask_i(:,:) ) ) 501 619 ztpc = 1.e50 620 !$OMP PARALLEL DO schedule(static) private(ztpc, jj, ji) 502 621 DO jj = 1, jpj 503 622 DO ji = 1, jpi … … 512 631 WRITE(numout,*) 513 632 WRITE(numout,*) ' Initial profile of tidal vertical mixing' 633 514 634 DO jk = 1, jpk 635 !$OMP PARALLEL DO schedule(static) private(jj, ji) 515 636 DO jj = 1,jpj 516 637 DO ji = 1,jpi … … 518 639 END DO 519 640 END DO 641 520 642 ze_z = SUM( e1e2t(:,:) * zkz (:,:) * tmask_i(:,:) ) & 521 643 & / MAX( 1.e-20, SUM( e1e2t(:,:) * wmask(:,:,jk) * tmask_i(:,:) ) ) 522 644 WRITE(numout,*) ' jk= ', jk,' ', ze_z * 1.e4,' cm2/s' 645 523 646 END DO 524 647 DO jk = 1, jpk 525 zkz(:,:) = az_tmx(:,:,jk) /rn_n2min 648 !$OMP PARALLEL DO schedule(static) private(jj, ji) 649 DO jj = 1,jpj 650 DO ji = 1,jpi 651 zkz(ji,jj) = az_tmx(ji,jj,jk) /rn_n2min 652 END DO 653 END DO 654 526 655 ze_z = SUM( e1e2t(:,:) * zkz (:,:) * tmask_i(:,:) ) & 527 656 & / MAX( 1.e-20, SUM( e1e2t(:,:) * wmask(:,:,jk) * tmask_i(:,:) ) ) … … 689 818 ! !* Critical slope mixing: distribute energy over the time-varying ocean depth, 690 819 ! using an exponential decay from the seafloor. 820 !$OMP PARALLEL 821 !$OMP DO schedule(static) private(jj,ji) 691 822 DO jj = 1, jpj ! 
part independent of the level 692 823 DO ji = 1, jpi … … 697 828 END DO 698 829 830 !$OMP DO schedule(static) private(jk) 699 831 DO jk = 2, jpkm1 ! complete with the level-dependent part 700 832 emix_tmx(:,:,jk) = zfact(:,:) * ( EXP( ( gde3w_n(:,:,jk ) - zhdep(:,:) ) / hcri_tmx(:,:) ) & … … 702 834 & / ( gde3w_n(:,:,jk) - gde3w_n(:,:,jk-1) ) 703 835 END DO 836 !$OMP END DO NOWAIT 837 !$OMP END PARALLEL 704 838 705 839 ! !* Pycnocline-intensified mixing: distribute energy over the time-varying … … 710 844 CASE ( 1 ) ! Dissipation scales as N (recommended) 711 845 846 !$OMP PARALLEL 847 !$OMP WORKSHARE 712 848 zfact(:,:) = 0._wp 849 !$OMP END WORKSHARE 713 850 DO jk = 2, jpkm1 ! part independent of the level 714 zfact(:,:) = zfact(:,:) + e3w_n(:,:,jk) * SQRT( MAX( 0._wp, rn2(:,:,jk) ) ) * wmask(:,:,jk) 715 END DO 716 851 !$OMP DO schedule(static) private(jj,ji) 852 DO jj = 1, jpj ! part independent of the level 853 DO ji = 1, jpi 854 zfact(ji,jj) = zfact(ji,jj) + e3w_n(ji,jj,jk) * SQRT( MAX( 0._wp, rn2(ji,jj,jk) ) ) * wmask(ji,jj,jk) 855 END DO 856 END DO 857 END DO 858 859 !$OMP DO schedule(static) private(jj,ji) 717 860 DO jj = 1, jpj 718 861 DO ji = 1, jpi … … 721 864 END DO 722 865 866 !$OMP DO schedule(static) private(jk) 723 867 DO jk = 2, jpkm1 ! complete with the level-dependent part 724 868 emix_tmx(:,:,jk) = emix_tmx(:,:,jk) + zfact(:,:) * SQRT( MAX( 0._wp, rn2(:,:,jk) ) ) * wmask(:,:,jk) 725 869 END DO 870 !$OMP END DO NOWAIT 871 !$OMP END PARALLEL 726 872 727 873 CASE ( 2 ) ! Dissipation scales as N^2 728 874 875 !$OMP PARALLEL 876 !$OMP WORKSHARE 729 877 zfact(:,:) = 0._wp 730 DO jk = 2, jpkm1 ! 
part independent of the level 731 zfact(:,:) = zfact(:,:) + e3w_n(:,:,jk) * MAX( 0._wp, rn2(:,:,jk) ) * wmask(:,:,jk) 732 END DO 733 878 !$OMP END WORKSHARE 879 DO jk = 2, jpkm1 880 !$OMP DO schedule(static) private(jj,ji) 881 DO jj = 1, jpj 882 DO ji = 1, jpi 883 zfact(ji,jj) = zfact(ji,jj) + e3w_n(ji,jj,jk) * MAX( 0._wp, rn2(ji,jj,jk) ) * wmask(ji,jj,jk) 884 END DO 885 END DO 886 END DO 887 888 !$OMP DO schedule(static) private(jj,ji) 734 889 DO jj= 1, jpj 735 890 DO ji = 1, jpi … … 738 893 END DO 739 894 895 !$OMP DO schedule(static) private(jk) 740 896 DO jk = 2, jpkm1 ! complete with the level-dependent part 741 897 emix_tmx(:,:,jk) = emix_tmx(:,:,jk) + zfact(:,:) * MAX( 0._wp, rn2(:,:,jk) ) * wmask(:,:,jk) 742 898 END DO 899 !$OMP END DO NOWAIT 900 !$OMP END PARALLEL 743 901 744 902 END SELECT … … 747 905 ! !* ocean depth as proportional to rn2 * exp(-z_wkb/rn_hbot) 748 906 907 !$OMP PARALLEL 908 !$OMP WORKSHARE 749 909 zwkb(:,:,:) = 0._wp 750 910 zfact(:,:) = 0._wp 911 !$OMP END WORKSHARE 751 912 DO jk = 2, jpkm1 752 zfact(:,:) = zfact(:,:) + e3w_n(:,:,jk) * SQRT( MAX( 0._wp, rn2(:,:,jk) ) ) * wmask(:,:,jk) 753 zwkb(:,:,jk) = zfact(:,:) 754 END DO 755 913 !$OMP DO schedule(static) private(jj,ji) 914 DO jj = 1, jpj 915 DO ji = 1, jpi 916 zfact(ji,jj) = zfact(ji,jj) + e3w_n(ji,jj,jk) * SQRT( MAX( 0._wp, rn2(ji,jj,jk) ) ) * wmask(ji,jj,jk) 917 zwkb(ji,jj,jk) = zfact(ji,jj) 918 END DO 919 END DO 920 END DO 921 922 !$OMP DO schedule(static) private(jk,jj,ji) 756 923 DO jk = 2, jpkm1 757 924 DO jj = 1, jpj … … 762 929 END DO 763 930 END DO 931 !$OMP WORKSHARE 764 932 zwkb(:,:,1) = zhdep(:,:) * tmask(:,:,1) 765 933 766 934 zweight(:,:,:) = 0._wp 935 !$OMP END WORKSHARE 936 !$OMP DO schedule(static) private(jk) 767 937 DO jk = 2, jpkm1 768 938 zweight(:,:,jk) = MAX( 0._wp, rn2(:,:,jk) ) * hbot_tmx(:,:) * wmask(:,:,jk) & … … 770 940 END DO 771 941 942 !$OMP WORKSHARE 772 943 zfact(:,:) = 0._wp 944 !$OMP END WORKSHARE 773 945 DO jk = 2, jpkm1 ! 
part independent of the level 774 zfact(:,:) = zfact(:,:) + zweight(:,:,jk) 775 END DO 776 946 !$OMP DO schedule(static) private(jj,ji) 947 DO jj = 1, jpj 948 DO ji = 1, jpi 949 zfact(ji,jj) = zfact(ji,jj) + zweight(ji,jj,jk) 950 END DO 951 END DO 952 END DO 953 954 !$OMP DO schedule(static) private(jj,ji) 777 955 DO jj = 1, jpj 778 956 DO ji = 1, jpi … … 781 959 END DO 782 960 961 !$OMP DO schedule(static) private(jk) 783 962 DO jk = 2, jpkm1 ! complete with the level-dependent part 784 963 emix_tmx(:,:,jk) = emix_tmx(:,:,jk) + zweight(:,:,jk) * zfact(:,:) * wmask(:,:,jk) & 785 964 & / ( gde3w_n(:,:,jk) - gde3w_n(:,:,jk-1) ) 786 965 END DO 966 !$OMP END DO NOWAIT 787 967 788 968 789 969 ! Calculate molecular kinematic viscosity 970 !$OMP WORKSHARE 790 971 znu_t(:,:,:) = 1.e-4_wp * ( 17.91_wp - 0.53810_wp * tsn(:,:,:,jp_tem) + 0.00694_wp * tsn(:,:,:,jp_tem) * tsn(:,:,:,jp_tem) & 791 972 & + 0.02305_wp * tsn(:,:,:,jp_sal) ) * tmask(:,:,:) * r1_rau0 973 !$OMP END WORKSHARE 974 !$OMP DO schedule(static) private(jk) 792 975 DO jk = 2, jpkm1 793 976 znu_w(:,:,jk) = 0.5_wp * ( znu_t(:,:,jk-1) + znu_t(:,:,jk) ) * wmask(:,:,jk) … … 795 978 796 979 ! Calculate turbulence intensity parameter Reb 980 !$OMP DO schedule(static) private(jk) 797 981 DO jk = 2, jpkm1 798 982 zReb(:,:,jk) = emix_tmx(:,:,jk) / MAX( 1.e-20_wp, znu_w(:,:,jk) * rn2(:,:,jk) ) … … 800 984 801 985 ! Define internal wave-induced diffusivity 986 !$OMP DO schedule(static) private(jk) 802 987 DO jk = 2, jpkm1 803 988 zav_wave(:,:,jk) = znu_w(:,:,jk) * zReb(:,:,jk) * r1_6 ! This corresponds to a constant mixing efficiency of 1/6 804 989 END DO 990 !$OMP END DO NOWAIT 991 !$OMP END PARALLEL 805 992 806 993 IF( ln_mevar ) THEN ! Variable mixing efficiency case : modify zav_wave in the 994 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 807 995 DO jk = 2, jpkm1 ! 
energetic (Reb > 480) and buoyancy-controlled (Reb <10.224 ) regimes 808 996 DO jj = 1, jpj … … 818 1006 ENDIF 819 1007 1008 !$OMP PARALLEL DO schedule(static) private(jk) 820 1009 DO jk = 2, jpkm1 ! Bound diffusivity by molecular value and 100 cm2/s 821 1010 zav_wave(:,:,jk) = MIN( MAX( 1.4e-7_wp, zav_wave(:,:,jk) ), 1.e-2_wp ) * wmask(:,:,jk) … … 824 1013 IF( kt == nit000 ) THEN !* Control print at first time-step: diagnose the energy consumed by zav_wave 825 1014 ztpc = 0._wp 1015 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztpc) 826 1016 DO jk = 2, jpkm1 827 1017 DO jj = 1, jpj … … 849 1039 ! 850 1040 IF( ln_tsdiff ) THEN !* Option for differential mixing of salinity and temperature 1041 !$OMP PARALLEL 1042 !$OMP DO schedule(static) private(jk,jj,ji) 851 1043 DO jk = 2, jpkm1 ! Calculate S/T diffusivity ratio as a function of Reb 852 1044 DO jj = 1, jpj … … 858 1050 END DO 859 1051 END DO 860 CALL iom_put( "av_ratio", zav_ratio)1052 !$OMP DO schedule(static) private(jk) 861 1053 DO jk = 2, jpkm1 !* update momentum & tracer diffusivity with wave-driven mixing 862 1054 fsavs(:,:,jk) = avt(:,:,jk) + zav_wave(:,:,jk) * zav_ratio(:,:,jk) … … 864 1056 avm (:,:,jk) = avm(:,:,jk) + zav_wave(:,:,jk) 865 1057 END DO 1058 !$OMP END DO NOWAIT 1059 !$OMP END PARALLEL 1060 CALL iom_put( "av_ratio", zav_ratio ) 866 1061 ! 867 1062 ELSE !* update momentum & tracer diffusivity with wave-driven mixing 1063 !$OMP PARALLEL DO schedule(static) private(jk) 868 1064 DO jk = 2, jpkm1 869 1065 fsavs(:,:,jk) = avt(:,:,jk) + zav_wave(:,:,jk) … … 873 1069 ENDIF 874 1070 1071 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 875 1072 DO jk = 2, jpkm1 !* update momentum diffusivity at wu and wv points 876 1073 DO jj = 2, jpjm1 … … 888 1085 ! vertical integral of rau0 * Kz * N^2 (pcmap_tmx), energy density (emix_tmx) 889 1086 IF( iom_use("bflx_tmx") .OR. 
iom_use("pcmap_tmx") ) THEN 890 bflx_tmx(:,:,:) = MAX( 0._wp, rn2(:,:,:) ) * zav_wave(:,:,:) 1087 !$OMP PARALLEL 1088 !$OMP DO schedule(static) private(jk,jj,ji) 1089 DO jk = 1, jpk 1090 DO jj = 1, jpj 1091 DO ji = 1, jpi 1092 bflx_tmx(ji,jj,jk) = MAX( 0._wp, rn2(ji,jj,jk) ) * zav_wave(ji,jj,jk) 1093 END DO 1094 END DO 1095 END DO 1096 !$OMP END DO NOWAIT 1097 !$OMP WORKSHARE 891 1098 pcmap_tmx(:,:) = 0._wp 1099 !$OMP END WORKSHARE 1100 !$OMP DO schedule(static) private(jk) 892 1101 DO jk = 2, jpkm1 893 1102 pcmap_tmx(:,:) = pcmap_tmx(:,:) + e3w_n(:,:,jk) * bflx_tmx(:,:,jk) * wmask(:,:,jk) 894 1103 END DO 1104 !$OMP WORKSHARE 895 1105 pcmap_tmx(:,:) = rau0 * pcmap_tmx(:,:) 1106 !$OMP END WORKSHARE NOWAIT 1107 !$OMP END PARALLEL 896 1108 CALL iom_put( "bflx_tmx", bflx_tmx ) 897 1109 CALL iom_put( "pcmap_tmx", pcmap_tmx ) … … 968 1180 ! ensures that avt remains larger than its molecular value (=1.4e-7). Therefore, avtb should 969 1181 ! be set here to a very small value, and avmb to its (uniform) molecular value (=1.4e-6). 1182 !$OMP PARALLEL WORKSHARE 970 1183 avmb(:) = 1.4e-6_wp ! viscous molecular value 971 1184 avtb(:) = 1.e-10_wp ! very small diffusive minimum (background avt is specified in zdf_tmx) 972 1185 avtb_2d(:,:) = 1.e0_wp ! uniform 1186 !$OMP END PARALLEL WORKSHARE 973 1187 IF(lwp) THEN ! Control print 974 1188 WRITE(numout,*) … … 1003 1217 CALL iom_close(inum) 1004 1218 1219 !$OMP PARALLEL WORKSHARE 1005 1220 ebot_tmx(:,:) = ebot_tmx(:,:) * ssmask(:,:) 1006 1221 epyc_tmx(:,:) = epyc_tmx(:,:) * ssmask(:,:) … … 1014 1229 zav_wave (:,:, 1 ) = 0._wp 1015 1230 zav_wave (:,:,jpk) = 0._wp 1231 !$OMP END PARALLEL WORKSHARE 1016 1232 1017 1233 zbot = glob_sum( e1e2t(:,:) * ebot_tmx(:,:) ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zbio.F90
r6140 r7037 62 62 ! OF PHYTOPLANKTON AND DETRITUS 63 63 64 !$OMP PARALLEL 65 !$OMP WORKSHARE 64 66 xdiss(:,:,:) = 1. 67 !$OMP END WORKSHARE 65 68 !!gm the use of nmld should be better here? 69 !$OMP DO schedule(static) private(jk,jj,ji) 66 70 DO jk = 2, jpkm1 67 71 DO jj = 1, jpj … … 72 76 END DO 73 77 END DO 78 !$OMP END DO NOWAIT 79 !$OMP END PARALLEL 74 80 75 81 CALL p4z_opt ( kt, knt ) ! Optic: PAR in the water column -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zche.F90
r6291 r7037 154 154 ! CHEMICAL CONSTANTS - SURFACE LAYER 155 155 ! ---------------------------------- 156 !$OMP PARALLEL 157 !$OMP DO schedule(static) private(jj,ji,ztkel,zt,zt2,zsal,zsal2,zlogt,zcek1) 156 158 DO jj = 1, jpj 157 159 DO ji = 1, jpi … … 174 176 ! OXYGEN SOLUBILITY - DEEP OCEAN 175 177 ! ------------------------------- 178 !$OMP DO schedule(static) private(jk,jj,ji,ztkel,zsal,zsal2,ztgg,ztgg2,ztgg3,ztgg4,ztgg5,zoxy) 176 179 DO jk = 1, jpk 177 180 DO jj = 1, jpj … … 196 199 ! CHEMICAL CONSTANTS - DEEP OCEAN 197 200 ! ------------------------------- 201 !$OMP DO schedule(static) private(jk,jj,ji,zpres,ztkel,zsal,zsqrt,zsal15,zlogt,ztr,zis,zis2,zisqrt,ztc,zcl,zst,zft,zcks,zckf,zckb,zck1,zck2,zckw,zaksp0,zak1,zak2,zakb,zakw,zaksp1,zcpexp,zcpexp2,zbuf1,zbuf2) 198 202 DO jk = 1, jpk 199 203 DO jj = 1, jpj … … 317 321 END DO 318 322 END DO 323 !$OMP END DO NOWAIT 324 !$OMP END PARALLEL 319 325 ! 320 326 IF( nn_timing == 1 ) CALL timing_stop('p4z_che') -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zfechem.F90
r6140 r7037 82 82 ! 83 83 CALL wrk_alloc( jpi,jpj,jpk, zFe3, zFeL1, zTL1, ztotlig ) 84 zFe3 (:,:,:) = 0.85 zFeL1(:,:,:) = 0.86 zTL1 (:,:,:) = 0.87 IF( ln_fechem ) THEN88 CALL wrk_alloc( jpi,jpj,jpk, zFe2, zFeL2, zTL2, zFeP )89 zFe2 (:,:,:) = 0.90 zFeL2(:,:,:) = 0.91 zTL2 (:,:,:) = 0.92 zFeP (:,:,:) = 0.93 ENDIF94 84 95 85 ! Total ligand concentration : Ligands can be chosen to be constant or variable … … 97 87 ! ------------------------------------------------- 98 88 IF( ln_ligvar ) THEN 99 ztotlig(:,:,:) = 0.09 * trb(:,:,:,jpdoc) * 1E6 + ligand * 1E9 100 ztotlig(:,:,:) = MIN( ztotlig(:,:,:), 10. ) 89 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 90 DO jk = 1, jpk 91 DO jj = 1, jpj 92 DO ji = 1, jpi 93 zFe3 (ji,jj,jk) = 0. 94 zFeL1(ji,jj,jk) = 0. 95 zTL1 (ji,jj,jk) = 0. 96 ztotlig(ji,jj,jk) = 0.09 * trb(ji,jj,jk,jpdoc) * 1E6 + ligand * 1E9 97 ztotlig(ji,jj,jk) = MIN( ztotlig(ji,jj,jk), 10. ) 98 END DO 99 END DO 100 END DO 101 101 ELSE 102 !$OMP PARALLEL WORKSHARE 102 103 ztotlig(:,:,:) = ligand * 1E9 104 zFe3 (:,:,:) = 0. 105 zFeL1(:,:,:) = 0. 106 zTL1 (:,:,:) = 0. 107 !$OMP END PARALLEL WORKSHARE 103 108 ENDIF 104 109 … … 109 114 ! Chemistry is supposed to be fast enough to be at equilibrium 110 115 ! ------------------------------------------------------------ 116 CALL wrk_alloc( jpi,jpj,jpk, zFe2, zFeL2, zTL2, zFeP ) 117 !$OMP PARALLEL 118 !$OMP WORKSHARE 119 zFe2 (:,:,:) = 0. 120 zFeL2(:,:,:) = 0. 121 zTL2 (:,:,:) = 0. 122 zFeP (:,:,:) = 0. 123 !$OMP END WORKSHARE 124 !$OMP DO schedule(static) private(jk,jj,ji,ztligand,zionic,zph,zoxy,zkox,zkph2,zkph1,ztfe,za,zb,zc,zkappa1,zkappa2,za2,za1,za0,zp,zq,zp3,zq2,zd,zr,zphi,zxs,zfff,zfunc) 111 125 DO jk = 1, jpkm1 112 126 DO jj = 1, jpj … … 182 196 END DO 183 197 END DO 198 !$OMP END DO NOWAIT 199 !$OMP END PARALLEL 184 200 ELSE 185 201 ! ------------------------------------------------------------ … … 188 204 ! Chemistry is supposed to be fast enough to be at equilibrium 189 205 ! 
------------------------------------------------------------ 206 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zkeq,zfesatur,ztfe) 190 207 DO jk = 1, jpkm1 191 208 DO jj = 1, jpj … … 209 226 zdust = 0. ! if no dust available 210 227 ! 228 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zkeq,zfesatur,ztfe,zstep,zfeequi,zfecoll,ztrc,zlam1b,zscave,zdenom1,zdenom2,zlamfac,zdep,zcoag,zlam1a,zaggdfea,zaggdfeb) 211 229 DO jk = 1, jpkm1 212 230 DO jj = 1, jpj … … 223 241 zfeequi = ( zFe3(ji,jj,jk) + zFe2(ji,jj,jk) + zFeP(ji,jj,jk) ) * 1E-9 224 242 zfecoll = ( 0.3 * zFeL1(ji,jj,jk) + 0.5 * zFeL2(ji,jj,jk) ) * 1E-9 243 ! Define the bioavailable fraction of iron 244 biron(ji,jj,jk) = MAX( 0., trb(ji,jj,jk,jpfer) - zFeP(ji,jj,jk) * 1E-9 ) 225 245 ELSE 226 246 zfeequi = zFe3(ji,jj,jk) * 1E-9 227 247 zfecoll = 0.5 * zFeL1(ji,jj,jk) * 1E-9 248 biron(ji,jj,jk) = trb(ji,jj,jk,jpfer) 228 249 ENDIF 229 250 #if defined key_kriest … … 278 299 END DO 279 300 ! 280 ! Define the bioavailable fraction of iron281 ! ----------------------------------------282 IF( ln_fechem ) THEN283 biron(:,:,:) = MAX( 0., trb(:,:,:,jpfer) - zFeP(:,:,:) * 1E-9 )284 ELSE285 biron(:,:,:) = trb(:,:,:,jpfer)286 ENDIF287 288 301 ! Output of some diagnostics variables 289 302 ! --------------------------------- -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zflx.F90
r6291 r7037 119 119 #endif 120 120 121 !$OMP PARALLEL 121 122 DO jm = 1, 10 123 !$OMP DO schedule(static) private(jj,ji,zbot,zfact,zdic,zph,zalka,zalk,zah2) 122 124 DO jj = 1, jpj 123 125 DO ji = 1, jpi … … 142 144 END DO 143 145 END DO 146 !$OMP END DO NOWAIT 144 147 END DO 145 148 … … 152 155 ! ------------------------------------------- 153 156 157 !$OMP DO schedule(static) private(jj,ji,ztc,ztc2,ztc3,ztc4,zsch_co2,zsch_o2,zws,zkgwan) 154 158 DO jj = 1, jpj 155 159 DO ji = 1, jpi … … 175 179 END DO 176 180 181 !$OMP DO schedule(static) private(jj,ji,zfld,zflu,zfld16,zflu16) 177 182 DO jj = 1, jpj 178 183 DO ji = 1, jpi … … 191 196 END DO 192 197 END DO 198 !$OMP END DO NOWAIT 199 !$OMP END PARALLEL 193 200 194 201 t_oce_co2_flx = glob_sum( oce_co2(:,:) ) ! Total Flux of Carbon -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zint.F90
r5656 r7037 41 41 INTEGER, INTENT( in ) :: kt ! ocean time-step index 42 42 ! 43 INTEGER :: ji, jj 43 INTEGER :: ji, jj, jk ! dummy loop indices 44 44 REAL(wp) :: zvar ! local variable 45 45 !!--------------------------------------------------------------------- … … 49 49 ! Computation of phyto and zoo metabolic rate 50 50 ! ------------------------------------------- 51 tgfunc (:,:,:) = EXP( 0.063913 * tsn(:,:,:,jp_tem) ) 52 tgfunc2(:,:,:) = EXP( 0.07608 * tsn(:,:,:,jp_tem) ) 51 !$OMP PARALLEL 52 !$OMP DO schedule(static) private(jk,jj,ji) 53 DO jk = 1, jpk 54 DO jj = 1, jpj 55 DO ji = 1, jpi 56 tgfunc (ji,jj,jk) = EXP( 0.063913 * tsn(ji,jj,jk,jp_tem) ) 57 tgfunc2(ji,jj,jk) = EXP( 0.07608 * tsn(ji,jj,jk,jp_tem) ) 58 END DO 59 END DO 60 END DO 53 61 54 62 ! Computation of the silicon dependant half saturation constant for silica uptake 55 63 ! --------------------------------------------------- 56 DO ji = 1, jpi 57 DO jj = 1, jpj 64 !$OMP DO schedule(static) private(jj,ji,zvar) 65 DO jj = 1, jpj 66 DO ji = 1, jpi 58 67 zvar = trb(ji,jj,1,jpsil) * trb(ji,jj,1,jpsil) 59 68 xksimax(ji,jj) = MAX( xksimax(ji,jj), ( 1.+ 7.* zvar / ( xksilim * xksilim + zvar ) ) * 1e-6 ) … … 62 71 ! 63 72 IF( nday_year == nyear_len(1) ) THEN 73 !$OMP WORKSHARE 64 74 xksi (:,:) = xksimax(:,:) 65 75 xksimax(:,:) = 0._wp 76 !$OMP END WORKSHARE NOWAIT 66 77 ENDIF 78 !$OMP END PARALLEL 67 79 ! 68 80 IF( nn_timing == 1 ) CALL timing_stop('p4z_int') -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zlim.F90
r5836 r7037 83 83 IF( nn_timing == 1 ) CALL timing_start('p4z_lim') 84 84 ! 85 !$OMP PARALLEL 86 !$OMP DO schedule(static) private(jk,jj,ji,zno3,zferlim,zconcd,zconcd2,zconcn,zconcn2,z1_trbphy,z1_trbdia,zconc1d,zconc1dnh4,zconc0n,zconc0nnh4,zdenom,zlim1,zlim2,zlim3,zlim4,zratio,zironmin) 85 87 DO jk = 1, jpkm1 86 88 DO jj = 1, jpj … … 159 161 END DO 160 162 END DO 163 !$OMP END DO NOWAIT 161 164 162 165 ! Compute the fraction of nanophytoplankton that is made of calcifiers 163 166 ! -------------------------------------------------------------------- 167 !$OMP DO schedule(static) private(jk,jj,ji,zlim1,zlim2,zlim3,ztem1,ztem2,zetot1,zetot2) 164 168 DO jk = 1, jpkm1 165 169 DO jj = 1, jpj … … 185 189 END DO 186 190 END DO 191 !$OMP END DO NOWAIT 192 !$OMP END PARALLEL 187 193 ! 188 194 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zlys.F90
r6291 r7037 72 72 CALL wrk_alloc( jpi, jpj, jpk, zco3, zcaldiss ) 73 73 ! 74 75 !$OMP PARALLEL 76 !$OMP WORKSHARE 74 77 zco3 (:,:,:) = 0. 75 78 zcaldiss(:,:,:) = 0. 79 !$OMP END WORKSHARE 76 80 ! ------------------------------------------- 77 81 ! COMPUTE [CO3--] and [H+] CONCENTRATIONS … … 80 84 DO jn = 1, 5 ! BEGIN OF ITERATION 81 85 ! 86 !$OMP DO schedule(static) private(jk, jj, ji, zfact, zph, zdic, zalka, zalk, zaldi, zah2) 82 87 DO jk = 1, jpkm1 83 88 DO jj = 1, jpj … … 109 114 ! --------------------------------------------------------- 110 115 116 !$OMP DO schedule(static) private(jk, jj, ji, zcalcon, zfact, zomegaca, zexcess0, zexcess, zdispot) 111 117 DO jk = 1, jpkm1 112 118 DO jj = 1, jpj … … 142 148 END DO 143 149 END DO 150 !$OMP END DO NOWAIT 151 !$OMP END PARALLEL 144 152 ! 145 153 … … 151 159 ELSE 152 160 IF( ln_diatrc ) THEN 153 trc3d(:,:,:,jp_pcs0_3d ) = -1. * LOG10( hi(:,:,:) ) * tmask(:,:,:) 154 trc3d(:,:,:,jp_pcs0_3d + 1) = zco3(:,:,:) * tmask(:,:,:) 155 trc3d(:,:,:,jp_pcs0_3d + 2) = aksp(:,:,:) / calcon * tmask(:,:,:) 161 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 162 DO jk = 1, jpk 163 DO jj = 1, jpj 164 DO ji = 1, jpi 165 trc3d(ji,jj,jk,jp_pcs0_3d ) = -1. * LOG10( hi(ji,jj,jk) ) * tmask(ji,jj,jk) 166 trc3d(ji,jj,jk,jp_pcs0_3d + 1) = zco3(ji,jj,jk) * tmask(ji,jj,jk) 167 trc3d(ji,jj,jk,jp_pcs0_3d + 2) = aksp(ji,jj,jk) / calcon * tmask(ji,jj,jk) 168 END DO 169 END DO 170 END DO 156 171 ENDIF 157 172 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmeso.F90
r5836 r7037 89 89 IF( lk_iomput ) THEN 90 90 CALL wrk_alloc( jpi, jpj, jpk, zgrazing ) 91 !$OMP PARALLEL WORKSHARE 91 92 zgrazing(:,:,:) = 0._wp 93 !$OMP END PARALLEL WORKSHARE 92 94 ENDIF 93 95 96 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zcompam,zstep,zfact,zrespz2,ztortz2,zcompadi,zcompaz,zcompaph,zcompapoc,zfood,zfoodlim,zdenom,zdenom2,zgraze2,zgrazd,zgrazz,zgrazn,zgrazpoc,zgraznf,zgrazf,zgrazpof,zgrazffeg,zgrazfffg,zgrazffep,zgrazfffp,zgraztot,zproport,zratio,zratio2,zfrac,zfracfe,zgraztotf,zgrasrat,zgrasratn,zepshert,zepsherv,zgrarem2,zgrafer2,zgrapoc2,zgrarsig,zmortz2,zmortzgoc,zprcaca) 94 97 DO jk = 1, jpkm1 95 98 DO jj = 1, jpj … … 249 252 CALL wrk_alloc( jpi, jpj, jpk, zw3d ) 250 253 IF( iom_use( "GRAZ2" ) ) THEN 254 !$OMP PARALLEL WORKSHARE 251 255 zw3d(:,:,:) = zgrazing(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Total grazing of phyto by zooplankton 256 !$OMP END PARALLEL WORKSHARE 252 257 CALL iom_put( "GRAZ2", zw3d ) 253 258 ENDIF 254 259 IF( iom_use( "PCAL" ) ) THEN 260 !$OMP PARALLEL WORKSHARE 255 261 zw3d(:,:,:) = prodcal(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Calcite production 262 !$OMP END PARALLEL WORKSHARE 256 263 CALL iom_put( "PCAL", zw3d ) 257 264 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmicro.F90
r5836 r7037 85 85 IF( lk_iomput ) CALL wrk_alloc( jpi, jpj, jpk, zgrazing ) 86 86 ! 87 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zcompaz,zstep,zfact,zrespz,ztortz,zcompadi,zcompaph,zcompapoc,zfood,zfoodlim,zdenom,zdenom2,zgraze,zgrazp,zgrazm,zgrazsd,zgrazpf,zgrazmf,zgrazsf,zgraztot,zgraztotf,zgraztotn,zgrasrat,zgrasratn,zepshert,zepsherv,zgrafer,zgrarem,zgrapoc,zgrarsig,zmortz,zprcaca) 87 88 DO jk = 1, jpkm1 88 89 DO jj = 1, jpj … … 191 192 CALL wrk_alloc( jpi, jpj, jpk, zw3d ) 192 193 IF( iom_use( "GRAZ1" ) ) THEN 194 !$OMP PARALLEL WORKSHARE 193 195 zw3d(:,:,:) = zgrazing(:,:,:) * 1.e+3 * rfact2r * tmask(:,:,:) ! Total grazing of phyto by zooplankton 196 !$OMP END PARALLEL WORKSHARE 194 197 CALL iom_put( "GRAZ1", zw3d ) 195 198 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zmort.F90
r5836 r7037 79 79 IF( nn_timing == 1 ) CALL timing_start('p4z_nano') 80 80 ! 81 !$OMP PARALLEL 82 !$OMP WORKSHARE 81 83 prodcal(:,:,:) = 0. !: calcite production variable set to zero 84 !$OMP END WORKSHARE 85 !$OMP DO schedule(static) private(jk,jj,ji,zcompaph,zstep,zsizerat,zrespp,ztortp,zmortp,zfactfe,zfactch,zprcaca,zfracal) 82 86 DO jk = 1, jpkm1 83 87 DO jj = 1, jpj … … 132 136 END DO 133 137 END DO 138 !$OMP END DO NOWAIT 139 !$OMP END PARALLEL 134 140 ! 135 141 IF(ln_ctl) THEN ! print mean trends (used for debugging) … … 166 172 ! ------------------------------------------------------------ 167 173 174 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zcompadi,zstep,zlim2,zlim1,zrespp2,ztortp2,zmortp2,zfactfe,zfactch,zfactsi) 168 175 DO jk = 1, jpkm1 169 176 DO jj = 1, jpj -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zopt.F90
r6140 r7037 88 88 ! Initialisation of variables used to compute PAR 89 89 ! ----------------------------------------------- 90 !$OMP PARALLEL 91 !$OMP WORKSHARE 90 92 ze1(:,:,:) = 0._wp 91 93 ze2(:,:,:) = 0._wp 92 94 ze3(:,:,:) = 0._wp 95 !$OMP END WORKSHARE NOWAIT 93 96 ! !* attenuation coef. function of Chlorophyll and wavelength (Red-Green-Blue) 97 !$OMP DO schedule(static) private(jk,jj,ji,zchl,irgb) 94 98 DO jk = 1, jpkm1 ! -------------------------------------------------------- 95 99 DO jj = 1, jpj … … 105 109 END DO 106 110 END DO 111 !$OMP END DO NOWAIT 112 !$OMP END PARALLEL 107 113 ! !* Photosynthetically Available Radiation (PAR) 108 114 ! ! -------------------------------------- 109 115 IF( l_trcdm2dc ) THEN ! diurnal cycle 110 116 ! 1% of qsr to compute euphotic layer 117 !$OMP PARALLEL WORKSHARE 111 118 zqsr100(:,:) = 0.01 * qsr_mean(:,:) ! daily mean qsr 119 !$OMP END PARALLEL WORKSHARE 112 120 ! 113 121 CALL p4z_opt_par( kt, qsr_mean, ze1, ze2, ze3 ) 114 122 ! 123 !$OMP PARALLEL DO schedule(static) private(jk) 115 124 DO jk = 1, nksrp 116 125 etot_ndcy(:,:,jk) = ze1(:,:,jk) + ze2(:,:,jk) + ze3(:,:,jk) … … 121 130 CALL p4z_opt_par( kt, qsr, ze1, ze2, ze3 ) 122 131 ! 132 !$OMP PARALLEL DO schedule(static) private(jk) 123 133 DO jk = 1, nksrp 124 134 etot(:,:,jk) = ze1(:,:,jk) + ze2(:,:,jk) + ze3(:,:,jk) … … 127 137 ELSE 128 138 ! 1% of qsr to compute euphotic layer 139 !$OMP PARALLEL WORKSHARE 129 140 zqsr100(:,:) = 0.01 * qsr(:,:) 141 !$OMP END PARALLEL WORKSHARE 130 142 ! 131 143 CALL p4z_opt_par( kt, qsr, ze1, ze2, ze3 ) 132 144 ! 
145 !$OMP PARALLEL 146 !$OMP DO schedule(static) private(jk) 133 147 DO jk = 1, nksrp 134 148 etot (:,:,jk) = ze1(:,:,jk) + ze2(:,:,jk) + ze3(:,:,jk) … … 136 150 ediat(:,:,jk) = 1.6 * ze1(:,:,jk) + 0.69 * ze2(:,:,jk) + 0.7 * ze3(:,:,jk) 137 151 END DO 152 !$OMP WORKSHARE 138 153 etot_ndcy(:,:,:) = etot(:,:,:) 154 !$OMP END WORKSHARE NOWAIT 155 !$OMP END PARALLEL 139 156 ENDIF 140 157 … … 144 161 CALL p4z_opt_par( kt, qsr, ze1, ze2, ze3, pe0=ze0 ) 145 162 ! 163 !$OMP PARALLEL 164 !$OMP WORKSHARE 146 165 etot3(:,:,1) = qsr(:,:) * tmask(:,:,1) 166 !$OMP END WORKSHARE 167 !$OMP DO schedule(static) private(jk) 147 168 DO jk = 2, nksrp + 1 148 169 etot3(:,:,jk) = ( ze0(:,:,jk) + ze1(:,:,jk) + ze2(:,:,jk) + ze3(:,:,jk) ) * tmask(:,:,jk) 149 170 END DO 171 !$OMP END DO NOWAIT 172 !$OMP END PARALLEL 150 173 ! ! ------------------------ 151 174 ENDIF 152 175 ! !* Euphotic depth and level 176 !$OMP PARALLEL 177 !$OMP WORKSHARE 153 178 neln(:,:) = 1 ! ------------------------ 154 179 heup(:,:) = 300. 180 !$OMP END WORKSHARE 155 181 156 182 DO jk = 2, nksrp 183 !$OMP DO schedule(static) private(jj,ji) 157 184 DO jj = 1, jpj 158 185 DO ji = 1, jpi … … 166 193 END DO 167 194 ! 168 heup(:,:) = MIN( 300., heup(:,:) ) 195 !$OMP PARALLEL DO schedule(static) private(jj,ji) 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 heup(ji,jj) = MIN( 300. , heup(ji,jj)) 199 END DO 200 END DO 169 201 ! !* mean light over the mixed layer 202 !$OMP WORKSHARE 170 203 zdepmoy(:,:) = 0.e0 ! ------------------------------- 171 204 zetmp1 (:,:) = 0.e0 … … 173 206 zetmp3 (:,:) = 0.e0 174 207 zetmp4 (:,:) = 0.e0 208 !$OMP END WORKSHARE 175 209 176 210 DO jk = 1, nksrp 211 !$OMP DO schedule(static) private(jj,ji) 177 212 DO jj = 1, jpj 178 213 DO ji = 1, jpi … … 186 221 END DO 187 222 END DO 223 !$OMP END DO NOWAIT 188 224 END DO 189 225 ! 226 !$OMP WORKSHARE 190 227 emoy(:,:,:) = etot(:,:,:) ! remineralisation 191 228 zpar(:,:,:) = etot_ndcy(:,:,:) ! diagnostic : PAR with no diurnal cycle 192 ! 
229 !$OMP END WORKSHARE 230 ! 231 !$OMP DO schedule(static) private(jk,jj,ji,z1_dep) 193 232 DO jk = 1, nksrp 194 233 DO jj = 1, jpj … … 204 243 END DO 205 244 END DO 245 !$OMP END DO NOWAIT 246 !$OMP END PARALLEL 206 247 ! 207 248 IF( lk_iomput ) THEN … … 213 254 ELSE 214 255 IF( ln_diatrc ) THEN ! save output diagnostics 256 !$OMP PARALLEL WORKSHARE 215 257 trc2d(:,:, jp_pcs0_2d + 10) = heup(:,: ) * tmask(:,:,1) 216 258 trc3d(:,:,:,jp_pcs0_3d + 3) = etot(:,:,:) * tmask(:,:,:) 259 !$OMP END PARALLEL WORKSHARE 217 260 ENDIF 218 261 ENDIF … … 244 287 245 288 ! Real shortwave 246 IF( ln_varpar ) THEN ; zqsr(:,:) = par_varsw(:,:) * pqsr(:,:) 247 ELSE ; zqsr(:,:) = xparsw * pqsr(:,:) 289 IF( ln_varpar ) THEN 290 !$OMP PARALLEL WORKSHARE 291 zqsr(:,:) = par_varsw(:,:) * pqsr(:,:) 292 !$OMP END PARALLEL WORKSHARE 293 ELSE 294 !$OMP PARALLEL WORKSHARE 295 zqsr(:,:) = xparsw * pqsr(:,:) 296 !$OMP END PARALLEL WORKSHARE 248 297 ENDIF 249 298 ! 250 299 IF( PRESENT( pe0 ) ) THEN ! W-level 251 300 ! 301 !$OMP PARALLEL 302 !$OMP WORKSHARE 252 303 pe0(:,:,1) = pqsr(:,:) - 3. * zqsr(:,:) ! ( 1 - 3 * alpha ) * q 253 304 pe1(:,:,1) = zqsr(:,:) 254 305 pe2(:,:,1) = zqsr(:,:) 255 306 pe3(:,:,1) = zqsr(:,:) 307 !$OMP END WORKSHARE 256 308 ! 257 309 DO jk = 2, nksrp + 1 310 !$OMP DO schedule(static) private(jj,ji) 258 311 DO jj = 1, jpj 259 312 DO ji = 1, jpi … … 265 318 ! 266 319 END DO 320 !$OMP END DO NOWAIT 267 321 ! 268 322 END DO 323 !$OMP END PARALLEL 269 324 ! 270 325 ELSE ! T- level 271 326 ! 272 pe1(:,:,1) = zqsr(:,:) * EXP( -0.5 * ekb(:,:,1) ) 273 pe2(:,:,1) = zqsr(:,:) * EXP( -0.5 * ekg(:,:,1) ) 274 pe3(:,:,1) = zqsr(:,:) * EXP( -0.5 * ekr(:,:,1) ) 327 !$OMP PARALLEL 328 !$OMP DO schedule(static) private(jj,ji) 329 DO jj = 1, jpj 330 DO ji = 1, jpi 331 pe1(ji,jj,1) = zqsr(ji,jj) * EXP( -0.5 * ekb(ji,jj,1) ) 332 pe2(ji,jj,1) = zqsr(ji,jj) * EXP( -0.5 * ekg(ji,jj,1) ) 333 pe3(ji,jj,1) = zqsr(ji,jj) * EXP( -0.5 * ekr(ji,jj,1) ) 334 END DO 335 END DO 275 336 ! 
276 337 DO jk = 2, nksrp 338 !$OMP DO schedule(static) private(jj,ji) 277 339 DO jj = 1, jpj 278 340 DO ji = 1, jpi … … 282 344 END DO 283 345 END DO 346 !$OMP END DO NOWAIT 284 347 END DO 348 !$OMP END PARALLEL 285 349 ! 286 350 ENDIF … … 315 379 IF( kt == nit000 .OR. ( kt /= nit000 .AND. ntimes_par > 1 ) ) THEN 316 380 CALL fld_read( kt, 1, sf_par ) 381 !$OMP PARALLEL WORKSHARE 317 382 par_varsw(:,:) = ( sf_par(1)%fnow(:,:,1) ) / 3.0 383 !$OMP END PARALLEL WORKSHARE 318 384 ENDIF 319 385 ENDIF … … 391 457 IF(lwp) WRITE(numout,*) ' level of light extinction = ', nksrp, ' ref depth = ', gdepw_1d(nksrp+1), ' m' 392 458 ! 459 !$OMP PARALLEL WORKSHARE 393 460 ekr (:,:,:) = 0._wp 394 461 ekb (:,:,:) = 0._wp … … 398 465 enano (:,:,:) = 0._wp 399 466 ediat (:,:,:) = 0._wp 400 IF( ln_qsr_bio ) etot3 (:,:,:) = 0._wp 467 !$OMP END PARALLEL WORKSHARE 468 IF( ln_qsr_bio ) THEN 469 !$OMP PARALLEL WORKSHARE 470 etot3 (:,:,:) = 0._wp 471 !$OMP END PARALLEL WORKSHARE 472 END IF 401 473 ! 402 474 IF( nn_timing == 1 ) CALL timing_stop('p4z_opt_init') -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zprod.F90
r6140 r7037 94 94 CALL wrk_alloc( jpi, jpj, jpk, zprorca, zprorcad, zprofed, zprofen, zprochln, zprochld, zpronew, zpronewd ) 95 95 ! 96 !$OMP PARALLEL WORKSHARE 96 97 zprorca (:,:,:) = 0._wp 97 98 zprorcad(:,:,:) = 0._wp … … 110 111 ! Computation of the optimal production 111 112 prmax(:,:,:) = 0.6_wp * r1_rday * tgfunc(:,:,:) 112 IF( lk_degrad ) prmax(:,:,:) = prmax(:,:,:) * facvol(:,:,:) 113 114 ! day length in hours 115 zstrn(:,:) = 0. 116 !$OMP END PARALLEL WORKSHARE 117 118 IF( lk_degrad ) THEN 119 !$OMP PARALLEL WORKSHARE 120 prmax(:,:,:) = prmax(:,:,:) * facvol(:,:,:) 121 !$OMP END PARALLEL WORKSHARE 122 END IF 113 123 114 124 ! compute the day length depending on latitude and the day … … 116 126 zcodel = ASIN( SIN( zrum * rpi * 2._wp ) * SIN( rad * 23.5_wp ) ) 117 127 118 ! day length in hours 119 zstrn(:,:) = 0. 128 !$OMP PARALLEL 129 !$OMP DO schedule(static) private(jj,ji,zargu) 120 130 DO jj = 1, jpj 121 131 DO ji = 1, jpi … … 127 137 128 138 ! Impact of the day duration on phytoplankton growth 139 !$OMP DO schedule(static) private(jk,jj,ji,zval) 129 140 DO jk = 1, jpkm1 130 141 DO jj = 1 ,jpj … … 139 150 END DO 140 151 END DO 152 !$OMP END DO 141 153 142 154 ! Maximum light intensity 155 !$OMP WORKSHARE 143 156 WHERE( zstrn(:,:) < 1.e0 ) zstrn(:,:) = 24. 144 157 zstrn(:,:) = 24. / zstrn(:,:) 158 !$OMP END WORKSHARE NOWAIT 159 !$OMP END PARALLEL 145 160 146 161 IF( ln_newprod ) THEN 162 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztn,zadap,zconctemp,zconctemp2,znanotot,zdiattot,zpislopen,zpislope2n,zmaxday) 147 163 DO jk = 1, jpkm1 148 164 DO jj = 1, jpj … … 180 196 END DO 181 197 ELSE 198 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztn,zadap,zconctemp,zconctemp2,znanotot,zpislopen,zpislope2n) 182 199 DO jk = 1, jpkm1 183 200 DO jj = 1, jpj … … 222 239 ! Computation of a proxy of the N/C ratio 223 240 ! 
--------------------------------------- 241 !$OMP PARALLEL 242 !$OMP DO schedule(static) private(jk,jj,ji,zval) 224 243 DO jk = 1, jpkm1 225 244 DO jj = 1, jpj … … 234 253 END DO 235 254 END DO 236 237 255 !$OMP END DO NOWAIT 256 257 258 !$OMP DO schedule(static) private(jk,jj,ji,zlim,zsilim,zsilfac,zsiborn,zsilfac2) 238 259 DO jk = 1, jpkm1 239 260 DO jj = 1, jpj … … 260 281 END DO 261 282 END DO 283 !$OMP END DO NOWAIT 262 284 263 285 ! Computation of the limitation term due to a mixed layer deeper than the euphotic depth 286 !$OMP DO schedule(static) private(jj,ji,zmxltst,zmxlday) 264 287 DO jj = 1, jpj 265 288 DO ji = 1, jpi … … 272 295 273 296 ! Mixed-layer effect on production 297 !$OMP DO schedule(static) private(jk,jj,ji) 274 298 DO jk = 1, jpkm1 275 299 DO jj = 1, jpj … … 284 308 285 309 ! Computation of the various production terms 310 !$OMP DO schedule(static) private(jk,jj,ji,zratio,zmax) 286 311 DO jk = 1, jpkm1 287 312 DO jj = 1, jpj … … 314 339 END DO 315 340 END DO 341 !$OMP END DO NOWAIT 342 !$OMP END PARALLEL 316 343 317 344 IF( ln_newprod ) THEN 345 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,znanotot,zprod,zdiattot) 318 346 DO jk = 1, jpkm1 319 347 DO jj = 1, jpj … … 341 369 END DO 342 370 ELSE 371 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,znanotot,zprod,zdiattot) 343 372 DO jk = 1, jpkm1 344 373 DO jj = 1, jpj … … 364 393 365 394 ! Update the arrays TRA which contain the biological sources and sinks 395 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zproreg,zproreg2) 366 396 DO jk = 1, jpkm1 367 397 DO jj = 1, jpj … … 403 433 ! 404 434 IF( iom_use( "PPPHY" ) .OR. iom_use( "PPPHY2" ) ) THEN 435 !$OMP PARALLEL WORKSHARE 405 436 zw3d(:,:,:) = zprorca (:,:,:) * zfact * tmask(:,:,:) ! primary production by nanophyto 437 !$OMP END PARALLEL WORKSHARE 406 438 CALL iom_put( "PPPHY" , zw3d ) 407 439 ! 440 !$OMP PARALLEL WORKSHARE 408 441 zw3d(:,:,:) = zprorcad(:,:,:) * zfact * tmask(:,:,:) ! 
primary production by diatomes 442 !$OMP END PARALLEL WORKSHARE 409 443 CALL iom_put( "PPPHY2" , zw3d ) 410 444 ENDIF 411 445 IF( iom_use( "PPNEWN" ) .OR. iom_use( "PPNEWD" ) ) THEN 446 !$OMP PARALLEL WORKSHARE 412 447 zw3d(:,:,:) = zpronew (:,:,:) * zfact * tmask(:,:,:) ! new primary production by nanophyto 448 !$OMP END PARALLEL WORKSHARE 413 449 CALL iom_put( "PPNEWN" , zw3d ) 414 450 ! 451 !$OMP PARALLEL WORKSHARE 415 452 zw3d(:,:,:) = zpronewd(:,:,:) * zfact * tmask(:,:,:) ! new primary production by diatomes 453 !$OMP END PARALLEL WORKSHARE 416 454 CALL iom_put( "PPNEWD" , zw3d ) 417 455 ENDIF 418 456 IF( iom_use( "PBSi" ) ) THEN 457 !$OMP PARALLEL WORKSHARE 419 458 zw3d(:,:,:) = zprorcad(:,:,:) * zfact * tmask(:,:,:) * zysopt(:,:,:) ! biogenic silica production 459 !$OMP END PARALLEL WORKSHARE 420 460 CALL iom_put( "PBSi" , zw3d ) 421 461 ENDIF 422 462 IF( iom_use( "PFeN" ) .OR. iom_use( "PFeD" ) ) THEN 463 !$OMP PARALLEL WORKSHARE 423 464 zw3d(:,:,:) = zprofen(:,:,:) * zfact * tmask(:,:,:) ! biogenic iron production by nanophyto 465 !$OMP END PARALLEL WORKSHARE 424 466 CALL iom_put( "PFeN" , zw3d ) 425 467 ! 468 !$OMP PARALLEL WORKSHARE 426 469 zw3d(:,:,:) = zprofed(:,:,:) * zfact * tmask(:,:,:) ! biogenic iron production by diatomes 470 !$OMP END PARALLEL WORKSHARE 427 471 CALL iom_put( "PFeD" , zw3d ) 428 472 ENDIF 429 473 IF( iom_use( "Mumax" ) ) THEN 474 !$OMP PARALLEL WORKSHARE 430 475 zw3d(:,:,:) = prmax(:,:,:) * tmask(:,:,:) ! Maximum growth rate 476 !$OMP END PARALLEL WORKSHARE 431 477 CALL iom_put( "Mumax" , zw3d ) 432 478 ENDIF 433 479 IF( iom_use( "MuN" ) .OR. iom_use( "MuD" ) ) THEN 480 !$OMP PARALLEL WORKSHARE 434 481 zw3d(:,:,:) = zprbio(:,:,:) * xlimphy(:,:,:) * tmask(:,:,:) ! Realized growth rate for nanophyto 482 !$OMP END PARALLEL WORKSHARE 435 483 CALL iom_put( "MuN" , zw3d ) 436 484 ! 485 !$OMP PARALLEL WORKSHARE 437 486 zw3d(:,:,:) = zprdia(:,:,:) * xlimdia(:,:,:) * tmask(:,:,:) ! 
Realized growth rate for diatoms 487 !$OMP END PARALLEL WORKSHARE 438 488 CALL iom_put( "MuD" , zw3d ) 439 489 ENDIF 440 490 IF( iom_use( "LNlight" ) .OR. iom_use( "LDlight" ) ) THEN 491 !$OMP PARALLEL WORKSHARE 441 492 zw3d(:,:,:) = zprbio (:,:,:) / (prmax(:,:,:) + rtrn) * tmask(:,:,:) ! light limitation term 493 !$OMP END PARALLEL WORKSHARE 442 494 CALL iom_put( "LNlight" , zw3d ) 443 495 ! 496 !$OMP PARALLEL WORKSHARE 444 497 zw3d(:,:,:) = zprdia (:,:,:) / (prmax(:,:,:) + rtrn) * tmask(:,:,:) ! light limitation term 498 !$OMP END PARALLEL WORKSHARE 445 499 CALL iom_put( "LDlight" , zw3d ) 446 500 ENDIF 447 501 IF( iom_use( "TPP" ) ) THEN 502 !$OMP PARALLEL WORKSHARE 448 503 zw3d(:,:,:) = ( zprorca(:,:,:) + zprorcad(:,:,:) ) * zfact * tmask(:,:,:) ! total primary production 504 !$OMP END PARALLEL WORKSHARE 449 505 CALL iom_put( "TPP" , zw3d ) 450 506 ENDIF 451 507 IF( iom_use( "TPNEW" ) ) THEN 508 !$OMP PARALLEL WORKSHARE 452 509 zw3d(:,:,:) = ( zpronew(:,:,:) + zpronewd(:,:,:) ) * zfact * tmask(:,:,:) ! total new production 510 !$OMP END PARALLEL WORKSHARE 453 511 CALL iom_put( "TPNEW" , zw3d ) 454 512 ENDIF 455 513 IF( iom_use( "TPBFE" ) ) THEN 514 !$OMP PARALLEL WORKSHARE 456 515 zw3d(:,:,:) = ( zprofen(:,:,:) + zprofed(:,:,:) ) * zfact * tmask(:,:,:) ! total biogenic iron production 516 !$OMP END PARALLEL WORKSHARE 457 517 CALL iom_put( "TPBFE" , zw3d ) 458 518 ENDIF 459 519 IF( iom_use( "INTPPPHY" ) .OR. iom_use( "INTPPPHY2" ) ) THEN 520 !$OMP PARALLEL 521 !$OMP WORKSHARE 460 522 zw2d(:,:) = 0. 523 !$OMP END WORKSHARE 461 524 DO jk = 1, jpkm1 462 zw2d(:,:) = zw2d(:,:) + zprorca (:,:,jk) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert. integrated primary produc. by nano 525 !$OMP DO schedule(static) private(jj,ji) 526 DO jj = 1, jpj 527 DO ji =1 ,jpi 528 zw2d(ji,jj) = zw2d(ji,jj) + zprorca (ji,jj,jk) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert. integrated primary produc. 
by nano 529 END DO 530 END DO 531 !$OMP END DO NOWAIT 463 532 ENDDO 533 !$OMP END PARALLEL 464 534 CALL iom_put( "INTPPPHY" , zw2d ) 465 535 ! 536 !$OMP PARALLEL 537 !$OMP WORKSHARE 466 538 zw2d(:,:) = 0. 539 !$OMP END WORKSHARE 467 540 DO jk = 1, jpkm1 468 zw2d(:,:) = zw2d(:,:) + zprorcad(:,:,jk) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert. integrated primary produc. by diatom 541 !$OMP DO schedule(static) private(jj,ji) 542 DO jj = 1, jpj 543 DO ji =1 ,jpi 544 zw2d(ji,jj) = zw2d(ji,jj) + zprorcad(ji,jj,jk) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert. integrated primary produc. by diatom 545 END DO 546 END DO 547 !$OMP END DO NOWAIT 469 548 ENDDO 549 !$OMP END PARALLEL 470 550 CALL iom_put( "INTPPPHY2" , zw2d ) 471 551 ENDIF 472 552 IF( iom_use( "INTPP" ) ) THEN 553 !$OMP PARALLEL 554 !$OMP WORKSHARE 473 555 zw2d(:,:) = 0. 556 !$OMP END WORKSHARE 474 557 DO jk = 1, jpkm1 475 zw2d(:,:) = zw2d(:,:) + ( zprorca(:,:,jk) + zprorcad(:,:,jk) ) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert. integrated pp 558 !$OMP DO schedule(static) private(jj,ji) 559 DO jj = 1, jpj 560 DO ji =1 ,jpi 561 zw2d(ji,jj) = zw2d(ji,jj) + ( zprorca(ji,jj,jk) + zprorcad(ji,jj,jk) ) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert. integrated pp 562 END DO 563 END DO 564 !$OMP END DO NOWAIT 476 565 ENDDO 566 !$OMP END PARALLEL 477 567 CALL iom_put( "INTPP" , zw2d ) 478 568 ENDIF 479 569 IF( iom_use( "INTPNEW" ) ) THEN 570 !$OMP PARALLEL 571 !$OMP WORKSHARE 480 572 zw2d(:,:) = 0. 573 !$OMP END WORKSHARE 481 574 DO jk = 1, jpkm1 482 zw2d(:,:) = zw2d(:,:) + ( zpronew(:,:,jk) + zpronewd(:,:,jk) ) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert. integrated new prod 575 !$OMP DO schedule(static) private(jj,ji) 576 DO jj = 1, jpj 577 DO ji =1 ,jpi 578 zw2d(ji,jj) = zw2d(ji,jj) + ( zpronew(ji,jj,jk) + zpronewd(ji,jj,jk) ) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert. 
integrated new prod 579 END DO 580 END DO 581 !$OMP END DO NOWAIT 483 582 ENDDO 583 !$OMP END PARALLEL 484 584 CALL iom_put( "INTPNEW" , zw2d ) 485 585 ENDIF 486 586 IF( iom_use( "INTPBFE" ) ) THEN ! total biogenic iron production ( vertically integrated ) 587 !$OMP PARALLEL 588 !$OMP WORKSHARE 487 589 zw2d(:,:) = 0. 590 !$OMP END WORKSHARE 488 591 DO jk = 1, jpkm1 489 zw2d(:,:) = zw2d(:,:) + ( zprofen(:,:,jk) + zprofed(:,:,jk) ) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert integr. bfe prod 592 !$OMP DO schedule(static) private(jj,ji) 593 DO jj = 1, jpj 594 DO ji =1 ,jpi 595 zw2d(ji,jj) = zw2d(ji,jj) + ( zprofen(ji,jj,jk) + zprofed(ji,jj,jk) ) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert integr. bfe prod 596 END DO 597 END DO 598 !$OMP END DO NOWAIT 490 599 ENDDO 600 !$OMP END PARALLEL 491 601 CALL iom_put( "INTPBFE" , zw2d ) 492 602 ENDIF 493 603 IF( iom_use( "INTPBSI" ) ) THEN ! total biogenic silica production ( vertically integrated ) 604 !$OMP PARALLEL 605 !$OMP WORKSHARE 494 606 zw2d(:,:) = 0. 607 !$OMP END WORKSHARE 495 608 DO jk = 1, jpkm1 496 zw2d(:,:) = zw2d(:,:) + zprorcad(:,:,jk) * zysopt(:,:,jk) * e3t_n(:,:,jk) * zfact * tmask(:,:,jk) ! vert integr. bsi prod 609 !$OMP DO schedule(static) private(jj,ji) 610 DO jj = 1, jpj 611 DO ji =1 ,jpi 612 zw2d(ji,jj) = zw2d(ji,jj) + zprorcad(ji,jj,jk) * zysopt(ji,jj,jk) * e3t_n(ji,jj,jk) * zfact * tmask(ji,jj,jk) ! vert integr. bsi prod 613 END DO 614 END DO 615 !$OMP END DO NOWAIT 497 616 ENDDO 617 !$OMP END PARALLEL 498 618 CALL iom_put( "INTPBSI" , zw2d ) 499 619 ENDIF … … 506 626 IF( ln_diatrc ) THEN 507 627 zfact = 1.e+3 * rfact2r 628 !$OMP PARALLEL WORKSHARE 508 629 trc3d(:,:,:,jp_pcs0_3d + 4) = zprorca (:,:,:) * zfact * tmask(:,:,:) 509 630 trc3d(:,:,:,jp_pcs0_3d + 5) = zprorcad(:,:,:) * zfact * tmask(:,:,:) … … 515 636 trc3d(:,:,:,jp_pcs0_3d + 10) = zprofen (:,:,:) * zfact * tmask(:,:,:) 516 637 # endif 638 !$OMP END PARALLEL WORKSHARE 517 639 ENDIF 518 640 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zrem.F90
r6140 r7037 89 89 90 90 ! Initialisation of temprary arrys 91 !$OMP PARALLEL 92 !$OMP WORKSHARE 91 93 zdepprod(:,:,:) = 1._wp 92 94 ztempbac(:,:) = 0._wp 95 !$OMP END WORKSHARE 93 96 94 97 ! Computation of the mean phytoplankton concentration as … … 98 101 ! ------------------------------------------------------- 99 102 DO jk = 1, jpkm1 103 !$OMP DO schedule(static) private(jj,ji,zdep,zdepmin) 100 104 DO jj = 1, jpj 101 105 DO ji = 1, jpi … … 111 115 END DO 112 116 END DO 113 END DO 114 117 !$OMP END DO NOWAIT 118 END DO 119 120 !$OMP DO schedule(static) private(jk,jj,ji) 115 121 DO jk = 1, jpkm1 116 122 DO jj = 1, jpj … … 124 130 END DO 125 131 132 !$OMP DO schedule(static) private(jk,jj,ji,zstep,zremik,zolimit) 126 133 DO jk = 1, jpkm1 127 134 DO jj = 1, jpj … … 153 160 154 161 162 !$OMP DO schedule(static) private(jk,jj,ji,zstep,zonitr) 155 163 DO jk = 1, jpkm1 156 164 DO jj = 1, jpj … … 174 182 END DO 175 183 END DO 184 !$OMP END DO NOWAIT 185 !$OMP END PARALLEL 176 186 177 187 IF(ln_ctl) THEN ! print mean trends (used for debugging) … … 181 191 ENDIF 182 192 193 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zbactfer) 183 194 DO jk = 1, jpkm1 184 195 DO jj = 1, jpj … … 210 221 ENDIF 211 222 223 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zstep,zremip,zorem,zofer,zorem2,zofer2) 212 224 DO jk = 1, jpkm1 213 225 DO jj = 1, jpj … … 261 273 ENDIF 262 274 275 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,zstep,zsatur,zsatur2,znusil,znusil2,zdep,ztem,zfactdep,zsiremin,zosil) 263 276 DO jk = 1, jpkm1 264 277 DO jj = 1, jpj … … 303 316 ! -------------------------------------------------------------------- 304 317 318 !$OMP PARALLEL DO schedule(static) private(jk) 305 319 DO jk = 1, jpkm1 306 320 tra(:,:,jk,jppo4) = tra(:,:,jk,jppo4) + zolimi (:,:,jk) + denitr(:,:,jk) … … 318 332 ! 319 333 IF( iom_use( "REMIN" ) ) THEN 334 !$OMP PARALLEL WORKSHARE 320 335 zw3d(:,:,:) = zolimi(:,:,:) * tmask(:,:,:) * zfact ! 
Remineralisation rate 336 !$OMP END PARALLEL WORKSHARE 321 337 CALL iom_put( "REMIN" , zw3d ) 322 338 ENDIF 323 339 IF( iom_use( "DENIT" ) ) THEN 340 !$OMP PARALLEL WORKSHARE 324 341 zw3d(:,:,:) = denitr(:,:,:) * rdenit * rno3 * tmask(:,:,:) * zfact ! Denitrification 342 !$OMP END PARALLEL WORKSHARE 325 343 CALL iom_put( "DENIT" , zw3d ) 326 344 ENDIF … … 381 399 ENDIF 382 400 ! 401 !$OMP PARALLEL WORKSHARE 383 402 nitrfac (:,:,:) = 0._wp 384 403 denitr (:,:,:) = 0._wp 385 404 denitnh4(:,:,:) = 0._wp 405 !$OMP END PARALLEL WORKSHARE 386 406 ! 387 407 END SUBROUTINE p4z_rem_init -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsbc.F90
r6140 r7037 114 114 CALL fld_read( kt, 1, sf_dust ) 115 115 IF( nn_ice_tr == -1 .AND. .NOT. ln_ironice ) THEN 116 dust(:,:) = sf_dust(1)%fnow(:,:,1) 116 !$OMP PARALLEL DO schedule(static) private(jj, ji) 117 DO jj = 1, jpj 118 DO ji = 1, jpi 119 dust(ji,jj) = sf_dust(1)%fnow(ji,jj,1) 120 END DO 121 END DO 117 122 ELSE 118 dust(:,:) = sf_dust(1)%fnow(:,:,1) * ( 1.0 - fr_i(:,:) ) 123 !$OMP PARALLEL DO schedule(static) private(jj, ji) 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 dust(ji,jj) = sf_dust(1)%fnow(ji,jj,1) * ( 1.0 - fr_i(ji,jj) ) 127 END DO 128 END DO 119 129 ENDIF 120 130 ENDIF … … 124 134 IF( kt == nit000 .OR. ( kt /= nit000 .AND. ntimes_solub > 1 ) ) THEN 125 135 CALL fld_read( kt, 1, sf_solub ) 126 solub(:,:) = sf_solub(1)%fnow(:,:,1) 136 !$OMP PARALLEL DO schedule(static) private(jj, ji) 137 DO jj = 1, jpj 138 DO ji = 1, jpi 139 solub(ji,jj) = sf_solub(1)%fnow(ji,jj,1) 140 END DO 141 END DO 127 142 ENDIF 128 143 ENDIF … … 134 149 IF( kt == nit000 .OR. ( kt /= nit000 .AND. ntimes_riv > 1 ) ) THEN 135 150 CALL fld_read( kt, 1, sf_river ) 151 !$OMP PARALLEL DO schedule(static) private(jj, ji, zcoef) 136 152 DO jj = 1, jpj 137 153 DO ji = 1, jpi … … 156 172 IF( kt == nit000 .OR. ( kt /= nit000 .AND. ntimes_ndep > 1 ) ) THEN 157 173 CALL fld_read( kt, 1, sf_ndepo ) 174 !$OMP PARALLEL DO schedule(static) private(jj, ji) 158 175 DO jj = 1, jpj 159 176 DO ji = 1, jpi … … 261 278 ! online configuration : computed in sbcrnf 262 279 IF( lk_offline ) THEN 280 !$OMP PARALLEL WORKSHARE 263 281 nk_rnf(:,:) = 1 264 282 h_rnf (:,:) = gdept_n(:,:,1) 283 !$OMP END PARALLEL WORKSHARE 265 284 ENDIF 266 285 … … 434 453 IF (lwp) WRITE(numout,*) ' Level corresponding to 50m depth ', ik50,' ', gdept_1d(ik50+1) 435 454 IF (lwp) WRITE(numout,*) 455 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zmaskt) 436 456 DO jk = 1, ik50 437 457 DO jj = 2, jpjm1 … … 448 468 CALL lbc_lnk( zcmask , 'T', 1. ) ! lateral boundary conditions on cmask (sign unchanged) 449 469 ! 
470 !$OMP PARALLEL 471 !$OMP DO schedule(static) private(jk, jj, ji, zexpide, zdenitide) 450 472 DO jk = 1, jpk 451 473 DO jj = 1, jpj … … 457 479 END DO 458 480 END DO 481 !$OMP END DO NOWAIT 459 482 ! Coastal supply of iron 460 483 ! ------------------------- 484 !$OMP WORKSHARE 461 485 ironsed(:,:,jpk) = 0._wp 486 !$OMP END WORKSHARE 487 !$OMP DO schedule(static) private(jk) 462 488 DO jk = 1, jpkm1 463 489 ironsed(:,:,jk) = sedfeinput * zcmask(:,:,jk) / ( e3t_n(:,:,jk) * rday ) 464 490 END DO 491 !$OMP END DO NOWAIT 492 !$OMP END PARALLEL 465 493 DEALLOCATE( zcmask) 466 494 ENDIF … … 479 507 CALL iom_close( numhydro ) 480 508 ! 509 !$OMP PARALLEL WORKSHARE 481 510 hydrofe(:,:,:) = ( hydrofe(:,:,:) * hratio ) / ( cvol(:,:,:) * ryyss + rtrn ) / 1000._wp 511 !$OMP END PARALLEL WORKSHARE 482 512 ! 483 513 ENDIF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsed.F90
r6140 r7037 82 82 CALL wrk_alloc( jpi, jpj, jpk, zsoufer ) 83 83 84 !$OMP PARALLEL WORKSHARE 84 85 zdenit2d(:,:) = 0.e0 85 86 zbureff (:,:) = 0.e0 … … 87 88 zwork2 (:,:) = 0.e0 88 89 zwork3 (:,:) = 0.e0 90 !$OMP END PARALLEL WORKSHARE 89 91 90 92 ! Iron input/uptake due to sea ice : Crude parameterization based on Lancelot et al. … … 93 95 ! 94 96 CALL wrk_alloc( jpi, jpj, zironice ) 95 ! 97 !$OMP PARALLEL 98 !$OMP DO schedule(static) private(jj,ji,zdep,zwflux,zfminus,zfplus) 96 99 DO jj = 1, jpj 97 100 DO ji = 1, jpi … … 104 107 END DO 105 108 ! 109 !$OMP WORKSHARE 106 110 tra(:,:,1,jpfer) = tra(:,:,1,jpfer) + zironice(:,:) 111 !$OMP END WORKSHARE NOWAIT 112 !$OMP END PARALLEL 107 113 ! 108 114 IF( lk_iomput .AND. knt == nrdttrc .AND. iom_use( "Ironice" ) ) & … … 121 127 ! ! Iron and Si deposition at the surface 122 128 IF( ln_solub ) THEN 123 zirondep(:,:,1) = solub(:,:) * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 129 !$OMP PARALLEL WORKSHARE 130 zirondep(:,:,1) = solub(:,:) * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 131 !$OMP END PARALLEL WORKSHARE 124 132 ELSE 125 zirondep(:,:,1) = dustsolub * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 133 !$OMP PARALLEL WORKSHARE 134 zirondep(:,:,1) = dustsolub * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 55.85 + 3.e-10 * r1_ryyss 135 !$OMP END PARALLEL WORKSHARE 126 136 ENDIF 127 zsidep(:,:) = 8.8 * 0.075 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 28.1 128 zpdep (:,:) = 0.1 * 0.021 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 31. / po4r 137 !$OMP PARALLEL WORKSHARE 138 zsidep(:,:) = 8.8 * 0.075 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 28.1 139 zpdep (:,:) = 0.1 * 0.021 * dust(:,:) * mfrac * rfact2 / e3t_n(:,:,1) / 31. / po4r 140 !$OMP END PARALLEL WORKSHARE 129 141 ! ! Iron solubilization of particles in the water column 130 142 ! ! 
dust in kg/m2/s ---> 1/55.85 to put in mol/Fe ; wdust in m/j 131 143 zwdust = 0.03 * rday / ( wdust * 55.85 ) / ( 270. * rday ) 144 !$OMP DO schedule(static) private(jk) 132 145 DO jk = 2, jpkm1 133 146 zirondep(:,:,jk) = dust(:,:) * mfrac * zwdust * rfact2 * EXP( -gdept_n(:,:,jk) / 540. ) 134 147 END DO 135 148 ! ! Iron solubilization of particles in the water column 149 !$OMP PARALLEL 150 !$OMP WORKSHARE 136 151 tra(:,:,1,jppo4) = tra(:,:,1,jppo4) + zpdep (:,:) 137 152 tra(:,:,1,jpsil) = tra(:,:,1,jpsil) + zsidep (:,:) 138 153 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + zirondep(:,:,:) 154 !$OMP END WORKSHARE 155 !$OMP END PARALLEL 139 156 ! 140 157 IF( lk_iomput ) THEN … … 146 163 ENDIF 147 164 ELSE 148 IF( ln_diatrc ) & 149 & trc2d(:,:,jp_pcs0_2d + 11) = zirondep(:,:,1) * 1.e+3 * rfact2r * e3t_n(:,:,1) * tmask(:,:,1) 165 IF( ln_diatrc ) THEN 166 !$OMP PARALLEL WORKSHARE 167 trc2d(:,:,jp_pcs0_2d + 11) = zirondep(:,:,1) * 1.e+3 * rfact2r * e3t_n(:,:,1) * tmask(:,:,1) 168 !$OMP END PARALLEL WORKSHARE 169 ENDIF 150 170 ENDIF 151 171 CALL wrk_dealloc( jpi, jpj, zpdep, zsidep ) … … 157 177 ! ---------------------------------------------------------- 158 178 IF( ln_river ) THEN 179 !$OMP DO schedule(static) private(jj,ji,jk) 159 180 DO jj = 1, jpj 160 181 DO ji = 1, jpi … … 174 195 ! ---------------------------------------------------------- 175 196 IF( ln_ndepo ) THEN 197 !$OMP PARALLEL WORKSHARE 176 198 tra(:,:,1,jpno3) = tra(:,:,1,jpno3) + nitdep(:,:) * rfact2 177 199 tra(:,:,1,jptal) = tra(:,:,1,jptal) - rno3 * nitdep(:,:) * rfact2 200 !$OMP END PARALLEL WORKSHARE 178 201 ENDIF 179 202 … … 181 204 ! ------------------------------------------------------ 182 205 IF( ln_ironsed ) THEN 206 !$OMP PARALLEL WORKSHARE 183 207 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + ironsed(:,:,:) * rfact2 208 !$OMP END PARALLEL WORKSHARE 184 209 ! 185 210 IF( lk_iomput .AND. knt == nrdttrc .AND. iom_use( "Ironsed" ) ) & … … 190 215 ! 
------------------------------------------------------ 191 216 IF( ln_hydrofe ) THEN 217 !$OMP PARALLEL WORKSHARE 192 218 tra(:,:,:,jpfer) = tra(:,:,:,jpfer) + hydrofe(:,:,:) * rfact2 219 !$OMP END PARALLEL WORKSHARE 193 220 ! 194 221 IF( lk_iomput .AND. knt == nrdttrc .AND. iom_use( "HYDR" ) ) & … … 199 226 ! to avoid CFL problems above the sediments 200 227 ! -------------------------------------------------------------------- 228 !$OMP PARALLEL 229 !$OMP DO schedule(static) private(jj,ji,ikt,zdep) 201 230 DO jj = 1, jpj 202 231 DO ji = 1, jpi … … 213 242 ! Computation of the fraction of organic matter that is permanently buried from Dunne's model 214 243 ! ------------------------------------------------------- 244 !$OMP DO schedule(static) private(jj,ji,ikt,zflx,zo2,zno3,zdep) 215 245 DO jj = 1, jpj 216 246 DO ji = 1, jpi … … 242 272 ! The factor for calcite comes from the alkalinity effect 243 273 ! ------------------------------------------------------------- 274 !$OMP DO schedule(static) private(jj,ji,ikt,zfactcal) 244 275 DO jj = 1, jpj 245 276 DO ji = 1, jpi … … 260 291 END DO 261 292 END DO 293 294 !$OMP SINGLE 262 295 zsumsedsi = glob_sum( zwork1(:,:) * e1e2t(:,:) ) * r1_rday 263 296 zsumsedpo4 = glob_sum( zwork2(:,:) * e1e2t(:,:) ) * r1_rday … … 271 304 zrivsil = 1._wp - ( sumdepsi + rivdsiinput * r1_ryyss ) / ( zsumsedsi + rtrn ) 272 305 #endif 273 306 !$OMP END SINGLE 307 308 !$OMP DO schedule(static) private(jj,ji,ikt,zdep,zws4,zwsc,zsiloss,zcaloss,zfactcal,zrivalk) 274 309 DO jj = 1, jpj 275 310 DO ji = 1, jpi … … 298 333 END DO 299 334 335 !$OMP DO schedule(static) private(jj,ji,ikt,zdep,zws3,zws4,zrivno3,zwstpoc,zpdenit,z1pdenit,zolimit,zdenitt) 300 336 DO jj = 1, jpj 301 337 DO ji = 1, jpi … … 336 372 END DO 337 373 END DO 338 374 !$OMP END DO NOWAIT 339 375 ! Nitrogen fixation process 340 376 ! 
Small source iron from particulate inorganic iron 341 377 !----------------------------------- 378 !$OMP DO schedule(static) private(jk,jj,ji,zlim,zfact,ztrfer,ztrpo4,zlight) 342 379 DO jk = 1, jpkm1 343 380 DO jj = 1, jpj … … 363 400 ! Nitrogen change due to nitrogen fixation 364 401 ! ---------------------------------------- 402 !$OMP DO schedule(static) private(jk,jj,ji,zfact) 365 403 DO jk = 1, jpkm1 366 404 DO jj = 1, jpj … … 376 414 END DO 377 415 END DO 416 !$OMP END DO NOWAIT 417 !$OMP END PARALLEL 378 418 379 419 IF( lk_iomput ) THEN … … 384 424 zwork1(:,:) = 0. 385 425 DO jk = 1, jpkm1 386 zwork1(:,:) = zwork1(:,:) + nitrpot(:,:,jk) * nitrfix * zfact * e3t_n(:,:,jk) * tmask(:,:,jk) 426 !$OMP PARALLEL DO schedule(static) private(jj,ji) 427 DO jj = 1, jpj 428 DO ji = 1, jpi 429 zwork1(ji,jj) = zwork1(ji,jj) + nitrpot(ji,jj,jk) * nitrfix * zfact * e3t_n(ji,jj,jk) * tmask(ji,jj,jk) 430 END DO 431 END DO 387 432 ENDDO 388 433 CALL iom_put( "INTNFIX" , zwork1 ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsink.F90
r6140 r7037 102 102 ! by data and from the coagulation theory 103 103 ! ----------------------------------------------------------- 104 !$OMP PARALLEL 105 !$OMP DO schedule(static) private(jk, jj, ji, zmax, zfact) 104 106 DO jk = 1, jpkm1 105 107 DO jj = 1, jpj … … 113 115 114 116 ! limit the values of the sinking speeds to avoid numerical instabilities 117 !$OMP WORKSHARE 115 118 wsbio3(:,:,:) = wsbio 116 119 wscal (:,:,:) = wsbio4(:,:,:) 120 !$OMP END WORKSHARE NOWAIT 121 !$OMP END PARALLEL 117 122 ! 118 123 ! OA This is (I hope) a temporary solution for the problem that may … … 131 136 iiter1 = 1 132 137 iiter2 = 1 138 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zwsmax, iiter1, iiter2) 133 139 DO jk = 1, jpkm1 134 140 DO jj = 1, jpj … … 150 156 ENDIF 151 157 158 !$OMP PARALLEL 159 !$OMP DO schedule(static) private(jk, jj, ji, zwsmax) 152 160 DO jk = 1,jpkm1 153 161 DO jj = 1, jpj … … 161 169 END DO 162 170 END DO 171 !$OMP END DO NOWAIT 163 172 164 173 ! Initializa to zero all the sinking arrays 165 174 ! ----------------------------------------- 175 !$OMP WORKSHARE 166 176 sinking (:,:,:) = 0.e0 167 177 sinking2(:,:,:) = 0.e0 … … 170 180 sinksil (:,:,:) = 0.e0 171 181 sinkfer2(:,:,:) = 0.e0 182 !$OMP END WORKSHARE NOWAIT 183 !$OMP END PARALLEL 172 184 173 185 ! Compute the sedimentation term using p4zsink2 for all the sinking particles … … 187 199 ! Exchange between organic matter compartments due to coagulation/disaggregation 188 200 ! --------------------------------------------------- 201 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, zstep, zfact, zagg, zagg1, zagg2, zagg3, zagg4, zaggfe, zaggdoc, zaggdoc2, zaggdoc3) 189 202 DO jk = 1, jpkm1 190 203 DO jj = 1, jpj … … 235 248 236 249 ! Total carbon export per year 237 IF( iom_use( "tcexp" ) .OR. ( ln_check_mass .AND. kt == nitend .AND. knt == nrdttrc ) ) 238 &t_oce_co2_exp = glob_sum( ( sinking(:,:,ik100) + sinking2(:,:,ik100) ) * e1e2t(:,:) * tmask(:,:,1) )250 IF( iom_use( "tcexp" ) .OR. 
( ln_check_mass .AND. kt == nitend .AND. knt == nrdttrc ) ) & 251 & t_oce_co2_exp = glob_sum( ( sinking(:,:,ik100) + sinking2(:,:,ik100) ) * e1e2t(:,:) * tmask(:,:,1) ) 239 252 ! 240 253 IF( lk_iomput ) THEN … … 245 258 ! 246 259 IF( iom_use( "EPC100" ) ) THEN 260 !$OMP PARALLEL WORKSHARE 247 261 zw2d(:,:) = ( sinking(:,:,ik100) + sinking2(:,:,ik100) ) * zfact * tmask(:,:,1) ! Export of carbon at 100m 262 !$OMP END PARALLEL WORKSHARE 248 263 CALL iom_put( "EPC100" , zw2d ) 249 264 ENDIF 250 265 IF( iom_use( "EPFE100" ) ) THEN 266 !$OMP PARALLEL WORKSHARE 251 267 zw2d(:,:) = ( sinkfer(:,:,ik100) + sinkfer2(:,:,ik100) ) * zfact * tmask(:,:,1) ! Export of iron at 100m 268 !$OMP END PARALLEL WORKSHARE 252 269 CALL iom_put( "EPFE100" , zw2d ) 253 270 ENDIF 254 271 IF( iom_use( "EPCAL100" ) ) THEN 272 !$OMP PARALLEL WORKSHARE 255 273 zw2d(:,:) = sinkcal(:,:,ik100) * zfact * tmask(:,:,1) ! Export of calcite at 100m 274 !$OMP END PARALLEL WORKSHARE 256 275 CALL iom_put( "EPCAL100" , zw2d ) 257 276 ENDIF 258 277 IF( iom_use( "EPSI100" ) ) THEN 278 !$OMP PARALLEL WORKSHARE 259 279 zw2d(:,:) = sinksil(:,:,ik100) * zfact * tmask(:,:,1) ! Export of bigenic silica at 100m 280 !$OMP END PARALLEL WORKSHARE 260 281 CALL iom_put( "EPSI100" , zw2d ) 261 282 ENDIF 262 283 IF( iom_use( "EXPC" ) ) THEN 284 !$OMP PARALLEL WORKSHARE 263 285 zw3d(:,:,:) = ( sinking(:,:,:) + sinking2(:,:,:) ) * zfact * tmask(:,:,:) ! Export of carbon in the water column 286 !$OMP END PARALLEL WORKSHARE 264 287 CALL iom_put( "EXPC" , zw3d ) 265 288 ENDIF 266 289 IF( iom_use( "EXPFE" ) ) THEN 290 !$OMP PARALLEL WORKSHARE 267 291 zw3d(:,:,:) = ( sinkfer(:,:,:) + sinkfer2(:,:,:) ) * zfact * tmask(:,:,:) ! Export of iron 292 !$OMP END PARALLEL WORKSHARE 268 293 CALL iom_put( "EXPFE" , zw3d ) 269 294 ENDIF 270 295 IF( iom_use( "EXPCAL" ) ) THEN 296 !$OMP PARALLEL WORKSHARE 271 297 zw3d(:,:,:) = sinkcal(:,:,:) * zfact * tmask(:,:,:) ! 
Export of calcite 298 !$OMP END PARALLEL WORKSHARE 272 299 CALL iom_put( "EXPCAL" , zw3d ) 273 300 ENDIF 274 301 IF( iom_use( "EXPSI" ) ) THEN 302 !$OMP PARALLEL WORKSHARE 275 303 zw3d(:,:,:) = sinksil(:,:,:) * zfact * tmask(:,:,:) ! Export of bigenic silica 304 !$OMP END PARALLEL WORKSHARE 276 305 CALL iom_put( "EXPSI" , zw3d ) 277 306 ENDIF … … 284 313 IF( ln_diatrc ) THEN 285 314 zfact = 1.e3 * rfact2r 315 !$OMP PARALLEL WORKSHARE 286 316 trc2d(:,:,jp_pcs0_2d + 4) = sinking (:,:,ik100) * zfact * tmask(:,:,1) 287 317 trc2d(:,:,jp_pcs0_2d + 5) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) … … 290 320 trc2d(:,:,jp_pcs0_2d + 8) = sinksil (:,:,ik100) * zfact * tmask(:,:,1) 291 321 trc2d(:,:,jp_pcs0_2d + 9) = sinkcal (:,:,ik100) * zfact * tmask(:,:,1) 322 !$OMP END PARALLEL WORKSHARE 292 323 ENDIF 293 324 ENDIF … … 359 390 ! --------------------------------------------------------- 360 391 361 znum3d(:,:,:) = 0.e0362 392 zval1 = 1. + xkr_zeta 363 393 zval2 = 1. + xkr_zeta + xkr_eta 364 394 zval3 = 1. + xkr_eta 365 395 !$OMP PARALLEL 396 !$OMP WORKSHARE 397 znum3d(:,:,:) = 0.e0 398 !$OMP END WORKSHARE 366 399 ! Computation of the vertical sinking speed : Kriest et Evans, 2000 367 400 ! ----------------------------------------------------------------- 368 401 402 !$OMP DO schedule(static) private(jk, jj, ji, znum, zeps, zfm, zgm, zdiv, zdiv1) 369 403 DO jk = 1, jpkm1 370 404 DO jj = 1, jpj … … 391 425 END DO 392 426 END DO 393 394 wscal(:,:,:) = MAX( wsbio3(:,:,:), 30._wp ) 427 !$OMP END DO NOWAIT 428 !$OMP DO schedule(static) private(jk, jj, ji) 429 DO jk = 1, jpk 430 DO jj = 1, jpj 431 DO ji = 1, jpi 432 wscal(ji,jj,jk) = MAX( wsbio3(ji,jj,jk), 30._wp ) 433 END DO 434 END DO 435 END DO 436 !$OMP END DO NOWAIT 395 437 396 438 ! INITIALIZE TO ZERO ALL THE SINKING ARRAYS 397 439 ! 
----------------------------------------- 398 440 !$OMP WORKSHARE 399 441 sinking (:,:,:) = 0.e0 400 442 sinking2(:,:,:) = 0.e0 … … 402 444 sinkfer (:,:,:) = 0.e0 403 445 sinksil (:,:,:) = 0.e0 404 446 !$OMP END WORKSHARE NOWAIT 447 !$OMP END PARALLEL 405 448 ! Compute the sedimentation term using p4zsink2 for all the sinking particles 406 449 ! ----------------------------------------------------- … … 428 471 zval4 = 4. + xkr_eta 429 472 473 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji, znum, zeps, zfm, zsm, zdiv, zdiv1, zdiv2, zdiv3, zdiv4, zdiv5, zagg, zagg1, zagg2, zagg3, zagg4, zagg5, zfract, zaggdoc, zaggdoc1, zaggsh, zaggsi, znumdoc) 430 474 DO jk = 1,jpkm1 431 475 DO jj = 1,jpj … … 532 576 ! 533 577 IF( iom_use( "EPC100" ) ) THEN 578 !$OMP PARALLEL WORKSHARE 534 579 zw2d(:,:) = sinking(:,:,ik100) * zfact * tmask(:,:,1) ! Export of carbon at 100m 580 !$OMP END PARALLEL WORKSHARE 535 581 CALL iom_put( "EPC100" , zw2d ) 536 582 ENDIF 537 583 IF( iom_use( "EPN100" ) ) THEN 584 !$OMP PARALLEL WORKSHARE 538 585 zw2d(:,:) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) ! Export of number of aggregates ? 586 !$OMP END PARALLEL WORKSHARE 539 587 CALL iom_put( "EPN100" , zw2d ) 540 588 ENDIF 541 589 IF( iom_use( "EPCAL100" ) ) THEN 590 !$OMP PARALLEL WORKSHARE 542 591 zw2d(:,:) = sinkcal(:,:,ik100) * zfact * tmask(:,:,1) ! Export of calcite at 100m 592 !$OMP END PARALLEL WORKSHARE 543 593 CALL iom_put( "EPCAL100" , zw2d ) 544 594 ENDIF 545 595 IF( iom_use( "EPSI100" ) ) THEN 596 !$OMP PARALLEL WORKSHARE 546 597 zw2d(:,:) = sinksil(:,:,ik100) * zfact * tmask(:,:,1) ! Export of bigenic silica at 100m 598 !$OMP END PARALLEL WORKSHARE 547 599 CALL iom_put( "EPSI100" , zw2d ) 548 600 ENDIF 549 601 IF( iom_use( "EXPC" ) ) THEN 602 !$OMP PARALLEL WORKSHARE 550 603 zw3d(:,:,:) = sinking(:,:,:) * zfact * tmask(:,:,:) ! 
Export of carbon in the water column 604 !$OMP END PARALLEL WORKSHARE 551 605 CALL iom_put( "EXPC" , zw3d ) 552 606 ENDIF 553 607 IF( iom_use( "EXPN" ) ) THEN 608 !$OMP PARALLEL WORKSHARE 554 609 zw3d(:,:,:) = sinking(:,:,:) * zfact * tmask(:,:,:) ! Export of carbon in the water column 610 !$OMP END PARALLEL WORKSHARE 555 611 CALL iom_put( "EXPN" , zw3d ) 556 612 ENDIF 557 613 IF( iom_use( "EXPCAL" ) ) THEN 614 !$OMP PARALLEL WORKSHARE 558 615 zw3d(:,:,:) = sinkcal(:,:,:) * zfact * tmask(:,:,:) ! Export of calcite 616 !$OMP END PARALLEL WORKSHARE 559 617 CALL iom_put( "EXPCAL" , zw3d ) 560 618 ENDIF 561 619 IF( iom_use( "EXPSI" ) ) THEN 620 !$OMP PARALLEL WORKSHARE 562 621 zw3d(:,:,:) = sinksil(:,:,:) * zfact * tmask(:,:,:) ! Export of bigenic silica 622 !$OMP END PARALLEL WORKSHARE 563 623 CALL iom_put( "EXPSI" , zw3d ) 564 624 ENDIF 565 625 IF( iom_use( "XNUM" ) ) THEN 626 !$OMP PARALLEL WORKSHARE 566 627 zw3d(:,:,:) = znum3d(:,:,:) * tmask(:,:,:) ! Number of particles on aggregats 628 !$OMP END PARALLEL WORKSHARE 567 629 CALL iom_put( "XNUM" , zw3d ) 568 630 ENDIF 569 631 IF( iom_use( "WSC" ) ) THEN 632 !$OMP PARALLEL WORKSHARE 570 633 zw3d(:,:,:) = wsbio3(:,:,:) * tmask(:,:,:) ! Sinking speed of carbon particles 634 !$OMP END PARALLEL WORKSHARE 571 635 CALL iom_put( "WSC" , zw3d ) 572 636 ENDIF 573 637 IF( iom_use( "WSN" ) ) THEN 638 !$OMP PARALLEL WORKSHARE 574 639 zw3d(:,:,:) = wsbio4(:,:,:) * tmask(:,:,:) ! 
Sinking speed of particles number 640 !$OMP END PARALLEL WORKSHARE 575 641 CALL iom_put( "WSN" , zw3d ) 576 642 ENDIF … … 581 647 IF( ln_diatrc ) THEN 582 648 zfact = 1.e3 * rfact2r 649 !$OMP PARALLEL WORKSHARE 583 650 trc2d(:,: ,jp_pcs0_2d + 4) = sinking (:,:,ik100) * zfact * tmask(:,:,1) 584 651 trc2d(:,: ,jp_pcs0_2d + 5) = sinking2(:,:,ik100) * zfact * tmask(:,:,1) … … 593 660 trc3d(:,:,:,jp_pcs0_3d + 16) = wsbio3 (:,:,:) * tmask(:,:,:) 594 661 trc3d(:,:,:,jp_pcs0_3d + 17) = wsbio4 (:,:,:) * tmask(:,:,:) 662 !$OMP END PARALLEL WORKSHARE 595 663 ENDIF 596 664 ENDIF … … 795 863 796 864 zstep = rfact2 / FLOAT( kiter ) / 2. 797 798 ztraz(:,:,:) = 0.e0 799 zakz (:,:,:) = 0.e0 800 ztrb (:,:,:) = trb(:,:,:,jp_tra) 801 865 !$OMP PARALLEL 866 !$OMP DO schedule(static) private(jk, jj, ji) 867 DO jk = 1, jpk 868 DO jj = 1, jpj 869 DO ji = 1, jpi 870 ztraz(ji,jj,jk) = 0.e0 871 zakz (ji,jj,jk) = 0.e0 872 ztrb (ji,jj,jk) = trb(ji,jj,jk,jp_tra) 873 END DO 874 END DO 875 END DO 876 !$OMP END DO NOWAIT 877 !$OMP DO schedule(static) private(jk) 802 878 DO jk = 1, jpkm1 803 879 zwsink2(:,:,jk+1) = -pwsink(:,:,jk) / rday * tmask(:,:,jk+1) 804 880 END DO 805 zwsink2(:,:,1) = 0.e0 881 882 !$OMP DO schedule(static) private(jj, ji) 883 DO jj = 1, jpj 884 DO ji = 1, jpi 885 zwsink2(ji,jj,1) = 0.e0 886 END DO 887 END DO 888 !$OMP END DO NOWAIT 889 !$OMP END PARALLEL 890 806 891 IF( lk_degrad ) THEN 892 !$OMP PARALLEL WORKSHARE 807 893 zwsink2(:,:,:) = zwsink2(:,:,:) * facvol(:,:,:) 894 !$OMP END PARALLEL WORKSHARE 808 895 ENDIF 809 896 … … 812 899 DO jn = 1, 2 813 900 ! 
first guess of the slopes interior values 901 !$OMP PARALLEL 902 !$OMP DO schedule(static) private(jk) 814 903 DO jk = 2, jpkm1 815 904 ztraz(:,:,jk) = ( trb(:,:,jk-1,jp_tra) - trb(:,:,jk,jp_tra) ) * tmask(:,:,jk) 816 905 END DO 817 ztraz(:,:,1 ) = 0.0 818 ztraz(:,:,jpk) = 0.0 906 !$OMP END DO NOWAIT 907 908 !$OMP DO schedule(static) private(jj, ji) 909 DO jj = 1, jpj 910 DO ji = 1, jpi 911 ztraz(ji,jj,1 ) = 0.0 912 ztraz(ji,jj,jpk) = 0.0 913 END DO 914 END DO 819 915 820 916 ! slopes 917 !$OMP DO schedule(static) private(jk, jj, ji, zign) 821 918 DO jk = 2, jpkm1 822 919 DO jj = 1,jpj … … 829 926 830 927 ! Slopes limitation 928 !$OMP DO schedule(static) private(jk, jj, ji) 831 929 DO jk = 2, jpkm1 832 930 DO jj = 1, jpj … … 839 937 840 938 ! vertical advective flux 939 !$OMP DO schedule(static) private(jk, jj, ji, zigma, zew) 841 940 DO jk = 1, jpkm1 842 941 DO jj = 1, jpj … … 849 948 END DO 850 949 ! 851 ! Boundary conditions 852 psinkflx(:,:,1 ) = 0.e0 853 psinkflx(:,:,jpk) = 0.e0 950 !$OMP DO schedule(static) private(jj, ji) 951 DO jj = 1, jpj 952 DO ji = 1, jpi 953 ! Boundary conditions 954 psinkflx(ji,jj,1 ) = 0.e0 955 psinkflx(ji,jj,jpk) = 0.e0 956 END DO 957 END DO 854 958 959 !$OMP DO schedule(static) private(jk, jj, ji, zflx) 855 960 DO jk=1,jpkm1 856 961 DO jj = 1,jpj … … 861 966 END DO 862 967 END DO 863 968 !$OMP END DO NOWAIT 969 !$OMP END PARALLEL 864 970 ENDDO 865 971 !$OMP PARALLEL 972 !$OMP DO schedule(static) private(jk, jj, ji, zflx) 866 973 DO jk = 1,jpkm1 867 974 DO jj = 1,jpj … … 873 980 END DO 874 981 875 trb(:,:,:,jp_tra) = ztrb(:,:,:) 876 psinkflx(:,:,:) = 2. * psinkflx(:,:,:) 982 !$OMP DO schedule(static) private(jk, jj, ji) 983 DO jk = 1, jpk 984 DO jj = 1, jpj 985 DO ji = 1, jpi 986 trb(ji,jj,jk,jp_tra) = ztrb(ji,jj,jk) 987 psinkflx(ji,jj,jk) = 2. * psinkflx(ji,jj,jk) 988 END DO 989 END DO 990 END DO 991 !$OMP END DO NOWAIT 992 !$OMP END PARALLEL 877 993 ! 878 994 CALL wrk_dealloc( jpi, jpj, jpk, ztraz, zakz, zwsink2, ztrb ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/PISCES/P4Z/p4zsms.F90
r6421 r7037 109 109 IF( ( neuler == 0 .AND. kt == nittrc000 ) .OR. ln_top_euler ) THEN 110 110 DO jn = jp_pcs0, jp_pcs1 ! SMS on tracer without Asselin time-filter 111 trb(:,:,:,jn) = trn(:,:,:,jn) 111 !$OMP DO schedule(static) private(jk, jj, ji) 112 DO jk = 1, jpk 113 DO jj = 1, jpj 114 DO ji = 1, jpi 115 trb(ji,jj,jk,jn) = trn(ji,jj,jk,jn) 116 END DO 117 END DO 118 END DO 112 119 END DO 113 120 ENDIF … … 135 142 CALL p4z_flx( kt, jnt ) ! Compute surface fluxes 136 143 ! 144 !$OMP PARALLEL 145 !$OMP WORKSHARE 137 146 xnegtr(:,:,:) = 1.e0 147 !$OMP END WORKSHARE 138 148 DO jn = jp_pcs0, jp_pcs1 149 !$OMP DO schedule(static) private(jk, jj, ji, ztra) 139 150 DO jk = 1, jpk 140 151 DO jj = 1, jpj … … 151 162 ! ! 152 163 DO jn = jp_pcs0, jp_pcs1 153 trb(:,:,:,jn) = trb(:,:,:,jn) + xnegtr(:,:,:) * tra(:,:,:,jn) 164 !$OMP DO schedule(static) private(jk, jj, ji) 165 DO jk = 1, jpk 166 DO jj = 1, jpj 167 DO ji = 1, jpi 168 trb(ji,jj,jk,jn) = trb(ji,jj,jk,jn) + xnegtr(ji,jj,jk) * tra(ji,jj,jk,jn) 169 END DO 170 END DO 171 END DO 154 172 END DO 155 173 ! 156 174 DO jn = jp_pcs0, jp_pcs1 157 tra(:,:,:,jn) = 0._wp 175 !$OMP DO schedule(static) private(jk, jj, ji) 176 DO jk = 1, jpk 177 DO jj = 1, jpj 178 DO ji = 1, jpi 179 tra(ji,jj,jk,jn) = 0._wp 180 END DO 181 END DO 182 END DO 183 !$OMP END DO NOWAIT 158 184 END DO 185 !$OMP END PARALLEL 159 186 ! 160 187 IF( ln_top_euler ) THEN 161 188 DO jn = jp_pcs0, jp_pcs1 162 trn(:,:,:,jn) = trb(:,:,:,jn) 189 !$OMP PARALLEL DO schedule(static) private(jk, jj, ji) 190 DO jk = 1, jpk 191 DO jj = 1, jpj 192 DO ji = 1, jpi 193 trn(ji,jj,jk,jn) = trb(ji,jj,jk,jn) 194 END DO 195 END DO 196 END DO 163 197 END DO 164 198 ENDIF … … 169 203 zcoef1 = 1.e0 / xkr_massp 170 204 zcoef2 = 1.e0 / xkr_massp / 1.1 205 !$OMP PARALLEL DO schedule(static) private(jk) 171 206 DO jk = 1,jpkm1 172 207 trb(:,:,jk,jpnum) = MAX( trb(:,:,jk,jpnum), trb(:,:,jk,jppoc) * zcoef1 / xnumm(jk) ) … … 321 356 ! Set PH from total alkalinity, borat (???), akb3 (???) 
and ak23 (???) 322 357 ! -------------------------------------------------------- 358 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji,ztmas,ztmas1,zcaralk,zco3,zbicarb) 323 359 DO jk = 1, jpk 324 360 DO jj = 1, jpj … … 370 406 CALL iom_get( numrtr, jpdom_autoglo, 'Silicamax' , xksimax(:,:) ) 371 407 ELSE 408 !$OMP PARALLEL WORKSHARE 372 409 xksimax(:,:) = xksi(:,:) 410 !$OMP END PARALLEL WORKSHARE 373 411 ENDIF 374 412 ! … … 407 445 REAL(wp) :: silmean = 91.51 ! mean value of silicate 408 446 ! 447 INTEGER :: ji, jj, jk 409 448 REAL(wp) :: zarea, zalksumn, zpo4sumn, zno3sumn, zsilsumn 410 449 REAL(wp) :: zalksumb, zpo4sumb, zno3sumb, zsilsumb 450 REAL(wp), POINTER, DIMENSION(:,:,:) :: zctrn_jptal, zctrn_jppo4, zctrn_jppo3, zctrn_jpsil !workspace arrays 451 REAL(wp), POINTER, DIMENSION(:,:,:) :: zctrb_jptal, zctrb_jppo4, zctrb_jppo3, zctrb_jpsil !workspace arrays 411 452 !!--------------------------------------------------------------------- 412 453 … … 417 458 418 459 IF( cp_cfg == "orca" .AND. .NOT. lk_c1d ) THEN ! ORCA configuration (not 1D) ! 419 ! ! --------------------------- ! 460 CALL wrk_alloc( jpi, jpj, jpk, zctrn_jptal, zctrn_jppo4, zctrn_jppo3, zctrn_jpsil ) 461 CALL wrk_alloc( jpi, jpj, jpk, zctrb_jptal, zctrb_jppo4, zctrb_jppo3, zctrb_jpsil ) 420 462 ! 
set total alkalinity, phosphate, nitrate & silicate 421 463 zarea = 1._wp / glob_sum( cvol(:,:,:) ) * 1e6 422 423 zalksumn = glob_sum( trn(:,:,:,jptal) * cvol(:,:,:) ) * zarea 424 zpo4sumn = glob_sum( trn(:,:,:,jppo4) * cvol(:,:,:) ) * zarea * po4r 425 zno3sumn = glob_sum( trn(:,:,:,jpno3) * cvol(:,:,:) ) * zarea * rno3 426 zsilsumn = glob_sum( trn(:,:,:,jpsil) * cvol(:,:,:) ) * zarea 464 !$OMP PARALLEL 465 !$OMP DO schedule(static) private(jk,jj,ji) 466 DO jk = 1, jpk 467 DO jj = 1, jpj 468 DO ji = 1, jpi 469 zctrn_jptal(ji,jj,jk) = trn(ji,jj,jk,jptal) * cvol(ji,jj,jk) 470 zctrn_jppo4(ji,jj,jk) = trn(ji,jj,jk,jppo4) * cvol(ji,jj,jk) 471 zctrn_jppo3(ji,jj,jk) = trn(ji,jj,jk,jpno3) * cvol(ji,jj,jk) 472 zctrn_jpsil(ji,jj,jk) = trn(ji,jj,jk,jpsil) * cvol(ji,jj,jk) 473 END DO 474 END DO 475 END DO 476 !$OMP SINGLE 477 zalksumn = glob_sum( zctrn_jptal(:,:,:) ) * zarea 478 zpo4sumn = glob_sum( zctrn_jppo4(:,:,:) ) * zarea * po4r 479 zno3sumn = glob_sum( zctrn_jppo3(:,:,:) ) * zarea * rno3 480 zsilsumn = glob_sum( zctrn_jpsil(:,:,:) ) * zarea 481 !$OMP END SINGLE 482 483 !$OMP DO schedule(static) private(jk,jj,ji) 484 DO jk = 1, jpk 485 DO jj = 1, jpj 486 DO ji = 1, jpi 487 trn(ji,jj,jk,jpsil) = MIN( 400.e-6,trn(ji,jj,jk,jpsil) * silmean / zsilsumn ) 488 trn(ji,jj,jk,jptal) = trn(ji,jj,jk,jptal) * alkmean / zalksumn 489 trn(ji,jj,jk,jppo4) = trn(ji,jj,jk,jppo4) * po4mean / zpo4sumn 490 trn(ji,jj,jk,jpno3) = trn(ji,jj,jk,jpno3) * no3mean / zno3sumn 491 END DO 492 END DO 493 END DO 494 !$OMP END DO NOWAIT 495 !$OMP END PARALLEL 496 497 IF(lwp) THEN 498 WRITE(numout,*) ' TALKN mean : ', zalksumn 499 WRITE(numout,*) ' PO4N mean : ', zpo4sumn 500 WRITE(numout,*) ' NO3N mean : ', zno3sumn 501 WRITE(numout,*) ' SiO3N mean : ', zsilsumn 502 END IF 503 ! 504 IF( .NOT. 
ln_top_euler ) THEN 505 !$OMP PARALLEL 506 !$OMP DO schedule(static) private(jk,jj,ji) 507 DO jk = 1, jpk 508 DO jj = 1, jpj 509 DO ji = 1, jpi 510 zctrb_jptal(ji,jj,jk) = trb(ji,jj,jk,jptal) * cvol(ji,jj,jk) 511 zctrb_jppo4(ji,jj,jk) = trb(ji,jj,jk,jppo4) * cvol(ji,jj,jk) 512 zctrb_jppo3(ji,jj,jk) = trb(ji,jj,jk,jpno3) * cvol(ji,jj,jk) 513 zctrb_jpsil(ji,jj,jk) = trb(ji,jj,jk,jpsil) * cvol(ji,jj,jk) 514 END DO 515 END DO 516 END DO 517 !$OMP SINGLE 518 zalksumb = glob_sum( zctrb_jptal(:,:,:) ) * zarea 519 zpo4sumb = glob_sum( zctrb_jppo4(:,:,:) ) * zarea * po4r 520 zno3sumb = glob_sum( zctrb_jppo3(:,:,:) ) * zarea * rno3 521 zsilsumb = glob_sum( zctrb_jpsil(:,:,:) ) * zarea 522 !$OMP END SINGLE 523 524 !$OMP DO schedule(static) private(jk,jj,ji) 525 DO jk = 1, jpk 526 DO jj = 1, jpj 527 DO ji = 1, jpi 528 trb(ji,jj,jk,jpsil) = MIN( 400.e-6,trb(ji,jj,jk,jpsil) * silmean / zsilsumb ) 529 trb(ji,jj,jk,jptal) = trb(ji,jj,jk,jptal) * alkmean / zalksumb 530 trb(ji,jj,jk,jppo4) = trb(ji,jj,jk,jppo4) * po4mean / zpo4sumb 531 trb(ji,jj,jk,jpno3) = trb(ji,jj,jk,jpno3) * no3mean / zno3sumb 532 END DO 533 END DO 534 END DO 535 !$OMP END DO NOWAIT 536 !$OMP END PARALLEL 427 537 428 IF(lwp) WRITE(numout,*) ' TALKN mean : ', zalksumn 429 trn(:,:,:,jptal) = trn(:,:,:,jptal) * alkmean / zalksumn 430 431 IF(lwp) WRITE(numout,*) ' PO4N mean : ', zpo4sumn 432 trn(:,:,:,jppo4) = trn(:,:,:,jppo4) * po4mean / zpo4sumn 433 434 IF(lwp) WRITE(numout,*) ' NO3N mean : ', zno3sumn 435 trn(:,:,:,jpno3) = trn(:,:,:,jpno3) * no3mean / zno3sumn 436 437 IF(lwp) WRITE(numout,*) ' SiO3N mean : ', zsilsumn 438 trn(:,:,:,jpsil) = MIN( 400.e-6,trn(:,:,:,jpsil) * silmean / zsilsumn ) 439 ! 440 ! 441 IF( .NOT. 
ln_top_euler ) THEN 442 zalksumb = glob_sum( trb(:,:,:,jptal) * cvol(:,:,:) ) * zarea 443 zpo4sumb = glob_sum( trb(:,:,:,jppo4) * cvol(:,:,:) ) * zarea * po4r 444 zno3sumb = glob_sum( trb(:,:,:,jpno3) * cvol(:,:,:) ) * zarea * rno3 445 zsilsumb = glob_sum( trb(:,:,:,jpsil) * cvol(:,:,:) ) * zarea 446 447 IF(lwp) WRITE(numout,*) ' ' 448 IF(lwp) WRITE(numout,*) ' TALKB mean : ', zalksumb 449 trb(:,:,:,jptal) = trb(:,:,:,jptal) * alkmean / zalksumb 450 451 IF(lwp) WRITE(numout,*) ' PO4B mean : ', zpo4sumb 452 trb(:,:,:,jppo4) = trb(:,:,:,jppo4) * po4mean / zpo4sumb 453 454 IF(lwp) WRITE(numout,*) ' NO3B mean : ', zno3sumb 455 trb(:,:,:,jpno3) = trb(:,:,:,jpno3) * no3mean / zno3sumb 456 457 IF(lwp) WRITE(numout,*) ' SiO3B mean : ', zsilsumb 458 trb(:,:,:,jpsil) = MIN( 400.e-6,trb(:,:,:,jpsil) * silmean / zsilsumb ) 538 IF(lwp) THEN 539 WRITE(numout,*) ' ' 540 WRITE(numout,*) ' TALKB mean : ', zalksumb 541 WRITE(numout,*) ' PO4B mean : ', zpo4sumb 542 WRITE(numout,*) ' NO3B mean : ', zno3sumb 543 WRITE(numout,*) ' SiO3B mean : ', zsilsumb 544 END IF 459 545 ENDIF 460 546 ! 547 CALL wrk_dealloc( jpi, jpj, jpk, zctrb_jptal, zctrb_jppo4, zctrb_jppo3, zctrb_jpsil ) 548 CALL wrk_dealloc( jpi, jpj, jpk, zctrn_jptal, zctrn_jppo4, zctrn_jppo3, zctrn_jpsil ) 461 549 ENDIF 462 550 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcadv.F90
r6140 r7037 94 94 ENDIF 95 95 ! !== effective transport ==! 96 !$OMP PARALLEL DO schedule(static) private(jk) 96 97 DO jk = 1, jpkm1 97 98 zun(:,:,jk) = e2u (:,:) * e3u_n(:,:,jk) * un(:,:,jk) ! eulerian transport … … 101 102 ! 102 103 IF( ln_vvl_ztilde .OR. ln_vvl_layer ) THEN ! add z-tilde and/or vvl corrections 104 !$OMP PARALLEL WORKSHARE 103 105 zun(:,:,:) = zun(:,:,:) + un_td(:,:,:) 104 106 zvn(:,:,:) = zvn(:,:,:) + vn_td(:,:,:) 107 !$OMP END PARALLEL WORKSHARE 105 108 ENDIF 106 109 ! … … 110 113 IF( ln_mle ) CALL tra_adv_mle( kt, nittrc000, zun, zvn, zwn, 'TRC' ) ! add the mle transport 111 114 ! 115 !$OMP PARALLEL WORKSHARE 112 116 zun(:,:,jpk) = 0._wp ! no transport trough the bottom 113 117 zvn(:,:,jpk) = 0._wp 114 118 zwn(:,:,jpk) = 0._wp 119 !$OMP END PARALLEL WORKSHARE 115 120 ! 116 121 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcbbl.F90
r5836 r7037 61 61 IF( l_trdtrc ) THEN 62 62 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrtrd ) ! temporary save of trends 63 !$OMP PARALLEL WORKSHARE 63 64 ztrtrd(:,:,:,:) = tra(:,:,:,:) 65 !$OMP END PARALLEL WORKSHARE 64 66 ENDIF 65 67 … … 88 90 IF( l_trdtrc ) THEN ! save the horizontal diffusive trends for further diagnostics 89 91 DO jn = 1, jptra 92 !$OMP PARALLEL WORKSHARE 90 93 ztrtrd(:,:,:,jn) = tra(:,:,:,jn) - ztrtrd(:,:,:,jn) 94 !$OMP END PARALLEL WORKSHARE 91 95 CALL trd_tra( kt, 'TRC', jn, jptra_bbl, ztrtrd(:,:,:,jn) ) 92 96 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcldf.F90
r6403 r7037 76 76 IF( l_trdtrc ) THEN 77 77 CALL wrk_alloc( jpi,jpj,jpk,jptra, ztrtrd ) 78 !$OMP PARALLEL WORKSHARE 78 79 ztrtrd(:,:,:,:) = tra(:,:,:,:) 80 !$OMP END PARALLEL WORKSHARE 79 81 ENDIF 80 82 ! !* set the lateral diffusivity coef. for passive tracer 81 83 CALL wrk_alloc( jpi,jpj,jpk, zahu, zahv ) 84 !$OMP PARALLEL 85 !$OMP WORKSHARE 82 86 zahu(:,:,:) = rldf * ahtu(:,:,:) 83 87 zahv(:,:,:) = rldf * ahtv(:,:,:) 88 !$OMP END WORKSHARE 84 89 ! !* Enhanced zonal diffusivity coefficent in the equatorial domain 90 !$OMP DO schedule(static) private(jk,jj,ji,zdep) 85 91 DO jk= 1, jpk 86 92 DO jj = 1, jpj … … 93 99 END DO 94 100 END DO 101 !$OMP END DO NOWAIT 102 !$OMP END PARALLEL 95 103 ! 96 104 SELECT CASE ( nldf ) !* compute lateral mixing trend and add it to the general trend … … 112 120 IF( l_trdtrc ) THEN ! send the trends for further diagnostics 113 121 DO jn = 1, jptra 122 !$OMP PARALLEL WORKSHARE 114 123 ztrtrd(:,:,:,jn) = tra(:,:,:,jn) - ztrtrd(:,:,:,jn) 124 !$OMP END PARALLEL WORKSHARE 115 125 CALL trd_tra( kt, 'TRC', jn, jptra_ldf, ztrtrd(:,:,:,jn) ) 116 126 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcnxt.F90
r6140 r7037 79 79 INTEGER, INTENT( in ) :: kt ! ocean time-step index 80 80 ! 81 INTEGER :: jk, jn ! dummy loop indices81 INTEGER :: jk, jn, jj, ji ! dummy loop indices 82 82 REAL(wp) :: zfact ! temporary scalar 83 83 CHARACTER (len=22) :: charout … … 108 108 IF( l_trdtrc ) THEN ! trends: store now fields before the Asselin filter application 109 109 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrdt ) 110 !$OMP PARALLEL WORKSHARE 110 111 ztrdt(:,:,:,:) = trn(:,:,:,:) 112 !$OMP END PARALLEL WORKSHARE 111 113 ENDIF 112 114 ! ! Leap-Frog + Asselin filter time stepping 113 115 IF( neuler == 0 .AND. kt == nittrc000 ) THEN ! Euler time-stepping at first time-step (only swap) 114 116 DO jn = 1, jptra 117 !$OMP PARALLEL DO schedule(static) private(jk) 115 118 DO jk = 1, jpkm1 116 119 trn(:,:,jk,jn) = tra(:,:,jk,jn) … … 134 137 DO jk = 1, jpkm1 135 138 zfact = 1._wp / r2dttrc 136 ztrdt(:,:,jk,jn) = ( trb(:,:,jk,jn) - ztrdt(:,:,jk,jn) ) * zfact 139 !$OMP PARALLEL DO schedule(static) private(jj,ji) 140 DO jj = 1, jpj 141 DO ji = 1, jpi 142 ztrdt(ji,jj,jk,jn) = ( trb(ji,jj,jk,jn) - ztrdt(ji,jj,jk,jn) ) * zfact 143 END DO 144 END DO 137 145 CALL trd_tra( kt, 'TRC', jn, jptra_atf, ztrdt ) 138 146 END DO -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcrad.F90
r5836 r7037 138 138 REAL(wp) :: ztrcorb, ztrmasb ! temporary scalars 139 139 REAL(wp) :: zcoef, ztrcorn, ztrmasn ! " " 140 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrtrdb, ztrtrdn ! workspace arrays 140 REAL(wp), POINTER, DIMENSION(:,:,:) :: ztrtrdb, ztrtrdn ! workspace arrays 141 REAL(wp), POINTER, DIMENSION(:,:,:) :: zcptrbmax, zcptrnmax, zcptrbmin, zcptrnmin ! workspace arrays 141 142 REAL(wp) :: zs2rdt 142 143 LOGICAL :: lldebug = .FALSE. … … 145 146 146 147 IF( l_trdtrc ) CALL wrk_alloc( jpi, jpj, jpk, ztrtrdb, ztrtrdn ) 147 148 CALL wrk_alloc( jpi, jpj, jpk, zcptrbmax, zcptrnmax, zcptrbmin, zcptrnmin ) 148 149 IF( PRESENT( cpreserv ) ) THEN ! total tracer concentration is preserved 149 150 … … 154 155 155 156 IF( l_trdtrc ) THEN 156 ztrtrdb(:,:,:) = ptrb(:,:,:,jn) ! save input trb for trend computation 157 ztrtrdn(:,:,:) = ptrn(:,:,:,jn) ! save input trn for trend computation 158 ENDIF 159 ! ! sum over the global domain 160 ztrcorb = glob_sum( MIN( 0., ptrb(:,:,:,jn) ) * cvol(:,:,:) ) 161 ztrcorn = glob_sum( MIN( 0., ptrn(:,:,:,jn) ) * cvol(:,:,:) ) 162 163 ztrmasb = glob_sum( MAX( 0., ptrb(:,:,:,jn) ) * cvol(:,:,:) ) 164 ztrmasn = glob_sum( MAX( 0., ptrn(:,:,:,jn) ) * cvol(:,:,:) ) 157 !$OMP PARALLEL WORKSHARE 158 ztrtrdb(:,:,:) = ptrb(:,:,:,jn) ! save input trb for trend computation 159 ztrtrdn(:,:,:) = ptrn(:,:,:,jn) 160 !$OMP END PARALLEL WORKSHARE 161 ENDIF 162 ! ! 
sum over the global domain 163 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 164 DO jk = 1, jpk 165 DO jj = 1, jpj 166 DO ji = 1, jpi 167 zcptrbmin(ji,jj,jk) = MIN( 0., ptrb(ji,jj,jk,jn) ) * cvol(ji,jj,jk) 168 zcptrnmin(ji,jj,jk) = MIN( 0., ptrn(ji,jj,jk,jn) ) * cvol(ji,jj,jk) 169 zcptrbmax(ji,jj,jk) = MAX( 0., ptrb(ji,jj,jk,jn) ) * cvol(ji,jj,jk) 170 zcptrnmax(ji,jj,jk) = MAX( 0., ptrn(ji,jj,jk,jn) ) * cvol(ji,jj,jk) 171 END DO 172 END DO 173 END DO 174 ztrcorb = glob_sum( zcptrbmin(:,:,:) ) 175 ztrcorn = glob_sum( zcptrnmin(:,:,:) ) 176 ztrmasb = glob_sum( zcptrbmax(:,:,:) ) 177 ztrmasn = glob_sum( zcptrnmax(:,:,:) ) 165 178 166 179 IF( ztrcorb /= 0 ) THEN 167 180 zcoef = 1. + ztrcorb / ztrmasb 181 !$OMP PARALLEL DO schedule(static) private(jk) 168 182 DO jk = 1, jpkm1 169 183 ptrb(:,:,jk,jn) = MAX( 0., ptrb(:,:,jk,jn) ) … … 174 188 IF( ztrcorn /= 0 ) THEN 175 189 zcoef = 1. + ztrcorn / ztrmasn 190 !$OMP PARALLEL DO schedule(static) private(jk) 176 191 DO jk = 1, jpkm1 177 192 ptrn(:,:,jk,jn) = MAX( 0., ptrn(:,:,jk,jn) ) … … 183 198 ! 184 199 zs2rdt = 1. / ( 2. * rdt ) 200 !$OMP PARALLEL WORKSHARE 185 201 ztrtrdb(:,:,:) = ( ptrb(:,:,:,jn) - ztrtrdb(:,:,:) ) * zs2rdt 186 202 ztrtrdn(:,:,:) = ( ptrn(:,:,:,jn) - ztrtrdn(:,:,:) ) * zs2rdt 203 !$OMP END PARALLEL WORKSHARE 204 187 205 CALL trd_tra( kt, 'TRC', jn, jptra_radb, ztrtrdb ) ! Asselin-like trend handling 188 206 CALL trd_tra( kt, 'TRC', jn, jptra_radn, ztrtrdn ) ! standard trend handling … … 198 216 199 217 IF( l_trdtrc ) THEN 218 !$OMP PARALLEL 219 !$OMP WORKSHARE 200 220 ztrtrdb(:,:,:) = ptrb(:,:,:,jn) ! save input trb for trend computation 201 221 ztrtrdn(:,:,:) = ptrn(:,:,:,jn) ! save input trn for trend computation 202 ENDIF 203 222 !$OMP END WORKSHARE NOWAIT 223 224 !$OMP DO schedule(static) private(jk,jj,ji) 204 225 DO jk = 1, jpkm1 205 226 DO jj = 1, jpj … … 210 231 END DO 211 232 END DO 212 213 IF( l_trdtrc ) THEN214 233 ! 215 234 zs2rdt = 1. / ( 2. 
* rdt * FLOAT( nn_dttrc ) ) 235 !$OMP WORKSHARE 216 236 ztrtrdb(:,:,:) = ( ptrb(:,:,:,jn) - ztrtrdb(:,:,:) ) * zs2rdt 217 237 ztrtrdn(:,:,:) = ( ptrn(:,:,:,jn) - ztrtrdn(:,:,:) ) * zs2rdt 238 !$OMP END WORKSHARE NOWAIT 239 !$OMP END PARALLEL 218 240 CALL trd_tra( kt, 'TRC', jn, jptra_radb, ztrtrdb ) ! Asselin-like trend handling 219 241 CALL trd_tra( kt, 'TRC', jn, jptra_radn, ztrtrdn ) ! standard trend handling 220 242 ! 243 ELSE 244 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 245 DO jk = 1, jpkm1 246 DO jj = 1, jpj 247 DO ji = 1, jpi 248 ptrn(ji,jj,jk,jn) = MAX( 0. , ptrn(ji,jj,jk,jn) ) 249 ptrb(ji,jj,jk,jn) = MAX( 0. , ptrb(ji,jj,jk,jn) ) 250 END DO 251 END DO 252 END DO 221 253 ENDIF 222 254 ! … … 226 258 227 259 IF( l_trdtrc ) CALL wrk_dealloc( jpi, jpj, jpk, ztrtrdb, ztrtrdn ) 260 CALL wrk_dealloc( jpi, jpj, jpk, zcptrbmax, zcptrnmax, zcptrbmin, zcptrnmin ) 228 261 229 262 END SUBROUTINE trc_rad_sms -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trcsbc.F90
r6309 r7037 109 109 ELSE ! No restart or restart not found: Euler forward time stepping 110 110 zfact = 1._wp 111 !$OMP PARALLEL WORKSHARE 111 112 sbc_trc_b(:,:,:) = 0._wp 113 !$OMP END PARALLEL WORKSHARE 112 114 ENDIF 113 115 ELSE ! Swap of forcing fields 114 116 IF( ln_top_euler ) THEN 115 117 zfact = 1._wp 118 !$OMP PARALLEL WORKSHARE 116 119 sbc_trc_b(:,:,:) = 0._wp 120 !$OMP END PARALLEL WORKSHARE 117 121 ELSE 118 122 zfact = 0.5_wp 123 !$OMP PARALLEL WORKSHARE 119 124 sbc_trc_b(:,:,:) = sbc_trc(:,:,:) 125 !$OMP END PARALLEL WORKSHARE 120 126 ENDIF 121 127 ! … … 127 133 ! 128 134 IF( .NOT. lk_offline .AND. .NOT.ln_linssh ) THEN ! online coupling with vvl 135 !$OMP PARALLEL WORKSHARE 129 136 zsfx(:,:) = 0._wp 137 !$OMP END PARALLEL WORKSHARE 130 138 ELSE ! online coupling free surface or offline with free surface 139 !$OMP PARALLEL WORKSHARE 131 140 zsfx(:,:) = emp(:,:) 141 !$OMP END PARALLEL WORKSHARE 132 142 ENDIF 133 143 … … 135 145 DO jn = 1, jptra 136 146 ! 137 IF( l_trdtrc ) ztrtrd(:,:,:) = tra(:,:,:,jn) ! save trends 138 147 IF( l_trdtrc ) THEN 148 !$OMP PARALLEL WORKSHARE 149 ztrtrd(:,:,:) = tra(:,:,:,jn) ! save trends 150 !$OMP END PARALLEL WORKSHARE 151 END IF 139 152 IF ( nn_ice_tr == -1 ) THEN ! No tracers in sea ice (null concentration in sea ice) 140 153 154 !$OMP PARALLEL DO schedule(static) private(jj, ji) 141 155 DO jj = 2, jpj 142 156 DO ji = fs_2, fs_jpim1 ! vector opt. … … 146 160 147 161 ELSE 148 162 !$OMP PARALLEL DO schedule(static) private(jj,ji,zse3t,zftra,zcd,ztfx,zdtra,zratio) 149 163 DO jj = 2, jpj 150 164 DO ji = fs_2, fs_jpim1 ! vector opt. … … 170 184 CALL lbc_lnk( sbc_trc(:,:,jn), 'T', 1. ) 171 185 ! Concentration dilution effect on tracers due to evaporation & precipitation 186 !$OMP PARALLEL DO schedule(static) private(jj,ji,zse3t) 172 187 DO jj = 2, jpj 173 188 DO ji = fs_2, fs_jpim1 ! vector opt. … … 178 193 ! 
179 194 IF( l_trdtrc ) THEN 195 !$OMP PARALLEL WORKSHARE 180 196 ztrtrd(:,:,:) = tra(:,:,:,jn) - ztrtrd(:,:,:) 197 !$OMP END PARALLEL WORKSHARE 181 198 CALL trd_tra( kt, 'TRC', jn, jptra_nsr, ztrtrd ) 182 199 END IF -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/TRP/trczdf.F90
r6140 r7037 71 71 IF( l_trdtrc ) THEN 72 72 CALL wrk_alloc( jpi, jpj, jpk, jptra, ztrtrd ) 73 !$OMP PARALLEL WORKSHARE 73 74 ztrtrd(:,:,:,:) = tra(:,:,:,:) 75 !$OMP END PARALLEL WORKSHARE 74 76 ENDIF 75 77 … … 81 83 IF( l_trdtrc ) THEN ! save the vertical diffusive trends for further diagnostics 82 84 DO jn = 1, jptra 85 !$OMP PARALLEL DO schedule(static) private(jk) 83 86 DO jk = 1, jpkm1 84 87 ztrtrd(:,:,jk,jn) = ( ( tra(:,:,jk,jn) - trb(:,:,jk,jn) ) / r2dttrc ) - ztrtrd(:,:,jk,jn) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/trcini.F90
r6309 r7037 117 117 !!---------------------------------------------------------------------- 118 118 ! ! masked grid volume 119 !$OMP PARALLEL DO schedule(static) private(jk) 119 120 DO jk = 1, jpk 120 121 cvol(:,:,jk) = e1e2t(:,:) * e3t_n(:,:,jk) * tmask(:,:,jk) 121 122 END DO 122 IF( lk_degrad ) cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 123 IF( lk_degrad ) THEN 124 !$OMP PARALLEL WORKSHARE 125 cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 126 !$OMP END PARALLEL WORKSHARE 127 END IF 123 128 ! ! total volume of the ocean 124 129 areatot = glob_sum( cvol(:,:,:) ) … … 224 229 jl = n_trc_index(jn) 225 230 CALL trc_dta( nit000, sf_trcdta(jl) ) ! read tracer data at nit000 231 !$OMP PARALLEL WORKSHARE 226 232 trn(:,:,:,jn) = sf_trcdta(jl)%fnow(:,:,:) * tmask(:,:,:) * rf_trfac(jl) 233 !$OMP END PARALLEL WORKSHARE 227 234 ! 228 235 IF( .NOT.ln_trcdmp .AND. .NOT.ln_trcdmp_clo ) THEN !== deallocate data structure ==! … … 238 245 ENDIF 239 246 ! 247 !$OMP PARALLEL WORKSHARE 240 248 trb(:,:,:,:) = trn(:,:,:,:) 249 !$OMP END PARALLEL WORKSHARE 241 250 ! 242 251 ENDIF 243 252 253 !$OMP PARALLEL WORKSHARE 244 254 tra(:,:,:,:) = 0._wp 255 !$OMP END PARALLEL WORKSHARE 245 256 ! ! Partial top/bottom cell: GRADh(trn) 246 257 END SUBROUTINE trc_ini_state -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/trcrst.F90
r6140 r7037 279 279 ENDIF 280 280 ! 281 !$OMP PARALLEL DO schedule(static) private(jk) 281 282 DO jk = 1, jpk 282 283 zvol(:,:,jk) = e1e2t(:,:) * e3t_a(:,:,jk) * tmask(:,:,jk) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/TOP_SRC/trcstp.F90
r6140 r7037 54 54 !!------------------------------------------------------------------- 55 55 INTEGER, INTENT( in ) :: kt ! ocean time-step index 56 INTEGER :: jk, jn ! dummy loop indices56 INTEGER :: jk, jn, jj, ji ! dummy loop indices 57 57 REAL(wp) :: ztrai 58 58 CHARACTER (len=25) :: charout 59 REAL(wp), POINTER, DIMENSION(:,:,:) :: zctrn ! workspace array 59 60 60 61 !!------------------------------------------------------------------- 61 62 ! 63 CALL wrk_alloc( jpi, jpj, jpk, zctrn) 62 64 IF( nn_timing == 1 ) CALL timing_start('trc_stp') 63 65 ! … … 65 67 ! 66 68 IF( .NOT.ln_linssh ) THEN ! update ocean volume due to ssh temporal evolution 69 !$OMP PARALLEL 70 !$OMP DO schedule(static) private(jk) 67 71 DO jk = 1, jpk 68 72 cvol(:,:,jk) = e1e2t(:,:) * e3t_n(:,:,jk) * tmask(:,:,jk) 69 73 END DO 70 IF( lk_degrad ) cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 74 IF( lk_degrad ) THEN 75 !$OMP WORKSHARE 76 cvol(:,:,:) = cvol(:,:,:) * facvol(:,:,:) ! degrad option: reduction by facvol 77 !$OMP END WORKSHARE NOWAIT 78 END IF 79 !$OMP END PARALLEL 71 80 areatot = glob_sum( cvol(:,:,:) ) 72 81 ENDIF … … 83 92 ENDIF 84 93 ! 94 !$OMP PARALLEL WORKSHARE 85 95 tra(:,:,:,:) = 0.e0 96 !$OMP END PARALLEL WORKSHARE 86 97 ! 87 98 CALL trc_rst_opn ( kt ) ! Open tracer restart file … … 105 116 ztrai = 0._wp ! content of all tracers 106 117 DO jn = 1, jptra 107 ztrai = ztrai + glob_sum( trn(:,:,:,jn) * cvol(:,:,:) ) 118 119 !$OMP PARALLEL DO schedule(static) private(jk,jj,ji) 120 DO jk = 1, jpk 121 DO jj = 1, jpj 122 DO ji = 1, jpi 123 zctrn(ji,jj,jk) = trn(ji,jj,jk,jn) * cvol(ji,jj,jk) 124 END DO 125 END DO 126 END DO 127 128 ztrai = ztrai + glob_sum( zctrn(:,:,:) ) 129 108 130 END DO 109 131 IF( lwp ) WRITE(numstr,9300) kt, ztrai / areatot 110 132 9300 FORMAT(i10,e18.10) 111 133 ! 134 CALL wrk_dealloc( jpi, jpj, jpk, zctrn) 135 112 136 IF( nn_timing == 1 ) CALL timing_stop('trc_stp') 113 ! 
137 114 138 END SUBROUTINE trc_stp 115 139 … … 128 152 !!---------------------------------------------------------------------- 129 153 INTEGER, INTENT(in) :: kt 130 INTEGER :: jn 154 INTEGER :: jn, jj, ji 131 155 !!---------------------------------------------------------------------- 132 156 ! … … 147 171 ! 148 172 ALLOCATE( qsr_arr(jpi,jpj,nb_rec_per_days ) ) 173 !$OMP PARALLEL 174 !$OMP DO schedule(static) private(jn, ji, jj) 149 175 DO jn = 1, nb_rec_per_days 150 qsr_arr(:,:,jn) = qsr(:,:) 176 DO jj = 1, jpj 177 DO ji = 1, jpi 178 qsr_arr(ji,jj,jn) = qsr(ji,jj) 179 END DO 180 END DO 151 181 ENDDO 152 qsr_mean(:,:) = qsr(:,:) 182 !$OMP DO schedule(static) private(ji, jj) 183 DO jj = 1, jpj 184 DO ji = 1, jpi 185 qsr_mean(ji,jj) = qsr(ji,jj) 186 END DO 187 END DO 188 !$OMP END DO NOWAIT 189 !$OMP END PARALLEL 153 190 ! 154 191 isecfst = nsec_year + nsec1jan000 ! number of seconds between Jan. 1st 00h of nit000 year and the middle of time step … … 163 200 & ' time = ', (iseclast+rdt*nn_dttrc/2.)/3600.,'hours ' 164 201 isecfst = iseclast 202 !$OMP PARALLEL 165 203 DO jn = 1, nb_rec_per_days - 1 166 qsr_arr(:,:,jn) = qsr_arr(:,:,jn+1) 204 !$OMP DO schedule(static) private(ji, jj) 205 DO jj = 1, jpj 206 DO ji = 1, jpi 207 qsr_arr(ji,jj,jn) = qsr_arr(ji,jj,jn+1) 208 END DO 209 END DO 167 210 END DO 211 !$OMP WORKSHARE 168 212 qsr_arr (:,:,nb_rec_per_days) = qsr(:,:) 213 !$OMP END WORKSHARE 169 214 qsr_mean(:,: ) = SUM( qsr_arr(:,:,:), 3 ) / nb_rec_per_days 215 !$OMP END PARALLEL 170 216 ENDIF 171 217 !
Note: See TracChangeset
for help on using the changeset viewer.