Changeset 7037 for branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC
- Timestamp: 2016-10-18T15:32:04+02:00
- Location: branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC
- Files: 8 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/albedo.F90
r6416 r7037 126 126 ELSE WHERE ; zalb_it = 0.1 + 3.6 * ph_ice 127 127 END WHERE 128 128 !$OMP PARALLEL 129 !$OMP DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 129 130 DO jl = 1, ijpl 130 131 DO jj = 1, jpj … … 156 157 END DO 157 158 159 !$OMP WORKSHARE 158 160 pa_ice_os(:,:,:) = pa_ice_cs(:,:,:) + rcloud ! Oberhuber correction for overcast sky 161 !$OMP END WORKSHARE NOWAIT 162 !$OMP END PARALLEL 159 163 160 164 !------------------------------------------ … … 193 197 z1_c2 = 1. / 0.03 194 198 ! Computation of the snow/ice albedo 199 !$OMP PARALLEL DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 195 200 DO jl = 1, ijpl 196 201 DO jj = 1, jpj … … 233 238 ! 234 239 zcoef = 0.05 / ( 1.1 * rmue**1.4 + 0.15 ) ! Parameterization of Briegled and Ramanathan, 1982 240 !$OMP PARALLEL WORKSHARE 235 241 pa_oce_cs(:,:) = zcoef 236 242 pa_oce_os(:,:) = 0.06 ! Parameterization of Kondratyev, 1969 and Payne, 1972 243 !$OMP END PARALLEL WORKSHARE 237 244 ! 238 245 END SUBROUTINE albedo_oce -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/geo2ocean.F90
r6140 r7037 157 157 ! (computation done on the north stereographic polar plane) 158 158 ! 159 !$OMP PARALLEL 160 !$OMP DO schedule(static) private(jj,ji,zlam,zphi,zxnpt,zynpt,znnpt,zxnpu,zynpu,znnpu,zxnpv,zynpv,znnpv,zxnpf,zynpf,znnpf,zlan,zphh,zxvvt,zyvvt,znvvt,zxffu,zyffu,znffu,zxffv,zyffv,znffv,zxuuf,zyuuf,znuuf) 159 161 DO jj = 2, jpjm1 160 162 DO ji = fs_2, jpi ! vector opt. … … 248 250 ! =============== ! 249 251 252 !$OMP DO schedule(static) private(jj,ji) 250 253 DO jj = 2, jpjm1 251 254 DO ji = fs_2, jpi ! vector opt. … … 268 271 END DO 269 272 END DO 273 !$OMP END DO NOWAIT 274 !$OMP END PARALLEL 270 275 271 276 ! =========================== ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcana.F90
r6748 r7037 269 269 ztau_sais = 0.015 270 270 ztaun = ztau - ztau_sais * COS( (ztime - ztimemax) / (ztimemin - ztimemax) * rpi ) 271 !$OMP PARALLEL DO schedule(static) private(jj, ji) 271 ! module of wind stress and wind speed at T-point 272 zcoef = 1. / ( zrhoa * zcdrag ) 273 !$OMP PARALLEL 274 !$OMP DO schedule(static) private(jj, ji) 272 275 DO jj = 1, jpj 273 276 DO ji = 1, jpi … … 279 282 END DO 280 283 281 ! module of wind stress and wind speed at T-point 282 zcoef = 1. / ( zrhoa * zcdrag ) 283 !$OMP PARALLEL DO schedule(static) private(jj, ji, ztx, zty, zmod) 284 !$OMP DO schedule(static) private(jj, ji, ztx, zty, zmod) 284 285 DO jj = 2, jpjm1 285 286 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 291 292 END DO 292 293 END DO 294 !$OMP END DO NOWAIT 295 !$OMP END PARALLEL 293 296 CALL lbc_lnk( taum(:,:), 'T', 1. ) ; CALL lbc_lnk( wndm(:,:), 'T', 1. ) 294 297 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk_core.F90
r6416 r7037 266 266 ! local scalars ( place there for vector optimisation purposes) 267 267 zcoef_qsatw = 0.98 * 640380. / rhoa 268 268 269 !$OMP PARALLEL WORKSHARE 269 270 zst(:,:) = pst(:,:) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 270 271 … … 276 277 zwnd_i(:,:) = 0.e0 277 278 zwnd_j(:,:) = 0.e0 279 !$OMP END PARALLEL WORKSHARE 278 280 #if defined key_cyclone 279 281 CALL wnd_cyc( kt, zwnd_i, zwnd_j ) ! add analytical tropical cyclone (Vincent et al. JGR 2012) 282 !$OMP PARALLEL DO schedule(static) private(jj, ji) 280 283 DO jj = 2, jpjm1 281 284 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 285 288 END DO 286 289 #endif 290 !$OMP PARALLEL DO schedule(static) private(jj, ji) 287 291 DO jj = 2, jpjm1 288 292 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 294 298 CALL lbc_lnk( zwnd_j(:,:) , 'T', -1. ) 295 299 ! ... scalar wind ( = | U10m - U_oce | ) at T-point (masked) 296 wndm(:,:) = SQRT( zwnd_i(:,:) * zwnd_i(:,:) & 297 & + zwnd_j(:,:) * zwnd_j(:,:) ) * tmask(:,:,1) 298 300 !$OMP PARALLEL DO schedule(static) private(jj, ji) 301 DO jj = 1, jpj 302 DO ji = 1, jpi 303 wndm(ji,jj) = SQRT( zwnd_i(ji,jj) * zwnd_i(ji,jj) & 304 & + zwnd_j(ji,jj) * zwnd_j(ji,jj) ) * tmask(ji,jj,1) 305 306 END DO 307 END DO 299 308 ! ----------------------------------------------------------------------------- ! 300 309 ! I Radiative FLUXES ! … … 307 316 ENDIF 308 317 309 zqlw(:,:) = ( sf(jp_qlw)%fnow(:,:,1) - Stef * zst(:,:)*zst(:,:)*zst(:,:)*zst(:,:) ) * tmask(:,:,1) ! Long Wave 318 !$OMP PARALLEL DO schedule(static) private(jj, ji) 319 DO jj = 1, jpj 320 DO ji = 1, jpi 321 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave 310 322 ! ----------------------------------------------------------------------------- ! 311 323 ! II Turbulent FLUXES ! … … 313 325 314 326 ! ... 
specific humidity at SST and IST 315 zqsatw(:,:) = zcoef_qsatw * EXP( -5107.4 / zst(:,:) ) 316 327 zqsatw(ji,jj) = zcoef_qsatw * EXP( -5107.4 / zst(ji,jj) ) 328 329 END DO 330 END DO 317 331 ! ... NCAR Bulk formulae, computation of Cd, Ch, Ce at T-point : 318 332 CALL turb_core_2z( rn_zqt, rn_zu, zst, sf(jp_tair)%fnow, zqsatw, sf(jp_humi)%fnow, wndm, & … … 320 334 321 335 ! ... tau module, i and j component 336 !$OMP PARALLEL DO schedule(static) private(jj, ji,zztmp) 322 337 DO jj = 1, jpj 323 338 DO ji = 1, jpi … … 338 353 ! Note the use of 0.5*(2-umask) in order to unmask the stress along coastlines 339 354 ! Note the use of MAX(tmask(i,j),tmask(i+1,j) is to mask tau over ice shelves 355 !$OMP PARALLEL DO schedule(static) private(jj, ji) 340 356 DO jj = 1, jpjm1 341 357 DO ji = 1, fs_jpim1 … … 352 368 ! Turbulent fluxes over ocean 353 369 ! ----------------------------- 370 !$OMP PARALLEL DO schedule(static) private(jj, ji) 371 DO jj = 1, jpj 372 DO ji = 1, jpi 354 373 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 355 374 !! q_air and t_air are (or "are almost") given at 10m (wind reference height) 356 zevap( :,:) = rn_efac*MAX( 0._wp, rhoa*Ce(:,:)*( zqsatw(:,:) - sf(jp_humi)%fnow(:,:,1) )*wndm(:,:) ) ! Evaporation357 zqsb ( :,:) = cpa*rhoa*Ch(:,:)*( zst (:,:) - sf(jp_tair)%fnow(:,:,1) )*wndm(:,:) ! Sensible Heat375 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - sf(jp_humi)%fnow(ji,jj,1) )*wndm(ji,jj) ) ! Evaporation 376 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - sf(jp_tair)%fnow(ji,jj,1) )*wndm(ji,jj) ! Sensible Heat 358 377 ELSE 359 378 !! q_air and t_air are not given at 10m (wind reference height) 360 379 ! Values of temp. and hum. adjusted to height of wind during bulk algorithm iteration must be used!!! 361 zevap(:,:) = rn_efac*MAX( 0._wp, rhoa*Ce(:,:)*( zqsatw(:,:) - zq_zu(:,:) )*wndm(:,:) ) ! Evaporation 362 zqsb (:,:) = cpa*rhoa*Ch(:,:)*( zst (:,:) - zt_zu(:,:) )*wndm(:,:) ! 
Sensible Heat 363 ENDIF 364 zqla (:,:) = Lv * zevap(:,:) ! Latent Heat 365 380 zevap(ji,jj) = rn_efac*MAX( 0._wp, rhoa*Ce(ji,jj)*( zqsatw(ji,jj) - zq_zu(ji,jj) )*wndm(ji,jj) ) ! Evaporation 381 zqsb (ji,jj) = cpa*rhoa*Ch(ji,jj)*( zst (ji,jj) - zt_zu(ji,jj) )*wndm(ji,jj) ! Sensible Heat 382 ENDIF 383 zqla (ji,jj) = Lv * zevap(ji,jj) ! Latent Heat 384 385 END DO 386 END DO 366 387 IF(ln_ctl) THEN 367 388 CALL prt_ctl( tab2d_1=zqla , clinfo1=' blk_oce_core: zqla : ', tab2d_2=Ce , clinfo2=' Ce : ' ) … … 379 400 ! ----------------------------------------------------------------------------- ! 380 401 ! 381 emp (:,:) = ( zevap(:,:) & ! mass flux (evap. - precip.) 382 & - sf(jp_prec)%fnow(:,:,1) * rn_pfac ) * tmask(:,:,1) 383 ! 384 qns(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) & ! Downward Non Solar 385 & - sf(jp_snow)%fnow(:,:,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 386 & - zevap(:,:) * pst(:,:) * rcp & ! remove evap heat content at SST 387 & + ( sf(jp_prec)%fnow(:,:,1) - sf(jp_snow)%fnow(:,:,1) ) * rn_pfac & ! add liquid precip heat content at Tair 388 & * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & 389 & + sf(jp_snow)%fnow(:,:,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 390 & * ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) 402 !$OMP PARALLEL DO schedule(static) private(jj, ji) 403 DO jj = 1, jpj 404 DO ji = 1, jpi 405 emp (ji,jj) = ( zevap(ji,jj) & ! mass flux (evap. - precip.) 406 & - sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ) * tmask(ji,jj,1) 407 ! 408 qns(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) & ! Downward Non Solar 409 & - sf(jp_snow)%fnow(ji,jj,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 410 & - zevap(ji,jj) * pst(ji,jj) * rcp & ! remove evap heat content at SST 411 & + ( sf(jp_prec)%fnow(ji,jj,1) - sf(jp_snow)%fnow(ji,jj,1) ) * rn_pfac & ! 
add liquid precip heat content at Tair 412 & * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp & 413 & + sf(jp_snow)%fnow(ji,jj,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 414 & * ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) 415 END DO 416 END DO 391 417 ! 392 418 #if defined key_lim3 419 !$OMP PARALLEL WORKSHARE 393 420 qns_oce(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) ! non solar without emp (only needed by LIM3) 394 421 qsr_oce(:,:) = qsr(:,:) 422 !$OMP END PARALLEL WORKSHARE 395 423 #endif 396 424 ! … … 449 477 450 478 !!gm brutal.... 479 !$OMP PARALLEL WORKSHARE 451 480 utau_ice (:,:) = 0._wp 452 481 vtau_ice (:,:) = 0._wp 453 482 wndm_ice (:,:) = 0._wp 483 !$OMP END PARALLEL WORKSHARE 454 484 !!gm end 455 485 … … 460 490 CASE( 'I' ) ! B-grid ice dynamics : I-point (i.e. F-point with sea-ice indexation) 461 491 ! and scalar wind at T-point ( = | U10m - U_ice | ) (masked) 492 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_f,zwndj_f,zwnorm_f,zwndi_t,zwndj_t) 462 493 DO jj = 2, jpjm1 463 494 DO ji = 2, jpim1 ! B grid : NO vector opt … … 484 515 ! 485 516 CASE( 'C' ) ! C-grid ice dynamics : U & V-points (same as ocean) 517 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_t,zwndj_t) 486 518 DO jj = 2, jpj 487 519 DO ji = fs_2, jpi ! vect. opt. … … 491 523 END DO 492 524 END DO 525 !$OMP PARALLEL DO schedule(static) private(jj,ji) 493 526 DO jj = 2, jpjm1 494 527 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 553 586 zztmp = 1. / ( 1. - albo ) 554 587 ! ! ========================== ! 588 !$OMP PARALLEL 589 !$OMP DO schedule(static) private(jl,jj,ji,zst2,zst3) 555 590 DO jl = 1, jpl ! Loop over ice categories ! 556 591 ! ! ========================== ! … … 602 637 END DO 603 638 ! 639 !$OMP WORKSHARE 604 640 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! total precipitation [kg/m2/s] 605 641 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! 
solid precipitation [kg/m2/s] 642 !$OMP END WORKSHARE 643 !$OMP END PARALLEL 606 644 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow precipitation 607 645 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 612 650 ! --- evaporation --- ! 613 651 z1_lsub = 1._wp / Lsub 652 !$OMP PARALLEL WORKSHARE 614 653 evap_ice (:,:,:) = rn_efac * qla_ice (:,:,:) * z1_lsub ! sublimation 615 654 devap_ice(:,:,:) = rn_efac * dqla_ice(:,:,:) * z1_lsub ! d(sublimation)/dT … … 618 657 ! --- evaporation minus precipitation --- ! 619 658 zsnw(:,:) = 0._wp 659 !$OMP END PARALLEL WORKSHARE 620 660 CALL lim_thd_snwblow( pfrld, zsnw ) ! snow distribution over ice after wind blowing 621 661 emp_oce(:,:) = pfrld(:,:) * zevap(:,:) - ( tprecip(:,:) - sprecip(:,:) ) - sprecip(:,:) * (1._wp - zsnw ) … … 639 679 640 680 ! --- heat content of evap over ice in W/m2 (to be used in 1D-thermo) --- ! 681 !$OMP PARALLEL DO schedule(static) private(jl) 641 682 DO jl = 1, jpl 642 683 qevap_ice(:,:,jl) = 0._wp ! should be -evap_ice(:,:,jl)*( ( Tice - rt0 ) * cpic * tmask(:,:,1) ) … … 652 693 ! ( Maykut and Untersteiner, 1971 ; Ebert and Curry, 1993 ) 653 694 ! 695 !$OMP PARALLEL WORKSHARE 654 696 fr1_i0(:,:) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 655 697 fr2_i0(:,:) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 698 !$OMP END PARALLEL WORKSHARE 656 699 ! 657 700 ! … … 744 787 !! Neutral coefficients at 10m: 745 788 IF( ln_cdgw ) THEN ! wave drag case 789 !$OMP PARALLEL WORKSHARE 746 790 cdn_wave(:,:) = cdn_wave(:,:) + rsmall * ( 1._wp - tmask(:,:,1) ) 747 791 ztmp0 (:,:) = cdn_wave(:,:) 792 !$OMP END PARALLEL WORKSHARE 748 793 ELSE 749 794 ztmp0 = cd_neutral_10m( U_zu ) -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcice_lim_2.F90
r6140 r7037 131 131 SELECT CASE( cp_ice_msh ) 132 132 CASE( 'I' ) !== B-grid ice dynamics : I-point (i.e. F-point with sea-ice indexation) 133 !$OMP PARALLEL DO schedule(static) private(jj, ji) 133 134 DO jj = 2, jpj 134 135 DO ji = 2, jpi ! NO vector opt. possible … … 143 144 ! 144 145 CASE( 'C' ) !== C-grid ice dynamics : U & V-points (same as ocean) 146 !$OMP PARALLEL WORKSHARE 145 147 u_oce(:,:) = ssu_m(:,:) * umask(:,:,1) ! mean surface ocean current at ice velocity point 146 148 v_oce(:,:) = ssv_m(:,:) * vmask(:,:,1) 149 !$OMP END PARALLEL WORKSHARE 147 150 ! 148 151 END SELECT … … 150 153 ! ... masked sea surface freezing temperature [Kelvin] (set to rt0 over land) 151 154 CALL eos_fzp( sss_m(:,:), tfu(:,:) ) 155 !$OMP PARALLEL WORKSHARE 152 156 tfu(:,:) = tfu(:,:) + rt0 153 157 154 158 zsist (:,:,1) = sist (:,:) + rt0 * ( 1. - tmask(:,:,1) ) 159 !$OMP END PARALLEL WORKSHARE 155 160 156 161 ! Ice albedo … … 164 169 165 170 ! albedo depends on cloud fraction because of non-linear spectral effects 171 !$OMP PARALLEL WORKSHARE 166 172 zalb_ice(:,:,:) = ( 1. - cldf_ice ) * zalb_cs(:,:,:) + cldf_ice * zalb_os(:,:,:) 173 !$OMP END PARALLEL WORKSHARE 167 174 ! In CLIO the cloud fraction is read in the climatology and the all-sky albedo 168 175 ! (zalb_ice) is computed within the bulk routine … … 203 210 IF( ln_mixcpl) THEN 204 211 CALL sbc_cpl_ice_tau( zutau_ice , zvtau_ice ) 212 !$OMP PARALLEL WORKSHARE 205 213 utau_ice(:,:) = utau_ice(:,:) * xcplmask(:,:,0) + zutau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 206 214 vtau_ice(:,:) = vtau_ice(:,:) * xcplmask(:,:,0) + zvtau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 215 !$OMP END PARALLEL WORKSHARE 207 216 ENDIF 208 217 -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcrnf.F90
r6460 r7037 124 124 ! ! set temperature & salinity content of runoffs 125 125 IF( ln_rnf_tem ) THEN ! use runoffs temperature data 126 !$OMP PARALLEL WORKSHARE 126 127 rnf_tsc(:,:,jp_tem) = ( sf_t_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 128 !$OMP END PARALLEL WORKSHARE 127 129 CALL eos_fzp( sss_m(:,:), ztfrz(:,:) ) 130 !$OMP PARALLEL WORKSHARE 128 131 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -999._wp ) ! if missing data value use SST as runoffs temperature 129 132 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 … … 132 135 rnf_tsc(:,:,jp_tem) = ztfrz(:,:) * rnf(:,:) * r1_rau0 - rnf(:,:) * rlfusisf * r1_rau0_rcp 133 136 END WHERE 137 !$OMP END PARALLEL WORKSHARE 134 138 ELSE ! use SST as runoffs temperature 139 !$OMP PARALLEL WORKSHARE 135 140 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 141 !$OMP END PARALLEL WORKSHARE 136 142 ENDIF 137 143 ! ! use runoffs salinity data 138 IF( ln_rnf_sal ) rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 144 IF( ln_rnf_sal ) THEN 145 !$OMP PARALLEL WORKSHARE 146 rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 147 !$OMP END PARALLEL WORKSHARE 148 END IF 139 149 ! ! else use S=0 for runoffs (done one for all in the init) 140 150 CALL iom_put( "runoffs", rnf ) ! output runoffs arrays … … 152 162 ELSE !* no restart: set from nit000 values 153 163 IF(lwp) WRITE(numout,*) ' nit000-1 runoff forcing fields set to nit000' 164 !$OMP PARALLEL WORKSHARE 154 165 rnf_b (:,: ) = rnf (:,: ) 155 166 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 167 !$OMP END PARALLEL WORKSHARE 156 168 ENDIF 157 169 ENDIF … … 197 209 DO jj = 1, jpj 198 210 DO ji = 1, jpi 211 !$OMP PARALLEL DO schedule(static) private(jk) 199 212 DO jk = 1, nk_rnf(ji,jj) 200 213 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) … … 203 216 END DO 204 217 ELSE !* variable volume case 218 !$OMP PARALLEL 205 219 DO jj = 1, jpj ! 
update the depth over which runoffs are distributed 206 220 DO ji = 1, jpi 207 221 h_rnf(ji,jj) = 0._wp 222 !$OMP DO schedule(static) private(jk) 208 223 DO jk = 1, nk_rnf(ji,jj) ! recalculates h_rnf to be the depth in metres 209 224 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) ! to the bottom of the relevant grid box 210 225 END DO 211 226 ! ! apply the runoff input flow 227 !$OMP DO schedule(static) private(jk) 212 228 DO jk = 1, nk_rnf(ji,jj) 213 229 phdivn(ji,jj,jk) = phdivn(ji,jj,jk) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / h_rnf(ji,jj) 214 230 END DO 231 !$OMP END DO NOWAIT 215 232 END DO 216 233 END DO 234 !$OMP END PARALLEL 217 235 ENDIF 218 236 ELSE !== runoff put only at the surface ==! 237 !$OMP PARALLEL WORKSHARE 219 238 h_rnf (:,:) = e3t_n (:,:,1) ! update h_rnf to be depth of top box 220 239 phdivn(:,:,1) = phdivn(:,:,1) - ( rnf(:,:) + rnf_b(:,:) ) * zfact * r1_rau0 / e3t_n(:,:,1) 240 !$OMP END PARALLEL WORKSHARE 221 241 ENDIF 222 242 ! … … 256 276 ln_rnf_mouth = .FALSE. ! default definition needed for example by sbc_ssr or by tra_adv_muscl 257 277 nkrnf = 0 278 !$OMP PARALLEL WORKSHARE 258 279 rnf (:,:) = 0.0_wp 259 280 rnf_b (:,:) = 0.0_wp 260 281 rnfmsk (:,:) = 0.0_wp 261 282 rnfmsk_z(:) = 0.0_wp 283 !$OMP END PARALLEL WORKSHARE 262 284 RETURN 263 285 ENDIF … … 357 379 DO ji = 1, jpi 358 380 h_rnf(ji,jj) = 0._wp 381 !$OMP PARALLEL DO schedule(static) private(jk) 359 382 DO jk = 1, nk_rnf(ji,jj) 360 383 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) … … 415 438 DO ji = 1, jpi 416 439 h_rnf(ji,jj) = 0._wp 440 !$OMP PARALLEL DO schedule(static) private(jk) 417 441 DO jk = 1, nk_rnf(ji,jj) 418 442 h_rnf(ji,jj) = h_rnf(ji,jj) + e3t_n(ji,jj,jk) … … 432 456 ENDIF 433 457 ! 458 !$OMP PARALLEL WORKSHARE 434 459 rnf(:,:) = 0._wp ! runoff initialisation 435 460 rnf_tsc(:,:,:) = 0._wp ! runoffs temperature & salinty contents initilisation 461 !$OMP END PARALLEL WORKSHARE 436 462 ! 437 463 ! ! ======================== -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssm.F90
r6748 r7037 73 73 ssv_m(:,:) = vb(:,:,1) 74 74 !$OMP END PARALLEL WORKSHARE 75 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 76 ELSE ; sst_m(:,:) = zts(:,:,jp_tem) 75 IF( l_useCT ) THEN 76 !$OMP PARALLEL WORKSHARE 77 sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 78 !$OMP END PARALLEL WORKSHARE 79 ELSE 80 !$OMP PARALLEL WORKSHARE 81 sst_m(:,:) = zts(:,:,jp_tem) 82 !$OMP END PARALLEL WORKSHARE 77 83 ENDIF 78 84 !$OMP PARALLEL WORKSHARE … … 82 88 IF( ln_apr_dyn ) THEN 83 89 !$OMP PARALLEL WORKSHARE 84 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) )90 ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 85 91 !$OMP END PARALLEL WORKSHARE 86 92 ELSE 87 93 !$OMP PARALLEL WORKSHARE 88 ssh_m(:,:) = sshn(:,:)94 ssh_m(:,:) = sshn(:,:) 89 95 !$OMP END PARALLEL WORKSHARE 90 96 ENDIF … … 107 113 ssv_m(:,:) = zcoef * vb(:,:,1) 108 114 !$OMP END PARALLEL WORKSHARE 109 IF( l_useCT ) THEN ; sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 110 ELSE ; sst_m(:,:) = zcoef * zts(:,:,jp_tem) 115 IF( l_useCT ) THEN 116 !$OMP PARALLEL WORKSHARE 117 sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 118 !$OMP END PARALLEL WORKSHARE 119 ELSE 120 !$OMP PARALLEL WORKSHARE 121 sst_m(:,:) = zcoef * zts(:,:,jp_tem) 122 !$OMP END PARALLEL WORKSHARE 111 123 ENDIF 112 124 !$OMP PARALLEL WORKSHARE … … 116 128 IF( ln_apr_dyn ) THEN 117 129 !$OMP PARALLEL WORKSHARE 118 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) )130 ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 119 131 !$OMP END PARALLEL WORKSHARE 120 132 ELSE 121 133 !$OMP PARALLEL WORKSHARE 122 ssh_m(:,:) = zcoef * sshn(:,:)134 ssh_m(:,:) = zcoef * sshn(:,:) 123 135 !$OMP END PARALLEL WORKSHARE 124 136 ENDIF … … 149 161 ssv_m(:,:) = ssv_m(:,:) + vb(:,:,1) 150 162 !$OMP END PARALLEL WORKSHARE 151 IF( l_useCT ) THEN ; sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( 
zts(:,:,jp_tem), zts(:,:,jp_sal) ) 152 ELSE ; sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 163 IF( l_useCT ) THEN 164 !$OMP PARALLEL WORKSHARE 165 sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 166 !$OMP END PARALLEL WORKSHARE 167 ELSE 168 !$OMP PARALLEL WORKSHARE 169 sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 170 !$OMP END PARALLEL WORKSHARE 153 171 ENDIF 154 172 !$OMP PARALLEL WORKSHARE … … 158 176 IF( ln_apr_dyn ) THEN 159 177 !$OMP PARALLEL WORKSHARE 160 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) )178 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 161 179 !$OMP END PARALLEL WORKSHARE 162 180 ELSE 163 181 !$OMP PARALLEL WORKSHARE 164 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:)182 ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 165 183 !$OMP END PARALLEL WORKSHARE 166 184 ENDIF … … 257 275 CALL iom_get( numror, jpdom_autoglo, 'frq_m' , frq_m ) 258 276 ELSE 277 !$OMP PARALLEL WORKSHARE 259 278 frq_m(:,:) = 1._wp ! default definition 279 !$OMP END PARALLEL WORKSHARE 260 280 ENDIF 261 281 ! -
branches/2016/dev_r6519_HPC_4/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssr.F90
r6140 r7037 93 93 ! 94 94 IF( nn_sstr == 1 ) THEN !* Temperature restoring term 95 !$OMP PARALLEL DO schedule(static) private(jj, ji, zqrp) 95 96 DO jj = 1, jpj 96 97 DO ji = 1, jpi … … 105 106 IF( nn_sssr == 1 ) THEN !* Salinity damping term (salt flux only (sfx)) 106 107 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 108 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 107 109 DO jj = 1, jpj 108 110 DO ji = 1, jpi … … 118 120 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 119 121 zerp_bnd = rn_sssr_bnd / rday ! - - 122 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 120 123 DO jj = 1, jpj 121 124 DO ji = 1, jpi
Note: See TracChangeset for help on using the changeset viewer.