Changeset 7698 for trunk/NEMOGCM/NEMO/OPA_SRC/SBC
- Timestamp:
- 2017-02-18T10:02:03+01:00 (7 years ago)
- Location:
- trunk/NEMOGCM/NEMO/OPA_SRC/SBC
- Files:
-
- 9 edited
Legend:
- Unmodified
- Added
- Removed
-
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/albedo.F90
r6416 r7698 115 115 116 116 ! Computation of ice albedo (free of snow) 117 WHERE ( ph_snw == 0._wp .AND. pt_ice >= rt0_ice ) ; zalb(:,:,:) = ralb_im 118 ELSE WHERE ; zalb(:,:,:) = ralb_if 119 END WHERE 117 !$OMP PARALLEL DO schedule(static) private(jl,jj,ji) 118 DO jl = 1, ijpl 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 IF ( ph_snw(ji,jj,jl) == 0._wp .AND. pt_ice(ji,jj,jl) >= rt0_ice ) THEN 122 zalb(ji,jj,jl) = ralb_im 123 ELSE 124 zalb(ji,jj,jl) = ralb_if 125 END IF 126 END DO 127 END DO 128 END DO 120 129 121 130 WHERE ( 1.5 < ph_ice ) ; zalb_it = zalb … … 126 135 ELSE WHERE ; zalb_it = 0.1 + 3.6 * ph_ice 127 136 END WHERE 128 137 !$OMP PARALLEL 138 !$OMP DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 129 139 DO jl = 1, ijpl 130 140 DO jj = 1, jpj … … 156 166 END DO 157 167 158 pa_ice_os(:,:,:) = pa_ice_cs(:,:,:) + rcloud ! Oberhuber correction for overcast sky 168 !$OMP DO schedule(static) private(jl, jj, ji) 169 DO jl = 1, ijpl 170 DO jj = 1, jpj 171 DO ji = 1, jpi 172 pa_ice_os(ji,jj,jl) = pa_ice_cs(ji,jj,jl) + rcloud ! Oberhuber correction for overcast sky 173 END DO 174 END DO 175 END DO 176 !$OMP END PARALLEL 159 177 160 178 !------------------------------------------ … … 193 211 z1_c2 = 1. / 0.03 194 212 ! Computation of the snow/ice albedo 213 !$OMP PARALLEL DO schedule(static) private(jl, jj, ji,zswitch,zalb_sf,zalb_sm,zalb_st) 195 214 DO jl = 1, ijpl 196 215 DO jj = 1, jpj … … 230 249 !! 231 250 REAL(wp) :: zcoef 251 INTEGER :: ji, jj ! dummy loop indices 232 252 !!---------------------------------------------------------------------- 233 253 ! 234 254 zcoef = 0.05 / ( 1.1 * rmue**1.4 + 0.15 ) ! Parameterization of Briegled and Ramanathan, 1982 235 pa_oce_cs(:,:) = zcoef 236 pa_oce_os(:,:) = 0.06 ! Parameterization of Kondratyev, 1969 and Payne, 1972 255 !$OMP PARALLEL DO schedule(static) private(jj, ji) 256 DO jj = 1, jpj 257 DO ji = 1, jpi 258 pa_oce_cs(ji,jj) = zcoef 259 pa_oce_os(ji,jj) = 0.06 ! 
Parameterization of Kondratyev, 1969 and Payne, 1972 260 END DO 261 END DO 237 262 ! 238 263 END SUBROUTINE albedo_oce -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/geo2ocean.F90
r6140 r7698 66 66 ! ! 'ij->e' = (i,j) components to east 67 67 ! ! 'ij->n' = (i,j) components to north 68 INTEGER :: ji, jj ! dummy loop indices 68 69 REAL(wp), DIMENSION(jpi,jpj), INTENT( out) :: prot 69 70 !!---------------------------------------------------------------------- … … 82 83 CASE( 'en->i' ) ! east-north to i-component 83 84 SELECT CASE (cd_type) 84 CASE ('T') ; prot(:,:) = pxin(:,:) * gcost(:,:) + pyin(:,:) * gsint(:,:) 85 CASE ('U') ; prot(:,:) = pxin(:,:) * gcosu(:,:) + pyin(:,:) * gsinu(:,:) 86 CASE ('V') ; prot(:,:) = pxin(:,:) * gcosv(:,:) + pyin(:,:) * gsinv(:,:) 87 CASE ('F') ; prot(:,:) = pxin(:,:) * gcosf(:,:) + pyin(:,:) * gsinf(:,:) 85 CASE ('T') 86 !$OMP PARALLEL DO schedule(static) private(jj,ji) 87 DO jj = 1, jpj 88 DO ji = 1, jpi 89 prot(ji,jj) = pxin(ji,jj) * gcost(ji,jj) + pyin(ji,jj) * gsint(ji,jj) 90 END DO 91 END DO 92 CASE ('U') 93 !$OMP PARALLEL DO schedule(static) private(jj,ji) 94 DO jj = 1, jpj 95 DO ji = 1, jpi 96 prot(ji,jj) = pxin(ji,jj) * gcosu(ji,jj) + pyin(ji,jj) * gsinu(ji,jj) 97 END DO 98 END DO 99 CASE ('V') 100 !$OMP PARALLEL DO schedule(static) private(jj,ji) 101 DO jj = 1, jpj 102 DO ji = 1, jpi 103 prot(ji,jj) = pxin(ji,jj) * gcosv(ji,jj) + pyin(ji,jj) * gsinv(ji,jj) 104 END DO 105 END DO 106 CASE ('F') 107 !$OMP PARALLEL DO schedule(static) private(jj,ji) 108 DO jj = 1, jpj 109 DO ji = 1, jpi 110 prot(ji,jj) = pxin(ji,jj) * gcosf(ji,jj) + pyin(ji,jj) * gsinf(ji,jj) 111 END DO 112 END DO 88 113 CASE DEFAULT ; CALL ctl_stop( 'Only T, U, V and F grid points are coded' ) 89 114 END SELECT 90 115 CASE ('en->j') ! 
east-north to j-component 91 116 SELECT CASE (cd_type) 92 CASE ('T') ; prot(:,:) = pyin(:,:) * gcost(:,:) - pxin(:,:) * gsint(:,:) 93 CASE ('U') ; prot(:,:) = pyin(:,:) * gcosu(:,:) - pxin(:,:) * gsinu(:,:) 94 CASE ('V') ; prot(:,:) = pyin(:,:) * gcosv(:,:) - pxin(:,:) * gsinv(:,:) 95 CASE ('F') ; prot(:,:) = pyin(:,:) * gcosf(:,:) - pxin(:,:) * gsinf(:,:) 117 CASE ('T') 118 !$OMP PARALLEL DO schedule(static) private(jj,ji) 119 DO jj = 1, jpj 120 DO ji = 1, jpi 121 prot(ji,jj) = pyin(ji,jj) * gcost(ji,jj) - pxin(ji,jj) * gsint(ji,jj) 122 END DO 123 END DO 124 CASE ('U') 125 !$OMP PARALLEL DO schedule(static) private(jj,ji) 126 DO jj = 1, jpj 127 DO ji = 1, jpi 128 prot(ji,jj) = pyin(ji,jj) * gcosu(ji,jj) - pxin(ji,jj) * gsinu(ji,jj) 129 END DO 130 END DO 131 CASE ('V') 132 !$OMP PARALLEL DO schedule(static) private(jj,ji) 133 DO jj = 1, jpj 134 DO ji = 1, jpi 135 prot(ji,jj) = pyin(ji,jj) * gcosv(ji,jj) - pxin(ji,jj) * gsinv(ji,jj) 136 END DO 137 END DO 138 CASE ('F') 139 !$OMP PARALLEL DO schedule(static) private(jj,ji) 140 DO jj = 1, jpj 141 DO ji = 1, jpi 142 prot(ji,jj) = pyin(ji,jj) * gcosf(ji,jj) - pxin(ji,jj) * gsinf(ji,jj) 143 END DO 144 END DO 96 145 CASE DEFAULT ; CALL ctl_stop( 'Only T, U, V and F grid points are coded' ) 97 146 END SELECT 98 147 CASE ('ij->e') ! 
(i,j)-components to east 99 148 SELECT CASE (cd_type) 100 CASE ('T') ; prot(:,:) = pxin(:,:) * gcost(:,:) - pyin(:,:) * gsint(:,:) 101 CASE ('U') ; prot(:,:) = pxin(:,:) * gcosu(:,:) - pyin(:,:) * gsinu(:,:) 102 CASE ('V') ; prot(:,:) = pxin(:,:) * gcosv(:,:) - pyin(:,:) * gsinv(:,:) 103 CASE ('F') ; prot(:,:) = pxin(:,:) * gcosf(:,:) - pyin(:,:) * gsinf(:,:) 149 CASE ('T') 150 !$OMP PARALLEL DO schedule(static) private(jj,ji) 151 DO jj = 1, jpj 152 DO ji = 1, jpi 153 prot(ji,jj) = pxin(ji,jj) * gcost(ji,jj) - pyin(ji,jj) * gsint(ji,jj) 154 END DO 155 END DO 156 CASE ('U') 157 !$OMP PARALLEL DO schedule(static) private(jj,ji) 158 DO jj = 1, jpj 159 DO ji = 1, jpi 160 prot(ji,jj) = pxin(ji,jj) * gcosu(ji,jj) - pyin(ji,jj) * gsinu(ji,jj) 161 END DO 162 END DO 163 CASE ('V') 164 !$OMP PARALLEL DO schedule(static) private(jj,ji) 165 DO jj = 1, jpj 166 DO ji = 1, jpi 167 prot(ji,jj) = pxin(ji,jj) * gcosv(ji,jj) - pyin(ji,jj) * gsinv(ji,jj) 168 END DO 169 END DO 170 CASE ('F') 171 !$OMP PARALLEL DO schedule(static) private(jj,ji) 172 DO jj = 1, jpj 173 DO ji = 1, jpi 174 prot(ji,jj) = pxin(ji,jj) * gcosf(ji,jj) - pyin(ji,jj) * gsinf(ji,jj) 175 END DO 176 END DO 104 177 CASE DEFAULT ; CALL ctl_stop( 'Only T, U, V and F grid points are coded' ) 105 178 END SELECT 106 179 CASE ('ij->n') ! 
(i,j)-components to north 107 180 SELECT CASE (cd_type) 108 CASE ('T') ; prot(:,:) = pyin(:,:) * gcost(:,:) + pxin(:,:) * gsint(:,:) 109 CASE ('U') ; prot(:,:) = pyin(:,:) * gcosu(:,:) + pxin(:,:) * gsinu(:,:) 110 CASE ('V') ; prot(:,:) = pyin(:,:) * gcosv(:,:) + pxin(:,:) * gsinv(:,:) 111 CASE ('F') ; prot(:,:) = pyin(:,:) * gcosf(:,:) + pxin(:,:) * gsinf(:,:) 181 CASE ('T') 182 !$OMP PARALLEL DO schedule(static) private(jj,ji) 183 DO jj = 1, jpj 184 DO ji = 1, jpi 185 prot(ji,jj) = pyin(ji,jj) * gcost(ji,jj) + pxin(ji,jj) * gsint(ji,jj) 186 END DO 187 END DO 188 CASE ('U') 189 !$OMP PARALLEL DO schedule(static) private(jj,ji) 190 DO jj = 1, jpj 191 DO ji = 1, jpi 192 prot(ji,jj) = pyin(ji,jj) * gcosu(ji,jj) + pxin(ji,jj) * gsinu(ji,jj) 193 END DO 194 END DO 195 CASE ('V') 196 !$OMP PARALLEL DO schedule(static) private(jj,ji) 197 DO jj = 1, jpj 198 DO ji = 1, jpi 199 prot(ji,jj) = pyin(ji,jj) * gcosv(ji,jj) + pxin(ji,jj) * gsinv(ji,jj) 200 END DO 201 END DO 202 CASE ('F') 203 !$OMP PARALLEL DO schedule(static) private(jj,ji) 204 DO jj = 1, jpj 205 DO ji = 1, jpi 206 prot(ji,jj) = pyin(ji,jj) * gcosf(ji,jj) + pxin(ji,jj) * gsinf(ji,jj) 207 END DO 208 END DO 112 209 CASE DEFAULT ; CALL ctl_stop( 'Only T, U, V and F grid points are coded' ) 113 210 END SELECT … … 157 254 ! (computation done on the north stereographic polar plane) 158 255 ! 256 !$OMP PARALLEL 257 !$OMP DO schedule(static) private(jj,ji,zlam,zphi,zxnpt,zynpt,znnpt,zxnpu,zynpu,znnpu,zxnpv,zynpv,znnpv,zxnpf) & 258 !$OMP& private(zynpf,znnpf,zlan,zphh,zxvvt,zyvvt,znvvt,zxffu,zyffu,znffu,zxffv,zyffv,znffv,zxuuf,zyuuf,znuuf) 159 259 DO jj = 2, jpjm1 160 260 DO ji = fs_2, jpi ! vector opt. … … 248 348 ! =============== ! 249 349 350 !$OMP DO schedule(static) private(jj,ji) 250 351 DO jj = 2, jpjm1 251 352 DO ji = fs_2, jpi ! vector opt. … … 268 369 END DO 269 370 END DO 371 !$OMP END DO NOWAIT 372 !$OMP END PARALLEL 270 373 271 374 ! =========================== ! -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk.F90
r7646 r7698 316 316 #if defined key_cice 317 317 IF( MOD( kt - 1, nn_fsbc ) == 0 ) THEN 318 qlw_ice(:,:,1) = sf(jp_qlw )%fnow(:,:,1) 319 IF( ln_dm2dc ) THEN ; qsr_ice(:,:,1) = sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) 320 ELSE ; qsr_ice(:,:,1) = sf(jp_qsr)%fnow(:,:,1) 318 !$OMP PARALLEL DO schedule(static) private(jj, ji) 319 DO jj = 1, jpj 320 DO ji = 1, jpi 321 qlw_ice(ji,jj,1) = sf(jp_qlw)%fnow(ji,jj,1) 322 END DO 323 END DO 324 IF( ln_dm2dc ) THEN ; qsr_ice(:,:,1) = sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) 325 ELSE 326 !$OMP PARALLEL DO schedule(static) private(jj, ji) 327 DO jj = 1, jpj 328 DO ji = 1, jpi 329 qsr_ice(ji,jj,1) = sf(jp_qsr)%fnow(ji,jj,1) 330 END DO 331 END DO 321 332 ENDIF 322 tatm_ice(:,:) = sf(jp_tair)%fnow(:,:,1) 323 qatm_ice(:,:) = sf(jp_humi)%fnow(:,:,1) 324 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac 325 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac 326 wndi_ice(:,:) = sf(jp_wndi)%fnow(:,:,1) 327 wndj_ice(:,:) = sf(jp_wndj)%fnow(:,:,1) 333 !$OMP PARALLEL DO schedule(static) private(jj, ji) 334 DO jj = 1, jpj 335 DO ji = 1, jpi 336 tatm_ice(ji,jj) = sf(jp_tair)%fnow(ji,jj,1) 337 qatm_ice(ji,jj) = sf(jp_humi)%fnow(ji,jj,1) 338 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac 339 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac 340 wndi_ice(ji,jj) = sf(jp_wndi)%fnow(ji,jj,1) 341 wndj_ice(ji,jj) = sf(jp_wndj)%fnow(ji,jj,1) 342 END DO 343 END DO 328 344 ENDIF 329 345 #endif … … 382 398 ! 383 399 384 ! local scalars ( place there for vector optimisation purposes) 385 zst(:,:) = pst(:,:) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 386 400 !$OMP PARALLEL DO schedule(static) private(jj, ji) 401 DO jj = 1, jpj 402 DO ji = 1, jpi 403 ! local scalars ( place there for vector optimisation purposes) 404 zst(ji,jj) = pst(ji,jj) + rt0 ! convert SST from Celcius to Kelvin (and set minimum value far above 0 K) 405 406 ! ... 
components ( U10m - U_oce ) at T-point (unmasked) 407 !!gm move zwnd_i (_j) set to zero inside the key_cyclone ??? 408 zwnd_i(ji,jj) = 0._wp 409 zwnd_j(ji,jj) = 0._wp 410 END DO 411 END DO 387 412 ! ----------------------------------------------------------------------------- ! 388 413 ! 0 Wind components and module at T-point relative to the moving ocean ! 389 414 ! ----------------------------------------------------------------------------- ! 390 415 391 ! ... components ( U10m - U_oce ) at T-point (unmasked)392 !!gm move zwnd_i (_j) set to zero inside the key_cyclone ???393 zwnd_i(:,:) = 0._wp394 zwnd_j(:,:) = 0._wp395 416 #if defined key_cyclone 396 417 CALL wnd_cyc( kt, zwnd_i, zwnd_j ) ! add analytical tropical cyclone (Vincent et al. JGR 2012) 418 !$OMP PARALLEL DO schedule(static) private(jj, ji) 397 419 DO jj = 2, jpjm1 398 420 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 402 424 END DO 403 425 #endif 426 !$OMP PARALLEL DO schedule(static) private(jj, ji) 404 427 DO jj = 2, jpjm1 405 428 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 411 434 CALL lbc_lnk( zwnd_j(:,:) , 'T', -1. ) 412 435 ! ... scalar wind ( = | U10m - U_oce | ) at T-point (masked) 413 wndm(:,:) = SQRT( zwnd_i(:,:) * zwnd_i(:,:) & 414 & + zwnd_j(:,:) * zwnd_j(:,:) ) * tmask(:,:,1) 415 436 !$OMP PARALLEL DO schedule(static) private(jj, ji) 437 DO jj = 1, jpj 438 DO ji = 1, jpi 439 wndm(ji,jj) = SQRT( zwnd_i(ji,jj) * zwnd_i(ji,jj) & 440 & + zwnd_j(ji,jj) * zwnd_j(ji,jj) ) * tmask(ji,jj,1) 441 442 END DO 443 END DO 416 444 ! ----------------------------------------------------------------------------- ! 417 445 ! I Radiative FLUXES ! … … 421 449 zztmp = 1. 
- albo 422 450 IF( ln_dm2dc ) THEN ; qsr(:,:) = zztmp * sbc_dcy( sf(jp_qsr)%fnow(:,:,1) ) * tmask(:,:,1) 423 ELSE ; qsr(:,:) = zztmp * sf(jp_qsr)%fnow(:,:,1) * tmask(:,:,1) 451 ELSE 452 !$OMP PARALLEL DO schedule(static) private(jj, ji) 453 DO jj = 1, jpj 454 DO ji = 1, jpi 455 qsr(ji,jj) = zztmp * sf(jp_qsr)%fnow(ji,jj,1) * tmask(ji,jj,1) 456 END DO 457 END DO 424 458 ENDIF 425 459 426 zqlw(:,:) = ( sf(jp_qlw)%fnow(:,:,1) - Stef * zst(:,:)*zst(:,:)*zst(:,:)*zst(:,:) ) * tmask(:,:,1) ! Long Wave 460 !$OMP PARALLEL DO schedule(static) private(jj, ji) 461 DO jj = 1, jpj 462 DO ji = 1, jpi 463 zqlw(ji,jj) = ( sf(jp_qlw)%fnow(ji,jj,1) - Stef * zst(ji,jj)*zst(ji,jj)*zst(ji,jj)*zst(ji,jj) ) * tmask(ji,jj,1) ! Long Wave 464 END DO 465 END DO 427 466 428 467 … … 461 500 END IF 462 501 463 Cd_oce(:,:) = Cd(:,:) ! record value of pure ocean-atm. drag (clem) 464 502 !$OMP PARALLEL 503 !$OMP DO schedule(static) private(jj, ji) 504 DO jj = 1, jpj 505 DO ji = 1, jpi 506 Cd_oce(ji,jj) = Cd(ji,jj) ! record value of pure ocean-atm. drag (clem) 507 END DO 508 END DO 509 510 !$OMP DO schedule(static) private(jj, ji) 465 511 DO jj = 1, jpj ! tau module, i and j component 466 512 DO ji = 1, jpi … … 471 517 END DO 472 518 END DO 519 !$OMP END PARALLEL 473 520 474 521 ! ! add the HF tau contribution to the wind stress module 475 IF( lhftau ) taum(:,:) = taum(:,:) + sf(jp_tdif)%fnow(:,:,1) 522 IF( lhftau ) THEN 523 !$OMP PARALLEL DO schedule(static) private(jj, ji) 524 DO jj = 1, jpj 525 DO ji = 1, jpi 526 taum(ji,jj) = taum(ji,jj) + sf(jp_tdif)%fnow(ji,jj,1) 527 END DO 528 END DO 529 END IF 476 530 477 531 CALL iom_put( "taum_oce", taum ) ! output wind stress module … … 480 534 ! Note the use of 0.5*(2-umask) in order to unmask the stress along coastlines 481 535 ! Note the use of MAX(tmask(i,j),tmask(i+1,j) is to mask tau over ice shelves 536 !$OMP PARALLEL DO schedule(static) private(jj, ji) 482 537 DO jj = 1, jpjm1 483 538 DO ji = 1, fs_jpim1 … … 496 551 497 552 ! 
zqla used as temporary array, for rho*U (common term of bulk formulae): 498 zqla(:,:) = zrhoa(:,:) * zU_zu(:,:) 553 !$OMP PARALLEL DO schedule(static) private(jj, ji) 554 DO jj = 1, jpj 555 DO ji = 1, jpi 556 zqla(ji,jj) = zrhoa(ji,jj) * zU_zu(ji,jj) 557 END DO 558 END DO 499 559 500 560 IF( ABS( rn_zu - rn_zqt) < 0.01_wp ) THEN 501 561 !! q_air and t_air are given at 10m (wind reference height) 502 zevap(:,:) = rn_efac*MAX( 0._wp, zqla(:,:)*Ce(:,:)*(zsq(:,:) - sf(jp_humi)%fnow(:,:,1)) ) ! Evaporation, using bulk wind speed 503 zqsb (:,:) = cp_air(sf(jp_humi)%fnow(:,:,1))*zqla(:,:)*Ch(:,:)*(zst(:,:) - ztpot(:,:) ) ! Sensible Heat, using bulk wind speed 562 !$OMP PARALLEL DO schedule(static) private(jj, ji) 563 DO jj = 1, jpj 564 DO ji = 1, jpi 565 zevap(ji,jj) = rn_efac*MAX( 0._wp, zqla(ji,jj)*Ce(ji,jj)*(zsq(ji,jj) - sf(jp_humi)%fnow(ji,jj,1)) ) ! Evaporation, using bulk wind speed 566 END DO 567 END DO 568 zqsb (:,:) = cp_air(sf(jp_humi)%fnow(:,:,1))*zqla(:,:)*Ch(:,:)*(zst(:,:) - ztpot(:,:) ) ! Sensible Heat, using bulk wind speed 504 569 ELSE 505 570 !! q_air and t_air are not given at 10m (wind reference height) 506 571 ! Values of temp. and hum. adjusted to height of wind during bulk algorithm iteration must be used!!! 507 zevap(:,:) = rn_efac*MAX( 0._wp, zqla(:,:)*Ce(:,:)*(zsq(:,:) - zq_zu(:,:) ) ) ! Evaporation ! using bulk wind speed 572 !$OMP PARALLEL DO schedule(static) private(jj, ji) 573 DO jj = 1, jpj 574 DO ji = 1, jpi 575 zevap(ji,jj) = rn_efac*MAX( 0._wp, zqla(ji,jj)*Ce(ji,jj)*(zsq(ji,jj) - zq_zu(ji,jj) ) ) ! Evaporation ! using bulk wind speed 576 END DO 577 END DO 508 578 zqsb (:,:) = cp_air(sf(jp_humi)%fnow(:,:,1))*zqla(:,:)*Ch(:,:)*(zst(:,:) - zt_zu(:,:) ) ! Sensible Heat ! using bulk wind speed 509 579 ENDIF … … 527 597 ! ----------------------------------------------------------------------------- ! 528 598 ! 529 emp (:,:) = ( zevap(:,:) & ! mass flux (evap. - precip.) 530 & - sf(jp_prec)%fnow(:,:,1) * rn_pfac ) * tmask(:,:,1) 531 ! 
532 qns(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) & ! Downward Non Solar 533 & - sf(jp_snow)%fnow(:,:,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 534 & - zevap(:,:) * pst(:,:) * rcp & ! remove evap heat content at SST 535 & + ( sf(jp_prec)%fnow(:,:,1) - sf(jp_snow)%fnow(:,:,1) ) * rn_pfac & ! add liquid precip heat content at Tair 536 & * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & 537 & + sf(jp_snow)%fnow(:,:,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 538 & * ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) 539 ! 599 !$OMP PARALLEL DO schedule(static) private(jj, ji) 600 DO jj = 1, jpj 601 DO ji = 1, jpi 602 emp (ji,jj) = ( zevap(ji,jj) & ! mass flux (evap. - precip.) 603 & - sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ) * tmask(ji,jj,1) 604 ! 605 qns(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) & ! Downward Non Solar 606 & - sf(jp_snow)%fnow(ji,jj,1) * rn_pfac * lfus & ! remove latent melting heat for solid precip 607 & - zevap(ji,jj) * pst(ji,jj) * rcp & ! remove evap heat content at SST 608 & + ( sf(jp_prec)%fnow(ji,jj,1) - sf(jp_snow)%fnow(ji,jj,1) ) * rn_pfac & ! add liquid precip heat content at Tair 609 & * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp & 610 & + sf(jp_snow)%fnow(ji,jj,1) * rn_pfac & ! add solid precip heat content at min(Tair,Tsnow) 611 & * ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) 612 ! 540 613 #if defined key_lim3 541 qns_oce(:,:) = zqlw(:,:) - zqsb(:,:) - zqla(:,:) ! non solar without emp (only needed by LIM3)542 qsr_oce(:,:) = qsr(:,:)614 qns_oce(ji,jj) = zqlw(ji,jj) - zqsb(ji,jj) - zqla(ji,jj) ! non solar without emp (only needed by LIM3) 615 qsr_oce(ji,jj) = qsr(ji,jj) 543 616 #endif 617 END DO 618 END DO 544 619 ! 545 620 IF ( nn_ice == 0 ) THEN … … 551 626 CALL iom_put( "qsr_oce" , qsr ) ! output downward solar heat over the ocean 552 627 CALL iom_put( "qt_oce" , qns+qsr ) ! 
output total downward heat over the ocean 553 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! output total precipitation [kg/m2/s] 554 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! output solid precipitation [kg/m2/s] 628 !$OMP PARALLEL DO schedule(static) private(jj, ji) 629 DO jj = 1, jpj 630 DO ji = 1, jpi 631 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! output total precipitation [kg/m2/s] 632 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! output solid precipitation [kg/m2/s] 633 END DO 634 END DO 555 635 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow 556 636 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 599 679 CALL wrk_alloc( jpi,jpj, Cd ) 600 680 601 Cd(:,:) = Cd_ice 681 !$OMP PARALLEL DO schedule(static) private(jj, ji) 682 DO jj = 1, jpj 683 DO ji = 1, jpi 684 Cd(ji,jj) = Cd_ice 685 END DO 686 END DO 602 687 603 688 ! Make ice-atm. drag dependent on ice concentration (see Lupkes et al. 2012) (clem) … … 613 698 zrhoa (:,:) = rho_air(sf(jp_tair)%fnow(:,:,1), sf(jp_humi)%fnow(:,:,1), sf(jp_slp)%fnow(:,:,1)) 614 699 615 !!gm brutal.... 616 utau_ice (:,:) = 0._wp 617 vtau_ice (:,:) = 0._wp 618 wndm_ice (:,:) = 0._wp 619 !!gm end 700 !$OMP PARALLEL DO schedule(static) private(jj, ji) 701 DO jj = 1, jpj 702 DO ji = 1, jpi 703 !!gm brutal.... 704 utau_ice (ji,jj) = 0._wp 705 vtau_ice (ji,jj) = 0._wp 706 wndm_ice (ji,jj) = 0._wp 707 !!gm end 708 END DO 709 END DO 620 710 621 711 ! ----------------------------------------------------------------------------- ! … … 625 715 CASE( 'I' ) ! B-grid ice dynamics : I-point (i.e. F-point with sea-ice indexation) 626 716 ! and scalar wind at T-point ( = | U10m - U_ice | ) (masked) 717 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_f,zwndj_f,zwnorm_f,zwndi_t,zwndj_t) 627 718 DO jj = 2, jpjm1 628 719 DO ji = 2, jpim1 ! B grid : NO vector opt … … 649 740 ! 650 741 CASE( 'C' ) ! 
C-grid ice dynamics : U & V-points (same as ocean) 742 !$OMP PARALLEL DO schedule(static) private(jj,ji,zwndi_t,zwndj_t) 651 743 DO jj = 2, jpj 652 744 DO ji = fs_2, jpi ! vect. opt. … … 656 748 END DO 657 749 END DO 750 !$OMP PARALLEL DO schedule(static) private(jj,ji) 658 751 DO jj = 2, jpjm1 659 752 DO ji = fs_2, fs_jpim1 ! vect. opt. … … 700 793 REAL(wp) :: zztmp, z1_lsub ! - - 701 794 REAL(wp), DIMENSION(:,:,:), POINTER :: z_qlw ! long wave heat flux over ice 795 REAL(wp), DIMENSION(:,:,:), POINTER :: zevap_ice3d, zqns_ice3d, zqsr_ice3d 702 796 REAL(wp), DIMENSION(:,:,:), POINTER :: z_qsb ! sensible heat flux over ice 703 797 REAL(wp), DIMENSION(:,:,:), POINTER :: z_dqlw ! long wave heat sensitivity over ice 704 798 REAL(wp), DIMENSION(:,:,:), POINTER :: z_dqsb ! sensible heat sensitivity over ice 705 799 REAL(wp), DIMENSION(:,:) , POINTER :: zevap, zsnw ! evaporation and snw distribution after wind blowing (LIM3) 800 REAL(wp), DIMENSION(:,:) , POINTER :: zevap_ice2d, zqns_ice2d, zqsr_ice2d 706 801 REAL(wp), DIMENSION(:,:) , POINTER :: zrhoa 707 802 REAL(wp), DIMENSION(:,:) , POINTER :: Cd ! transfer coefficient for momentum (tau) … … 710 805 IF( nn_timing == 1 ) CALL timing_start('blk_ice_flx') 711 806 ! 712 CALL wrk_alloc( jpi,jpj,jpl, z_qlw, z_qsb, z_dqlw, z_dqsb )713 CALL wrk_alloc( jpi,jpj, zrhoa )807 CALL wrk_alloc( jpi,jpj,jpl, z_qlw, z_qsb, z_dqlw, z_dqsb, zevap_ice3d, zqns_ice3d, zqsr_ice3d ) 808 CALL wrk_alloc( jpi,jpj, zrhoa, zevap_ice2d, zqns_ice2d, zqsr_ice2d) 714 809 CALL wrk_alloc( jpi,jpj, Cd ) 715 810 716 Cd(:,:) = Cd_ice 811 !$OMP PARALLEL DO schedule(static) private(jj, ji) 812 DO jj = 1, jpj 813 DO ji = 1, jpi 814 Cd(ji,jj) = Cd_ice 815 END DO 816 END DO 717 817 718 818 ! Make ice-atm. drag dependent on ice concentration (see Lupkes et al. 2012) (clem) … … 731 831 ! 732 832 zztmp = 1. / ( 1. - albo ) 733 ! ! ========================== ! 734 DO jl = 1, jpl ! Loop over ice categories ! 735 ! ! ========================== ! 
833 !$OMP PARALLEL 834 !$OMP DO schedule(static) private(jl,jj,ji,zst2,zst3) ! ========================== ! 835 DO jl = 1, jpl ! Loop over ice categories ! 836 ! ! ========================== ! 736 837 DO jj = 1 , jpj 737 838 DO ji = 1, jpi … … 781 882 END DO 782 883 ! 783 tprecip(:,:) = sf(jp_prec)%fnow(:,:,1) * rn_pfac ! total precipitation [kg/m2/s] 784 sprecip(:,:) = sf(jp_snow)%fnow(:,:,1) * rn_pfac ! solid precipitation [kg/m2/s] 884 !$OMP DO schedule(static) private(jj, ji) 885 DO jj = 1, jpj 886 DO ji = 1, jpi 887 tprecip(ji,jj) = sf(jp_prec)%fnow(ji,jj,1) * rn_pfac ! total precipitation [kg/m2/s] 888 sprecip(ji,jj) = sf(jp_snow)%fnow(ji,jj,1) * rn_pfac ! solid precipitation [kg/m2/s] 889 END DO 890 END DO 891 !$OMP END PARALLEL 785 892 CALL iom_put( 'snowpre', sprecip * 86400. ) ! Snow precipitation 786 893 CALL iom_put( 'precip' , tprecip * 86400. ) ! Total precipitation … … 791 898 ! --- evaporation --- ! 792 899 z1_lsub = 1._wp / Lsub 793 evap_ice (:,:,:) = rn_efac * qla_ice (:,:,:) * z1_lsub ! sublimation 794 devap_ice(:,:,:) = rn_efac * dqla_ice(:,:,:) * z1_lsub ! d(sublimation)/dT 795 zevap (:,:) = rn_efac * ( emp(:,:) + tprecip(:,:) ) ! evaporation over ocean 796 797 ! --- evaporation minus precipitation --- ! 798 zsnw(:,:) = 0._wp 900 !$OMP PARALLEL 901 !$OMP DO schedule(static) private(jl,jj,ji) 902 DO jl = 1, jpl 903 DO jj = 1 , jpj 904 DO ji = 1, jpi 905 evap_ice (ji,jj,jl) = rn_efac * qla_ice (ji,jj,jl) * z1_lsub ! sublimation 906 devap_ice(ji,jj,jl) = rn_efac * dqla_ice(ji,jj,jl) * z1_lsub ! d(sublimation)/dT 907 END DO 908 END DO 909 END DO 910 ! 911 !$OMP DO schedule(static) private(jj, ji) 912 DO jj = 1, jpj 913 DO ji = 1, jpi 914 zevap (ji,jj) = rn_efac * ( emp(ji,jj) + tprecip(ji,jj) ) ! evaporation over ocean 915 916 ! --- evaporation minus precipitation --- ! 917 zsnw(ji,jj) = 0._wp 918 END DO 919 END DO 920 !$OMP END PARALLEL 799 921 CALL lim_thd_snwblow( pfrld, zsnw ) ! 
snow distribution over ice after wind blowing 800 emp_oce(:,:) = pfrld(:,:) * zevap(:,:) - ( tprecip(:,:) - sprecip(:,:) ) - sprecip(:,:) * (1._wp - zsnw ) 801 emp_ice(:,:) = SUM( a_i_b(:,:,:) * evap_ice(:,:,:), dim=3 ) - sprecip(:,:) * zsnw 802 emp_tot(:,:) = emp_oce(:,:) + emp_ice(:,:) 803 804 ! --- heat flux associated with emp --- ! 805 qemp_oce(:,:) = - pfrld(:,:) * zevap(:,:) * sst_m(:,:) * rcp & ! evap at sst 806 & + ( tprecip(:,:) - sprecip(:,:) ) * ( sf(jp_tair)%fnow(:,:,1) - rt0 ) * rcp & ! liquid precip at Tair 807 & + sprecip(:,:) * ( 1._wp - zsnw ) * & ! solid precip at min(Tair,Tsnow) 808 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 809 qemp_ice(:,:) = sprecip(:,:) * zsnw * & ! solid precip (only) 810 & ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 811 812 ! --- total solar and non solar fluxes --- ! 813 qns_tot(:,:) = pfrld(:,:) * qns_oce(:,:) + SUM( a_i_b(:,:,:) * qns_ice(:,:,:), dim=3 ) + qemp_ice(:,:) + qemp_oce(:,:) 814 qsr_tot(:,:) = pfrld(:,:) * qsr_oce(:,:) + SUM( a_i_b(:,:,:) * qsr_ice(:,:,:), dim=3 ) 815 816 ! --- heat content of precip over ice in J/m3 (to be used in 1D-thermo) --- ! 
817 qprec_ice(:,:) = rhosn * ( ( MIN( sf(jp_tair)%fnow(:,:,1), rt0_snow ) - rt0 ) * cpic * tmask(:,:,1) - lfus ) 922 !$OMP PARALLEL 923 !$OMP DO schedule(static) private(jj,ji) 924 DO jj = 1, jpj 925 DO ji = 1, jpi 926 emp_oce(ji,jj) = pfrld(ji,jj) * zevap(ji,jj) - ( tprecip(ji,jj) - sprecip(ji,jj) ) - sprecip(ji,jj) * (1._wp - zsnw(ji,jj)) 927 END DO 928 END DO 929 !$OMP END DO NOWAIT 930 !$OMP DO schedule(static) private(jl,jj,ji) 931 DO jl = 1, jpl 932 DO jj = 1 , jpj 933 DO ji = 1, jpi 934 zevap_ice3d(ji,jj,jl) = a_i_b(ji,jj,jl) * evap_ice(ji,jj,jl) 935 zqns_ice3d(ji,jj,jl) = a_i_b(ji,jj,jl) * qns_ice(ji,jj,jl) 936 zqsr_ice3d(ji,jj,jl) = a_i_b(ji,jj,jl) * qsr_ice(ji,jj,jl) 937 END DO 938 END DO 939 END DO 940 !$OMP END DO NOWAIT 941 !$OMP DO schedule(static) private(jj,ji) 942 DO jj = 1, jpj 943 DO ji = 1, jpi 944 zevap_ice2d(ji,jj) = 0._wp 945 zqns_ice2d(ji,jj) = 0._wp 946 zqsr_ice2d(ji,jj) = 0._wp 947 END DO 948 END DO 949 DO jl = 1, jpl 950 !$OMP DO schedule(static) private(jj,ji) 951 DO jj = 1 , jpj 952 DO ji = 1, jpi 953 zevap_ice2d(ji,jj) = zevap_ice2d(ji,jj) + zevap_ice3d(ji,jj,jl) 954 zqns_ice2d(ji,jj) = zqns_ice2d(ji,jj) + zqns_ice3d(ji,jj,jl) 955 zqsr_ice2d(ji,jj) = zqsr_ice2d(ji,jj) + zqsr_ice3d(ji,jj,jl) 956 END DO 957 END DO 958 END DO 959 !$OMP DO schedule(static) private(jj,ji) 960 DO jj = 1 , jpj 961 DO ji = 1, jpi 962 emp_ice(ji,jj) = zevap_ice2d(ji,jj) - sprecip(ji,jj) * zsnw(ji,jj) 963 emp_tot(ji,jj) = emp_oce(ji,jj) + emp_ice(ji,jj) 964 965 ! --- heat flux associated with emp --- ! 966 qemp_oce(ji,jj) = - pfrld(ji,jj) * zevap(ji,jj) * sst_m(ji,jj) * rcp & ! evap at sst 967 & + ( tprecip(ji,jj) - sprecip(ji,jj) ) * ( sf(jp_tair)%fnow(ji,jj,1) - rt0 ) * rcp & ! liquid precip at Tair 968 & + sprecip(ji,jj) * ( 1._wp - zsnw(ji,jj) ) * & ! solid precip at min(Tair,Tsnow) 969 & ( ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) - lfus ) 970 qemp_ice(ji,jj) = sprecip(ji,jj) * zsnw(ji,jj) * & ! 
solid precip (only) 971 & ( ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) - lfus ) 972 973 ! --- total solar and non solar fluxes --- ! 974 qns_tot(ji,jj) = pfrld(ji,jj) * qns_oce(ji,jj) + zqns_ice2d(ji,jj) + qemp_ice(ji,jj) + qemp_oce(ji,jj) 975 qsr_tot(ji,jj) = pfrld(ji,jj) * qsr_oce(ji,jj) + zqsr_ice2d(ji,jj) 976 977 ! --- heat content of precip over ice in J/m3 (to be used in 1D-thermo) --- ! 978 qprec_ice(ji,jj) = rhosn * ( ( MIN( sf(jp_tair)%fnow(ji,jj,1), rt0_snow ) - rt0 ) * cpic * tmask(ji,jj,1) - lfus ) 979 END DO 980 END DO 981 !$OMP END DO NOWAIT 818 982 819 983 ! --- heat content of evap over ice in W/m2 (to be used in 1D-thermo) --- 984 !$OMP DO schedule(static) private(jl,jj,ji) 820 985 DO jl = 1, jpl 821 qevap_ice(:,:,jl) = 0._wp ! should be -evap_ice(:,:,jl)*( ( Tice - rt0 ) * cpic * tmask(:,:,1) ) 822 ! But we do not have Tice => consider it at 0degC => evap=0 823 END DO 986 DO jj = 1, jpj 987 DO ji = 1, jpi 988 qevap_ice(ji,jj,jl) = 0._wp ! should be -evap_ice(:,:,jl)*( ( Tice - rt0 ) * cpic * tmask(:,:,1) ) 989 ! But we do not have Tice => consider it at 0degC => evap=0 990 END DO 991 END DO 992 END DO 993 !$OMP END PARALLEL 824 994 825 995 CALL wrk_dealloc( jpi,jpj, zevap, zsnw ) … … 831 1001 ! ( Maykut and Untersteiner, 1971 ; Ebert and Curry, 1993 ) 832 1002 ! 833 fr1_i0(:,:) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 834 fr2_i0(:,:) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 1003 !$OMP PARALLEL DO schedule(static) private(jj,ji) 1004 DO jj = 1, jpj 1005 DO ji = 1, jpi 1006 fr1_i0(ji,jj) = ( 0.18 * ( 1.0 - cldf_ice ) + 0.35 * cldf_ice ) 1007 fr2_i0(ji,jj) = ( 0.82 * ( 1.0 - cldf_ice ) + 0.65 * cldf_ice ) 1008 END DO 1009 END DO 835 1010 ! 836 1011 ! 
… … 844 1019 ENDIF 845 1020 846 CALL wrk_dealloc( jpi,jpj,jpl, z_qlw, z_qsb, z_dqlw, z_dqsb )1021 CALL wrk_dealloc( jpi,jpj,jpl, z_qlw, z_qsb, z_dqlw, z_dqsb, zevap_ice3d, zqns_ice3d, zqsr_ice3d ) 847 1022 CALL wrk_dealloc( jpi,jpj, zrhoa ) 848 CALL wrk_dealloc( jpi,jpj, Cd 1023 CALL wrk_dealloc( jpi,jpj, Cd, zevap_ice2d, zqns_ice2d, zqsr_ice2d) 849 1024 ! 850 1025 IF( nn_timing == 1 ) CALL timing_stop('blk_ice_flx') … … 908 1083 !!---------------------------------------------------------------------------------- 909 1084 ! 1085 !$OMP PARALLEL DO schedule(static) private(jj,ji,ztmp,ze_sat) 910 1086 DO jj = 1, jpj 911 1087 DO ji = 1, jpi … … 944 1120 !!---------------------------------------------------------------------------------- 945 1121 ! 1122 !$OMP PARALLEL DO schedule(static) private(jj,ji,zrv,ziRT) 946 1123 DO jj = 1, jpj 947 1124 DO ji = 1, jpi -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcblk_algo_ncar.F90
r7646 r7698 114 114 ! 115 115 INTEGER :: j_itt 116 INTEGER :: ji, jj ! dummy loop indices 116 117 LOGICAL :: l_zt_equal_zu = .FALSE. ! if q and t are given at same height as U 117 118 INTEGER , PARAMETER :: nb_itt = 4 ! number of itterations … … 141 142 !! Neutral coefficients at 10m: 142 143 IF( ln_cdgw ) THEN ! wave drag case 143 cdn_wave(:,:) = cdn_wave(:,:) + rsmall * ( 1._wp - tmask(:,:,1) ) 144 ztmp0 (:,:) = cdn_wave(:,:) 144 !$OMP PARALLEL DO schedule(static) private(jj, ji) 145 DO jj = 1, jpj 146 DO ji = 1, jpi 147 cdn_wave(ji,jj) = cdn_wave(ji,jj) + rsmall * ( 1._wp - tmask(ji,jj,1) ) 148 ztmp0 (ji,jj) = cdn_wave(ji,jj) 149 END DO 150 END DO 145 151 ELSE 146 152 ztmp0 = cd_neutral_10m( U_blk ) … … 245 251 !!---------------------------------------------------------------------------------- 246 252 ! 253 !$OMP PARALLEL DO schedule(static) private(jj,ji,zw,zw6,zgt33) 247 254 DO jj = 1, jpj 248 255 DO ji = 1, jpi … … 284 291 !!---------------------------------------------------------------------------------- 285 292 ! 293 !$OMP PARALLEL DO schedule(static) private(jj,ji,zx2,zx,zstab) 286 294 DO jj = 1, jpj 287 295 DO ji = 1, jpi … … 318 326 !!---------------------------------------------------------------------------------- 319 327 ! 328 !$OMP PARALLEL DO schedule(static) private(jj,ji,zx2,zstab) 320 329 DO jj = 1, jpj 321 330 DO ji = 1, jpi -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcice_lim.F90
r7646 r7698 109 109 ! 4 = Pure Coupled formulation) 110 110 !! 111 INTEGER :: jl 111 INTEGER :: jl, jj, ji ! dummy loop index 112 112 REAL(wp), POINTER, DIMENSION(:,:,:) :: zalb_os, zalb_cs ! ice albedo under overcast/clear sky 113 113 REAL(wp), POINTER, DIMENSION(:,: ) :: zutau_ice, zvtau_ice … … 133 133 134 134 ! mean surface ocean current at ice velocity point (C-grid dynamics : U- & V-points as the ocean) 135 u_oce(:,:) = ssu_m(:,:) * umask(:,:,1) 136 v_oce(:,:) = ssv_m(:,:) * vmask(:,:,1) 135 !$OMP PARALLEL DO schedule(static) private(jj, ji) 136 DO jj = 1, jpj 137 DO ji = 1, jpi 138 u_oce(ji,jj) = ssu_m(ji,jj) * umask(ji,jj,1) 139 v_oce(ji,jj) = ssv_m(ji,jj) * vmask(ji,jj,1) 140 END DO 141 END DO 137 142 138 143 ! masked sea surface freezing temperature [Kelvin] (set to rt0 over land) 139 144 CALL eos_fzp( sss_m(:,:) , t_bo(:,:) ) 140 t_bo(:,:) = ( t_bo(:,:) + rt0 ) * tmask(:,:,1) + rt0 * ( 1._wp - tmask(:,:,1) ) 145 !$OMP PARALLEL 146 !$OMP DO schedule(static) private(jj, ji) 147 DO jj = 1, jpj 148 DO ji = 1, jpi 149 t_bo(ji,jj) = ( t_bo(ji,jj) + rt0 ) * tmask(ji,jj,1) + rt0 * ( 1._wp - tmask(ji,jj,1) ) 150 END DO 151 END DO 141 152 142 153 ! Mask sea ice surface temperature (set to rt0 over land) 143 154 DO jl = 1, jpl 144 t_su(:,:,jl) = t_su(:,:,jl) * tmask(:,:,1) + rt0 * ( 1._wp - tmask(:,:,1) ) 145 END DO 155 !$OMP DO schedule(static) private(jj, ji) 156 DO jj = 1, jpj 157 DO ji = 1, jpi 158 t_su(ji,jj,jl) = t_su(ji,jj,jl) * tmask(ji,jj,1) + rt0 * ( 1._wp - tmask(ji,jj,1) ) 159 END DO 160 END DO 161 END DO 162 !$OMP END PARALLEL 146 163 ! 147 164 !------------------------------------------------! … … 161 178 CALL wrk_alloc( jpi,jpj , zutau_ice, zvtau_ice) 162 179 CALL sbc_cpl_ice_tau( zutau_ice , zvtau_ice ) 163 utau_ice(:,:) = utau_ice(:,:) * xcplmask(:,:,0) + zutau_ice(:,:) * ( 1. - xcplmask(:,:,0) ) 164 vtau_ice(:,:) = vtau_ice(:,:) * xcplmask(:,:,0) + zvtau_ice(:,:) * ( 1. 
- xcplmask(:,:,0) ) 180 !$OMP PARALLEL DO schedule(static) private(jj, ji) 181 DO jj = 1, jpj 182 DO ji = 1, jpi 183 utau_ice(ji,jj) = utau_ice(ji,jj) * xcplmask(ji,jj,0) + zutau_ice(ji,jj) * ( 1. - xcplmask(ji,jj,0) ) 184 vtau_ice(ji,jj) = vtau_ice(ji,jj) * xcplmask(ji,jj,0) + zvtau_ice(ji,jj) * ( 1. - xcplmask(ji,jj,0) ) 185 END DO 186 END DO 165 187 CALL wrk_dealloc( jpi,jpj , zutau_ice, zvtau_ice) 166 188 ENDIF … … 180 202 CALL lim_dyn( kt ) ! rheology 181 203 ELSE 182 u_ice(:,:) = rn_uice * umask(:,:,1) ! or prescribed velocity 183 v_ice(:,:) = rn_vice * vmask(:,:,1) 204 !$OMP PARALLEL DO schedule(static) private(jj, ji) 205 DO jj = 1, jpj 206 DO ji = 1, jpi 207 u_ice(ji,jj) = rn_uice * umask(ji,jj,1) ! or prescribed velocity 208 v_ice(ji,jj) = rn_vice * vmask(ji,jj,1) 209 END DO 210 END DO 184 211 ENDIF 185 212 CALL lim_trp( kt ) ! -- Ice transport (Advection/diffusion) … … 200 227 CALL lim_var_agg(1) ! at_i for coupling (via pfrld) 201 228 ! 202 pfrld(:,:) = 1._wp - at_i(:,:) 203 phicif(:,:) = vt_i(:,:) 229 !$OMP PARALLEL DO schedule(static) private(jj, ji) 230 DO jj = 1, jpj 231 DO ji = 1, jpi 232 pfrld(ji,jj) = 1._wp - at_i(ji,jj) 233 phicif(ji,jj) = vt_i(ji,jj) 234 END DO 235 END DO 204 236 205 237 !------------------------------------------------------! … … 220 252 CASE( jp_blk ) ! bulk formulation 221 253 ! albedo depends on cloud fraction because of non-linear spectral effects 222 alb_ice(:,:,:) = ( 1. - cldf_ice ) * zalb_cs(:,:,:) + cldf_ice * zalb_os(:,:,:) 254 DO jl = 1, jpl 255 !$OMP PARALLEL DO schedule(static) private(jj, ji) 256 DO jj = 1, jpj 257 DO ji = 1, jpi 258 alb_ice(ji,jj,jl) = ( 1. - cldf_ice ) * zalb_cs(ji,jj,jl) + cldf_ice * zalb_os(ji,jj,jl) 259 END DO 260 END DO 261 END DO 223 262 CALL blk_ice_flx( t_su, alb_ice ) 224 263 IF( ln_mixcpl ) CALL sbc_cpl_ice_flx( p_frld=pfrld, palbi=alb_ice, psst=sst_m, pist=t_su ) … … 226 265 CASE ( jp_purecpl ) 227 266 ! 
albedo depends on cloud fraction because of non-linear spectral effects 228 alb_ice(:,:,:) = ( 1. - cldf_ice ) * zalb_cs(:,:,:) + cldf_ice * zalb_os(:,:,:) 267 DO jl = 1, jpl 268 !$OMP PARALLEL DO schedule(static) private(jj, ji) 269 DO jj = 1, jpj 270 DO ji = 1, jpi 271 alb_ice(ji,jj,jl) = ( 1. - cldf_ice ) * zalb_cs(ji,jj,jl) + cldf_ice * zalb_os(ji,jj,jl) 272 END DO 273 END DO 274 END DO 229 275 CALL sbc_cpl_ice_flx( p_frld=pfrld, palbi=alb_ice, psst=sst_m, pist=t_su ) 230 276 IF( nn_limflx == 2 ) CALL ice_lim_flx( t_su, alb_ice, qns_ice, qsr_ice, dqns_ice, evap_ice, devap_ice, nn_limflx ) … … 285 331 !! ** purpose : Allocate all the dynamic arrays of the LIM-3 modules 286 332 !!---------------------------------------------------------------------- 287 INTEGER :: j i, jj, ierr333 INTEGER :: jl, ji, jj, ierr 288 334 !!---------------------------------------------------------------------- 289 335 IF(lwp) WRITE(numout,*) … … 334 380 IF( ln_limdiahsb) CALL lim_diahsb_init ! initialization for diags 335 381 ! 336 fr_i(:,:) = at_i(:,:) ! initialisation of sea-ice fraction 337 tn_ice(:,:,:) = t_su(:,:,:) ! initialisation of surface temp for coupled simu 338 ! 382 !$OMP PARALLEL 383 !$OMP DO schedule(static) private(jj, ji) 384 DO jj = 1, jpj 385 DO ji = 1, jpi 386 fr_i(ji,jj) = at_i(ji,jj) ! initialisation of sea-ice fraction 387 END DO 388 END DO 389 !$OMP END DO NOWAIT 390 DO jl = 1, jpl 391 !$OMP DO schedule(static) private(jj, ji) 392 DO jj = 1, jpj 393 DO ji = 1, jpi 394 tn_ice(ji,jj,jl) = t_su(ji,jj,jl) ! initialisation of surface temp for coupled simu 395 END DO 396 END DO 397 !$OMP END DO NOWAIT 398 END DO 399 ! 400 !$OMP DO schedule(static) private(jj, ji) 339 401 DO jj = 1, jpj 340 402 DO ji = 1, jpi … … 344 406 END DO 345 407 END DO 408 !$OMP END PARALLEL 346 409 ! 347 410 nstart = numit + nn_fsbc … … 527 590 REAL(wp), DIMENSION(:,:,:), INTENT(inout) :: pdevap_ice ! sublimation sensitivity 528 591 ! 529 INTEGER :: jl ! 
dummy loop index592 INTEGER :: jl, jj, ji ! dummy loop index 530 593 ! 531 594 REAL(wp), POINTER, DIMENSION(:,:) :: zalb_m ! Mean albedo over all categories … … 550 613 z_evap_m (:,:) = fice_ice_ave ( pevap_ice (:,:,:) ) 551 614 z_devap_m(:,:) = fice_ice_ave ( pdevap_ice (:,:,:) ) 615 616 !$OMP PARALLEL 552 617 DO jl = 1, jpl 553 pdqn_ice (:,:,jl) = z_dqn_m(:,:) 554 pdevap_ice(:,:,jl) = z_devap_m(:,:) 618 !$OMP DO schedule(static) private(jj, ji) 619 DO jj = 1, jpj 620 DO ji = 1, jpi 621 pdqn_ice (ji,jj,jl) = z_dqn_m(ji,jj) 622 pdevap_ice(ji,jj,jl) = z_devap_m(ji,jj) 623 END DO 624 END DO 625 !$OMP END DO NOWAIT 555 626 END DO 556 627 ! 557 628 DO jl = 1, jpl 558 pqns_ice (:,:,jl) = z_qns_m(:,:) 559 pqsr_ice (:,:,jl) = z_qsr_m(:,:) 560 pevap_ice(:,:,jl) = z_evap_m(:,:) 561 END DO 629 !$OMP DO schedule(static) private(jj, ji) 630 DO jj = 1, jpj 631 DO ji = 1, jpi 632 pqns_ice (ji,jj,jl) = z_qns_m(ji,jj) 633 pqsr_ice (ji,jj,jl) = z_qsr_m(ji,jj) 634 pevap_ice(ji,jj,jl) = z_evap_m(ji,jj) 635 END DO 636 END DO 637 END DO 638 !$OMP END PARALLEL 562 639 ! 563 640 CALL wrk_dealloc( jpi,jpj, z_qsr_m, z_qns_m, z_evap_m, z_dqn_m, z_devap_m) … … 571 648 ztem_m(:,:) = fice_ice_ave ( ptn_ice (:,:,:) ) 572 649 DO jl = 1, jpl 573 pqns_ice (:,:,jl) = pqns_ice (:,:,jl) + pdqn_ice (:,:,jl) * ( ptn_ice(:,:,jl) - ztem_m(:,:) ) 574 pevap_ice(:,:,jl) = pevap_ice(:,:,jl) + pdevap_ice(:,:,jl) * ( ptn_ice(:,:,jl) - ztem_m(:,:) ) 575 pqsr_ice (:,:,jl) = pqsr_ice (:,:,jl) * ( 1._wp - palb_ice(:,:,jl) ) / ( 1._wp - zalb_m(:,:) ) 650 !$OMP PARALLEL DO schedule(static) private(jj, ji) 651 DO jj = 1, jpj 652 DO ji = 1, jpi 653 pqns_ice (ji,jj,jl) = pqns_ice (ji,jj,jl) + pdqn_ice (ji,jj,jl) * ( ptn_ice(ji,jj,jl) - ztem_m(ji,jj) ) 654 pevap_ice(ji,jj,jl) = pevap_ice(ji,jj,jl) + pdevap_ice(ji,jj,jl) * ( ptn_ice(ji,jj,jl) - ztem_m(ji,jj) ) 655 pqsr_ice (ji,jj,jl) = pqsr_ice (ji,jj,jl) * ( 1._wp - palb_ice(ji,jj,jl) ) / ( 1._wp - zalb_m(ji,jj) ) 656 END DO 657 END DO 576 658 END DO 577 659 ! 
… … 590 672 !! ** purpose : store ice variables at "before" time step 591 673 !!---------------------------------------------------------------------- 592 a_i_b (:,:,:) = a_i (:,:,:) ! ice area 593 e_i_b (:,:,:,:) = e_i (:,:,:,:) ! ice thermal energy 594 v_i_b (:,:,:) = v_i (:,:,:) ! ice volume 595 v_s_b (:,:,:) = v_s (:,:,:) ! snow volume 596 e_s_b (:,:,:,:) = e_s (:,:,:,:) ! snow thermal energy 597 smv_i_b(:,:,:) = smv_i(:,:,:) ! salt content 598 oa_i_b (:,:,:) = oa_i (:,:,:) ! areal age content 599 u_ice_b(:,:) = u_ice(:,:) 600 v_ice_b(:,:) = v_ice(:,:) 601 ! 602 at_i_b (:,:) = SUM( a_i_b(:,:,:), dim=3 ) 674 INTEGER :: jn, jl, jj, ji ! dummy loop index 675 676 !$OMP PARALLEL 677 DO jl = 1, jpl 678 !$OMP DO schedule(static) private(jj, ji) 679 DO jj = 1, jpj 680 DO ji = 1, jpi 681 a_i_b (ji,jj,jl) = a_i (ji,jj,jl) ! ice area 682 v_i_b (ji,jj,jl) = v_i (ji,jj,jl) ! ice volume 683 v_s_b (ji,jj,jl) = v_s (ji,jj,jl) ! snow volume 684 smv_i_b(ji,jj,jl) = smv_i(ji,jj,jl) ! salt content 685 oa_i_b (ji,jj,jl) = oa_i (ji,jj,jl) ! areal age content 686 END DO 687 END DO 688 !$OMP END DO NOWAIT 689 END DO 690 DO jl = 1, jpl 691 DO jn = 1, nlay_i 692 !$OMP DO schedule(static) private(jj, ji) 693 DO jj = 1, jpj 694 DO ji = 1, jpi 695 e_i_b (ji,jj,jn,jl) = e_i (ji,jj,jn,jl) ! ice thermal energy 696 END DO 697 END DO 698 !$OMP END DO NOWAIT 699 END DO 700 END DO 701 DO jl = 1, jpl 702 DO jn = 1, nlay_s 703 !$OMP DO schedule(static) private(jj, ji) 704 DO jj = 1, jpj 705 DO ji = 1, jpi 706 e_s_b (ji,jj,jn,jl) = e_s (ji,jj,jn,jl) ! snow thermal energy 707 END DO 708 END DO 709 !$OMP END DO NOWAIT 710 END DO 711 END DO 712 !$OMP DO schedule(static) private(jj, ji) 713 DO jj = 1, jpj 714 DO ji = 1, jpi 715 u_ice_b(ji,jj) = u_ice(ji,jj) 716 v_ice_b(ji,jj) = v_ice(ji,jj) 717 at_i_b (ji,jj) = 0._wp 718 END DO 719 END DO 720 DO jl = 1, jpl 721 !$OMP DO schedule(static) private(jj, ji) 722 DO jj = 1, jpj 723 DO ji = 1, jpi 724 ! 
725 at_i_b (ji,jj) = at_i_b (ji,jj) + a_i_b(ji,jj,jl) 726 END DO 727 END DO 728 END DO 729 !$OMP END PARALLEL 603 730 604 731 END SUBROUTINE sbc_lim_bef … … 612 739 !! of the time step 613 740 !!---------------------------------------------------------------------- 614 sfx (:,:) = 0._wp ; 615 sfx_bri(:,:) = 0._wp ; sfx_lam(:,:) = 0._wp 616 sfx_sni(:,:) = 0._wp ; sfx_opw(:,:) = 0._wp 617 sfx_bog(:,:) = 0._wp ; sfx_dyn(:,:) = 0._wp 618 sfx_bom(:,:) = 0._wp ; sfx_sum(:,:) = 0._wp 619 sfx_res(:,:) = 0._wp ; sfx_sub(:,:) = 0._wp 620 ! 621 wfx_snw(:,:) = 0._wp ; wfx_ice(:,:) = 0._wp 622 wfx_sni(:,:) = 0._wp ; wfx_opw(:,:) = 0._wp 623 wfx_bog(:,:) = 0._wp ; wfx_dyn(:,:) = 0._wp 624 wfx_bom(:,:) = 0._wp ; wfx_sum(:,:) = 0._wp 625 wfx_res(:,:) = 0._wp ; wfx_sub(:,:) = 0._wp 626 wfx_spr(:,:) = 0._wp ; wfx_lam(:,:) = 0._wp 741 INTEGER :: jj, ji ! dummy loop index 742 743 !$OMP PARALLEL DO schedule(static) private(jj, ji) 744 DO jj = 1, jpj 745 DO ji = 1, jpi 746 sfx (ji,jj) = 0._wp ; 747 sfx_bri(ji,jj) = 0._wp ; sfx_lam(ji,jj) = 0._wp 748 sfx_sni(ji,jj) = 0._wp ; sfx_opw(ji,jj) = 0._wp 749 sfx_bog(ji,jj) = 0._wp ; sfx_dyn(ji,jj) = 0._wp 750 sfx_bom(ji,jj) = 0._wp ; sfx_sum(ji,jj) = 0._wp 751 sfx_res(ji,jj) = 0._wp ; sfx_sub(ji,jj) = 0._wp 752 ! 753 wfx_snw(ji,jj) = 0._wp ; wfx_ice(ji,jj) = 0._wp 754 wfx_sni(ji,jj) = 0._wp ; wfx_opw(ji,jj) = 0._wp 755 wfx_bog(ji,jj) = 0._wp ; wfx_dyn(ji,jj) = 0._wp 756 wfx_bom(ji,jj) = 0._wp ; wfx_sum(ji,jj) = 0._wp 757 wfx_res(ji,jj) = 0._wp ; wfx_sub(ji,jj) = 0._wp 758 wfx_spr(ji,jj) = 0._wp ; wfx_lam(ji,jj) = 0._wp 627 759 628 hfx_thd(:,:) = 0._wp ; 629 hfx_snw(:,:) = 0._wp ; hfx_opw(:,:) = 0._wp 630 hfx_bog(:,:) = 0._wp ; hfx_dyn(:,:) = 0._wp 631 hfx_bom(:,:) = 0._wp ; hfx_sum(:,:) = 0._wp 632 hfx_res(:,:) = 0._wp ; hfx_sub(:,:) = 0._wp 633 hfx_spr(:,:) = 0._wp ; hfx_dif(:,:) = 0._wp 634 hfx_err(:,:) = 0._wp ; hfx_err_rem(:,:) = 0._wp 635 hfx_err_dif(:,:) = 0._wp 636 wfx_err_sub(:,:) = 0._wp 637 ! 
638 afx_tot(:,:) = 0._wp ; 639 afx_dyn(:,:) = 0._wp ; afx_thd(:,:) = 0._wp 640 ! 641 diag_heat(:,:) = 0._wp ; diag_smvi(:,:) = 0._wp 642 diag_vice(:,:) = 0._wp ; diag_vsnw(:,:) = 0._wp 643 644 tau_icebfr(:,:) = 0._wp; ! landfast ice param only (clem: important to keep the init here) 760 hfx_thd(ji,jj) = 0._wp ; 761 hfx_snw(ji,jj) = 0._wp ; hfx_opw(ji,jj) = 0._wp 762 hfx_bog(ji,jj) = 0._wp ; hfx_dyn(ji,jj) = 0._wp 763 hfx_bom(ji,jj) = 0._wp ; hfx_sum(ji,jj) = 0._wp 764 hfx_res(ji,jj) = 0._wp ; hfx_sub(ji,jj) = 0._wp 765 hfx_spr(ji,jj) = 0._wp ; hfx_dif(ji,jj) = 0._wp 766 hfx_err(ji,jj) = 0._wp ; hfx_err_rem(ji,jj) = 0._wp 767 hfx_err_dif(ji,jj) = 0._wp 768 wfx_err_sub(ji,jj) = 0._wp 769 ! 770 afx_tot(ji,jj) = 0._wp ; 771 afx_dyn(ji,jj) = 0._wp ; afx_thd(ji,jj) = 0._wp 772 ! 773 diag_heat(ji,jj) = 0._wp ; diag_smvi(ji,jj) = 0._wp 774 diag_vice(ji,jj) = 0._wp ; diag_vsnw(ji,jj) = 0._wp 775 776 tau_icebfr(ji,jj) = 0._wp; ! landfast ice param only (clem: important to keep the init here) 777 END DO 778 END DO 645 779 646 780 END SUBROUTINE sbc_lim_diag0 -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcmod.F90
r7646 r7698 84 84 !! - nsbc: type of sbc 85 85 !!---------------------------------------------------------------------- 86 INTEGER :: ji, jj, jn ! dummy loop indices 86 87 INTEGER :: ios, icpt ! local integer 87 88 LOGICAL :: ll_purecpl, ll_opa, ll_not_nemo ! local logical … … 240 241 IF( .NOT.ln_isf ) THEN !* No ice-shelf in the domain : allocate and set to zero 241 242 IF( sbc_isf_alloc() /= 0 ) CALL ctl_stop( 'STOP', 'sbc_init : unable to allocate sbc_isf arrays' ) 242 fwfisf (:,:) = 0._wp ; risf_tsc (:,:,:) = 0._wp 243 fwfisf_b(:,:) = 0._wp ; risf_tsc_b(:,:,:) = 0._wp 243 !$OMP PARALLEL 244 !$OMP DO schedule(static) private(jj,ji) 245 DO jj = 1, jpj 246 DO ji = 1, jpi 247 fwfisf (ji,jj) = 0.0_wp ; fwfisf_b (ji,jj) = 0.0_wp 248 END DO 249 END DO 250 !$OMP END DO NOWAIT 251 DO jn = 1, jpts 252 !$OMP DO schedule(static) private(jj,ji) 253 DO jj = 1, jpj 254 DO ji = 1, jpi 255 risf_tsc(ji,jj,jn) = 0.0_wp ; risf_tsc_b(ji,jj,jn) = 0.0_wp 256 END DO 257 END DO 258 END DO 259 !$OMP END PARALLEL 244 260 END IF 245 261 IF( nn_ice == 0 ) THEN !* No sea-ice in the domain : ice fraction is always zero 246 IF( nn_components /= jp_iam_opa ) fr_i(:,:) = 0._wp ! except for OPA in SAS-OPA coupled case 247 ENDIF 248 ! 249 sfx (:,:) = 0._wp !* salt flux due to freezing/melting 250 fmmflx(:,:) = 0._wp !* freezing minus melting flux 251 252 taum(:,:) = 0._wp !* wind stress module (needed in GLS in case of reduced restart) 262 IF( nn_components /= jp_iam_opa ) THEN 263 !$OMP PARALLEL DO schedule(static) private(jj,ji) 264 DO jj = 1, jpj 265 DO ji = 1, jpi 266 fr_i(ji,jj) = 0._wp ! except for OPA in SAS-OPA coupled case 267 END DO 268 END DO 269 END IF 270 ENDIF 271 ! 
272 !$OMP PARALLEL DO schedule(static) private(jj,ji) 273 DO jj = 1, jpj 274 DO ji = 1, jpi 275 sfx (ji,jj) = 0._wp !* salt flux due to freezing/melting 276 fmmflx(ji,jj) = 0._wp !* freezing minus melting flux 277 taum (ji,jj) = 0._wp !* wind stress module (needed in GLS in case of reduced restart) 278 END DO 279 END DO 253 280 254 281 ! ! Choice of the Surface Boudary Condition (set nsbc) … … 356 383 !!---------------------------------------------------------------------- 357 384 INTEGER, INTENT(in) :: kt ! ocean time step 385 INTEGER :: ji, jj, jn ! dummy loop indices 358 386 ! 359 387 LOGICAL :: ll_sas, ll_opa ! local logical … … 365 393 IF( kt /= nit000 ) THEN ! Swap of forcing fields ! 366 394 ! ! ---------------------------------------- ! 367 utau_b(:,:) = utau(:,:) ! Swap the ocean forcing fields 368 vtau_b(:,:) = vtau(:,:) ! (except at nit000 where before fields 369 qns_b (:,:) = qns (:,:) ! are set at the end of the routine) 370 emp_b (:,:) = emp (:,:) 371 sfx_b (:,:) = sfx (:,:) 395 !$OMP PARALLEL DO schedule(static) private(jj,ji) 396 DO jj = 1, jpj 397 DO ji = 1, jpi 398 utau_b(ji,jj) = utau(ji,jj) ! Swap the ocean forcing fields 399 vtau_b(ji,jj) = vtau(ji,jj) ! (except at nit000 where before fields 400 qns_b (ji,jj) = qns (ji,jj) ! are set at the end of the routine) 401 emp_b (ji,jj) = emp (ji,jj) 402 sfx_b (ji,jj) = sfx (ji,jj) 403 END DO 404 END DO 372 405 IF ( ln_rnf ) THEN 373 rnf_b (:,: ) = rnf (:,: ) 374 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 406 !$OMP PARALLEL 407 !$OMP DO schedule(static) private(jj,ji) 408 DO jj = 1, jpj 409 DO ji = 1, jpi 410 rnf_b (ji,jj ) = rnf (ji,jj ) 411 END DO 412 END DO 413 !$OMP END DO NOWAIT 414 DO jn = 1, jpts 415 !$OMP DO schedule(static) private(jj,ji) 416 DO jj = 1, jpj 417 DO ji = 1, jpi 418 rnf_tsc_b(ji,jj,jn) = rnf_tsc(ji,jj,jn) 419 END DO 420 END DO 421 END DO 422 !$OMP END PARALLEL 375 423 ENDIF 376 424 ENDIF … … 401 449 END SELECT 402 450 IF ( ln_wave .AND. ln_tauoc) THEN ! 
Wave stress subctracted 403 utau(:,:) = utau(:,:)*tauoc_wave(:,:) 404 vtau(:,:) = vtau(:,:)*tauoc_wave(:,:) 405 taum(:,:) = taum(:,:)*tauoc_wave(:,:) 451 !$OMP PARALLEL DO schedule(static) private(jj,ji) 452 DO jj = 1, jpj 453 DO ji = 1, jpi 454 utau(ji,jj) = utau(ji,jj)*tauoc_wave(ji,jj) 455 vtau(ji,jj) = vtau(ji,jj)*tauoc_wave(ji,jj) 456 taum(ji,jj) = taum(ji,jj)*tauoc_wave(ji,jj) 457 END DO 458 END DO 406 459 ! 407 460 SELECT CASE( nsbc ) … … 457 510 CALL iom_get( numror, jpdom_autoglo, 'sfx_b', sfx_b ) ! before salt flux (T-point) 458 511 ELSE 459 sfx_b (:,:) = sfx(:,:) 512 !$OMP PARALLEL DO schedule(static) private(jj,ji) 513 DO jj = 1, jpj 514 DO ji = 1, jpi 515 sfx_b (ji,jj) = sfx(ji,jj) 516 END DO 517 END DO 460 518 ENDIF 461 519 ELSE !* no restart: set from nit000 values 462 520 IF(lwp) WRITE(numout,*) ' nit000-1 surface forcing fields set to nit000' 463 utau_b(:,:) = utau(:,:) 464 vtau_b(:,:) = vtau(:,:) 465 qns_b (:,:) = qns (:,:) 466 emp_b (:,:) = emp (:,:) 467 sfx_b (:,:) = sfx (:,:) 521 !$OMP PARALLEL DO schedule(static) private(jj,ji) 522 DO jj = 1, jpj 523 DO ji = 1, jpi 524 utau_b(ji,jj) = utau(ji,jj) 525 vtau_b(ji,jj) = vtau(ji,jj) 526 qns_b (ji,jj) = qns (ji,jj) 527 emp_b (ji,jj) = emp(ji,jj) 528 sfx_b (ji,jj) = sfx(ji,jj) 529 END DO 530 END DO 468 531 ENDIF 469 532 ENDIF -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcrnf.F90
r7646 r7698 103 103 INTEGER, INTENT(in) :: kt ! ocean time step 104 104 ! 105 INTEGER :: ji, jj ! dummy loop indices106 INTEGER :: z_err = 0 ! dummy integer for error handling105 INTEGER :: ji, jj, jn ! dummy loop indices 106 INTEGER :: z_err = 0 ! dummy integer for error handling 107 107 !!---------------------------------------------------------------------- 108 108 REAL(wp), DIMENSION(:,:), POINTER :: ztfrz ! freezing point used for temperature correction … … 120 120 IF( MOD( kt - 1, nn_fsbc ) == 0 ) THEN 121 121 ! 122 IF( .NOT. l_rnfcpl ) rnf(:,:) = rn_rfact * ( sf_rnf(1)%fnow(:,:,1) ) ! updated runoff value at time step kt 122 IF( .NOT. l_rnfcpl ) THEN ! updated runoff value at time step kt 123 !$OMP PARALLEL DO schedule(static) private(jj, ji) 124 DO jj = 1, jpj 125 DO ji = 1, jpi 126 rnf(ji,jj) = rn_rfact * ( sf_rnf(1)%fnow(ji,jj,1) ) 127 END DO 128 END DO 129 END IF 123 130 ! 124 131 ! ! set temperature & salinity content of runoffs 125 132 IF( ln_rnf_tem ) THEN ! use runoffs temperature data 126 rnf_tsc(:,:,jp_tem) = ( sf_t_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 133 !$OMP PARALLEL DO schedule(static) private(jj, ji) 134 DO jj = 1, jpj 135 DO ji = 1, jpi 136 rnf_tsc(ji,jj,jp_tem) = ( sf_t_rnf(1)%fnow(ji,jj,1) ) * rnf(ji,jj) * r1_rau0 137 END DO 138 END DO 127 139 CALL eos_fzp( sss_m(:,:), ztfrz(:,:) ) 128 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -999._wp ) ! if missing data value use SST as runoffs temperature 129 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 130 END WHERE 131 WHERE( sf_t_rnf(1)%fnow(:,:,1) == -222._wp ) ! where fwf comes from melting of ice shelves or iceberg 132 rnf_tsc(:,:,jp_tem) = ztfrz(:,:) * rnf(:,:) * r1_rau0 - rnf(:,:) * rlfusisf * r1_rau0_rcp 133 END WHERE 140 !$OMP PARALLEL DO schedule(static) private(jj, ji) 141 DO jj = 1, jpj 142 DO ji = 1, jpi 143 IF ( sf_t_rnf(1)%fnow(ji,jj,1) == -999._wp ) THEN ! 
if missing data value use SST as runoffs temperature 144 rnf_tsc(ji,jj,jp_tem) = sst_m(ji,jj) * rnf(ji,jj) * r1_rau0 145 END IF 146 IF ( sf_t_rnf(1)%fnow(ji,jj,1) == -222._wp ) THEN ! where fwf comes from melting of ice shelves or iceberg 147 rnf_tsc(ji,jj,jp_tem) = ztfrz(ji,jj) * rnf(ji,jj) * r1_rau0 - rnf(ji,jj) * rlfusisf * r1_rau0_rcp 148 END IF 149 END DO 150 END DO 134 151 ELSE ! use SST as runoffs temperature 135 rnf_tsc(:,:,jp_tem) = sst_m(:,:) * rnf(:,:) * r1_rau0 136 ENDIF 152 !$OMP PARALLEL DO schedule(static) private(jj, ji) 153 DO jj = 1, jpj 154 DO ji = 1, jpi 155 rnf_tsc(ji,jj,jp_tem) = sst_m(ji,jj) * rnf(ji,jj) * r1_rau0 156 END DO 157 END DO 158 END IF 137 159 ! ! use runoffs salinity data 138 IF( ln_rnf_sal ) rnf_tsc(:,:,jp_sal) = ( sf_s_rnf(1)%fnow(:,:,1) ) * rnf(:,:) * r1_rau0 139 ! ! else use S=0 for runoffs (done one for all in the init) 160 IF( ln_rnf_sal ) THEN 161 !$OMP PARALLEL DO schedule(static) private(jj, ji) 162 DO jj = 1, jpj 163 DO ji = 1, jpi 164 rnf_tsc(ji,jj,jp_sal) = ( sf_s_rnf(1)%fnow(ji,jj,1) ) * rnf(ji,jj) * r1_rau0 165 END DO 166 END DO 167 END IF 168 ! ! else use S=0 for runoffs (done one for all in the init) 140 169 CALL iom_put( "runoffs", rnf ) ! output runoffs arrays 141 170 ENDIF … … 152 181 ELSE !* no restart: set from nit000 values 153 182 IF(lwp) WRITE(numout,*) ' nit000-1 runoff forcing fields set to nit000' 154 rnf_b (:,: ) = rnf (:,: ) 155 rnf_tsc_b(:,:,:) = rnf_tsc(:,:,:) 183 !$OMP PARALLEL 184 !$OMP DO schedule(static) private(jj,ji) 185 DO jj = 1, jpj 186 DO ji = 1, jpi 187 rnf_b (ji,jj ) = rnf (ji,jj ) 188 END DO 189 END DO 190 !$OMP END DO NOWAIT 191 DO jn = 1, jpts 192 !$OMP DO schedule(static) private(jj,ji) 193 DO jj = 1, jpj 194 DO ji = 1, jpi 195 rnf_tsc_b(ji,jj,jn) = rnf_tsc(ji,jj,jn) 196 END DO 197 END DO 198 END DO 199 !$OMP END PARALLEL 156 200 ENDIF 157 201 ENDIF … … 187 231 REAL(wp), DIMENSION(:,:,:), INTENT(inout) :: phdivn ! horizontal divergence 188 232 !! 189 INTEGER :: ji, jj, jk ! 
dummy loop indices233 INTEGER :: ji, jj, jk, jn ! dummy loop indices 190 234 REAL(wp) :: zfact ! local scalar 191 235 !!---------------------------------------------------------------------- … … 195 239 IF( ln_rnf_depth .OR. ln_rnf_depth_ini ) THEN !== runoff distributed over several levels ==! 196 240 IF( ln_linssh ) THEN !* constant volume case : just apply the runoff input flow 241 !$OMP PARALLEL DO schedule(static) private(jj,ji,jk) 197 242 DO jj = 1, jpj 198 243 DO ji = 1, jpi … … 203 248 END DO 204 249 ELSE !* variable volume case 250 !$OMP PARALLEL DO schedule(static) private(jj,ji,jk) 205 251 DO jj = 1, jpj ! update the depth over which runoffs are distributed 206 252 DO ji = 1, jpi … … 217 263 ENDIF 218 264 ELSE !== runoff put only at the surface ==! 219 h_rnf (:,:) = e3t_n (:,:,1) ! update h_rnf to be depth of top box 220 phdivn(:,:,1) = phdivn(:,:,1) - ( rnf(:,:) + rnf_b(:,:) ) * zfact * r1_rau0 / e3t_n(:,:,1) 265 !$OMP PARALLEL DO schedule(static) private(jj, ji) 266 DO jj = 1, jpj 267 DO ji = 1, jpi 268 h_rnf (ji,jj) = e3t_n (ji,jj,1) ! update h_rnf to be depth of top box 269 phdivn(ji,jj,1) = phdivn(ji,jj,1) - ( rnf(ji,jj) + rnf_b(ji,jj) ) * zfact * r1_rau0 / e3t_n(ji,jj,1) 270 END DO 271 END DO 221 272 ENDIF 222 273 ! … … 235 286 !!---------------------------------------------------------------------- 236 287 CHARACTER(len=32) :: rn_dep_file ! runoff file name 237 INTEGER :: ji, jj, jk, jm ! dummy loop indices288 INTEGER :: ji, jj, jk, jm, jn ! dummy loop indices 238 289 INTEGER :: ierror, inum ! temporary integer 239 290 INTEGER :: ios ! Local integer output status for namelist read … … 256 307 ln_rnf_mouth = .FALSE. ! 
default definition needed for example by sbc_ssr or by tra_adv_muscl 257 308 nkrnf = 0 258 rnf (:,:) = 0.0_wp 259 rnf_b (:,:) = 0.0_wp 260 rnfmsk (:,:) = 0.0_wp 261 rnfmsk_z(:) = 0.0_wp 309 !$OMP PARALLEL 310 !$OMP DO schedule(static) private(jj, ji) 311 DO jj = 1, jpj 312 DO ji = 1, jpi 313 rnf (ji,jj) = 0.0_wp 314 rnf_b (ji,jj) = 0.0_wp 315 rnfmsk (ji,jj) = 0.0_wp 316 END DO 317 END DO 318 !$OMP END DO NOWAIT 319 !$OMP DO schedule(static) private(jk) 320 DO jk = 1, jpk 321 rnfmsk_z(jk) = 0.0_wp 322 END DO 323 !$OMP END PARALLEL 262 324 RETURN 263 325 ENDIF … … 338 400 CALL iom_close( inum ) ! close file 339 401 ! 340 nk_rnf(:,:) = 0 ! set the number of level over which river runoffs are applied 402 !$OMP PARALLEL 403 !$OMP DO schedule(static) private(jj, ji) 404 DO jj = 1, jpj 405 DO ji = 1, jpi 406 nk_rnf(ji,jj) = 0 ! set the number of level over which river runoffs are applied 407 END DO 408 END DO 409 !$OMP DO schedule(static) private(jj, ji, jk) 341 410 DO jj = 1, jpj 342 411 DO ji = 1, jpi … … 354 423 END DO 355 424 END DO 425 !$OMP DO schedule(static) private(jj, ji, jk) 356 426 DO jj = 1, jpj ! set the associated depth 357 427 DO ji = 1, jpi … … 362 432 END DO 363 433 END DO 434 !$OMP END PARALLEL 364 435 ! 365 436 ELSE IF( ln_rnf_depth_ini ) THEN ! runoffs applied at the surface … … 381 452 DEALLOCATE( zrnfcl ) 382 453 ! 383 h_rnf(:,:) = 1.384 !385 454 zacoef = rn_dep_max / rn_rnf_max ! coef of linear relation between runoff and its depth (150m for max of runoff) 386 455 ! 387 WHERE( zrnf(:,:) > 0._wp ) h_rnf(:,:) = zacoef * zrnf(:,:) ! compute depth for all runoffs 388 ! 456 !$OMP PARALLEL 457 IF( zrnf(ji,jj) > 0._wp ) THEN 458 !$OMP DO schedule(static) private(jj, ji) 459 DO jj = 1, jpj 460 DO ji = 1, jpi 461 h_rnf(ji,jj) = zacoef * zrnf(ji,jj) ! compute depth for all runoffs 462 END DO 463 END DO 464 END IF 465 ! 466 !$OMP DO schedule(static) private(jj, ji, jk) 389 467 DO jj = 1, jpj ! 
take in account min depth of ocean rn_hmin 390 468 DO ji = 1, jpi … … 396 474 END DO 397 475 ! 398 nk_rnf(:,:) = 0 ! number of levels on which runoffs are distributed 476 !$OMP DO schedule(static) private(jj, ji) 477 DO jj = 1, jpj 478 DO ji = 1, jpi 479 nk_rnf(ji,jj) = 0 ! number of levels on which runoffs are distributed 480 END DO 481 END DO 482 !$OMP DO schedule(static) private(jj, ji, jk) 399 483 DO jj = 1, jpj 400 484 DO ji = 1, jpi … … 409 493 END DO 410 494 END DO 495 !$OMP END PARALLEL 411 496 ! 412 497 DEALLOCATE( zrnf ) 413 498 ! 499 !$OMP PARALLEL DO schedule(static) private(jj, ji, jk) 414 500 DO jj = 1, jpj ! set the associated depth 415 501 DO ji = 1, jpi … … 428 514 ENDIF 429 515 ELSE ! runoffs applied at the surface 430 nk_rnf(:,:) = 1 431 h_rnf (:,:) = e3t_n(:,:,1) 432 ENDIF 433 ! 434 rnf(:,:) = 0._wp ! runoff initialisation 435 rnf_tsc(:,:,:) = 0._wp ! runoffs temperature & salinty contents initilisation 516 !$OMP PARALLEL DO schedule(static) private(jj, ji) 517 DO jj = 1, jpj 518 DO ji = 1, jpi 519 nk_rnf(ji,jj) = 1 520 h_rnf (ji,jj) = e3t_n(ji,jj,1) 521 END DO 522 END DO 523 ENDIF 524 ! 525 !$OMP PARALLEL 526 !$OMP DO schedule(static) private(jj, ji) 527 DO jj = 1, jpj 528 DO ji = 1, jpi 529 rnf(ji,jj) = 0._wp ! runoff initialisation 530 END DO 531 END DO 532 !$OMP END DO NOWAIT 533 DO jn = 1, jpts 534 !$OMP DO schedule(static) private(jj, ji) 535 DO jj = 1, jpj 536 DO ji = 1, jpi 537 rnf_tsc(ji,jj,jn) = 0._wp ! runoffs temperature & salinty contents initilisation 538 END DO 539 END DO 540 END DO 541 !$OMP END PARALLEL 436 542 ! 437 543 ! ! 
======================== … … 466 572 IF(lwp) WRITE(numout,*) 467 573 IF(lwp) WRITE(numout,*) ' No specific treatment at river mouths' 468 rnfmsk (:,:) = 0._wp 469 rnfmsk_z(:) = 0._wp 574 !$OMP PARALLEL 575 !$OMP DO schedule(static) private(jj, ji) 576 DO jj = 1, jpj 577 DO ji = 1, jpi 578 rnfmsk (ji,jj) = 0._wp 579 END DO 580 END DO 581 !$OMP END DO NOWAIT 582 !$OMP DO schedule(static) private(jk) 583 DO jk = 1, jpk 584 rnfmsk_z(jk) = 0._wp 585 END DO 586 !$OMP END PARALLEL 470 587 nkrnf = 0 471 588 ENDIF -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssm.F90
r7646 r7698 59 59 ! 60 60 ! !* surface T-, U-, V- ocean level variables (T, S, depth, velocity) 61 !$OMP PARALLEL DO schedule(static) private(jj, ji) 61 62 DO jj = 1, jpj 62 63 DO ji = 1, jpi … … 68 69 IF( nn_fsbc == 1 ) THEN ! Instantaneous surface fields ! 69 70 ! ! ---------------------------------------- ! 70 ssu_m(:,:) = ub(:,:,1) 71 ssv_m(:,:) = vb(:,:,1) 72 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 73 ELSE ; sst_m(:,:) = zts(:,:,jp_tem) 74 ENDIF 75 sss_m(:,:) = zts(:,:,jp_sal) 71 !$OMP PARALLEL DO schedule(static) private(jj, ji) 72 DO jj = 1, jpj 73 DO ji = 1, jpi 74 ssu_m(ji,jj) = ub(ji,jj,1) 75 ssv_m(ji,jj) = vb(ji,jj,1) 76 END DO 77 END DO 78 IF( l_useCT ) THEN 79 sst_m(:,:) = eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 80 ELSE 81 !$OMP PARALLEL DO schedule(static) private(jj, ji) 82 DO jj = 1, jpj 83 DO ji = 1, jpi 84 sst_m(ji,jj) = zts(ji,jj,jp_tem) 85 END DO 86 END DO 87 ENDIF 88 !$OMP PARALLEL DO schedule(static) private(jj, ji) 89 DO jj = 1, jpj 90 DO ji = 1, jpi 91 sss_m(ji,jj) = zts(ji,jj,jp_sal) 92 END DO 93 END DO 76 94 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 77 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 78 ELSE ; ssh_m(:,:) = sshn(:,:) 79 ENDIF 80 ! 81 e3t_m(:,:) = e3t_n(:,:,1) 82 ! 83 frq_m(:,:) = fraqsr_1lev(:,:) 95 IF( ln_apr_dyn ) THEN 96 !$OMP PARALLEL DO schedule(static) private(jj, ji) 97 DO jj = 1, jpj 98 DO ji = 1, jpi 99 ssh_m(ji,jj) = sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) 100 END DO 101 END DO 102 ELSE 103 !$OMP PARALLEL DO schedule(static) private(jj, ji) 104 DO jj = 1, jpj 105 DO ji = 1, jpi 106 ssh_m(ji,jj) = sshn(ji,jj) 107 END DO 108 END DO 109 ENDIF 110 ! 111 !$OMP PARALLEL DO schedule(static) private(jj, ji) 112 DO jj = 1, jpj 113 DO ji = 1, jpi 114 e3t_m(ji,jj) = e3t_n(ji,jj,1) 115 ! 116 frq_m(ji,jj) = fraqsr_1lev(ji,jj) 117 END DO 118 END DO 84 119 ! 
85 120 ELSE … … 91 126 IF(lwp) WRITE(numout,*) '~~~~~~~ ' 92 127 zcoef = REAL( nn_fsbc - 1, wp ) 93 ssu_m(:,:) = zcoef * ub(:,:,1) 94 ssv_m(:,:) = zcoef * vb(:,:,1) 95 IF( l_useCT ) THEN ; sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 96 ELSE ; sst_m(:,:) = zcoef * zts(:,:,jp_tem) 128 !$OMP PARALLEL DO schedule(static) private(jj, ji) 129 DO jj = 1, jpj 130 DO ji = 1, jpi 131 ssu_m(ji,jj) = zcoef * ub(ji,jj,1) 132 ssv_m(ji,jj) = zcoef * vb(ji,jj,1) 133 END DO 134 END DO 135 IF( l_useCT ) THEN 136 sst_m(:,:) = zcoef * eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 137 ELSE 138 !$OMP PARALLEL DO schedule(static) private(jj, ji) 139 DO jj = 1, jpj 140 DO ji = 1, jpi 141 sst_m(ji,jj) = zcoef * zts(ji,jj,jp_tem) 142 END DO 143 END DO 97 144 ENDIF 98 sss_m(:,:) = zcoef * zts(:,:,jp_sal) 145 !$OMP PARALLEL DO schedule(static) private(jj, ji) 146 DO jj = 1, jpj 147 DO ji = 1, jpi 148 sss_m(ji,jj) = zcoef * zts(ji,jj,jp_sal) 149 END DO 150 END DO 99 151 ! ! removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 100 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = zcoef * ( sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) ) 101 ELSE ; ssh_m(:,:) = zcoef * sshn(:,:) 152 IF( ln_apr_dyn ) THEN 153 !$OMP PARALLEL DO schedule(static) private(jj, ji) 154 DO jj = 1, jpj 155 DO ji = 1, jpi 156 ssh_m(ji,jj) = zcoef * ( sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) ) 157 END DO 158 END DO 159 ELSE 160 !$OMP PARALLEL DO schedule(static) private(jj, ji) 161 DO jj = 1, jpj 162 DO ji = 1, jpi 163 ssh_m(ji,jj) = zcoef * sshn(ji,jj) 164 END DO 165 END DO 102 166 ENDIF 103 167 ! 104 e3t_m(:,:) = zcoef * e3t_n(:,:,1) 105 ! 106 frq_m(:,:) = zcoef * fraqsr_1lev(:,:) 168 !$OMP PARALLEL DO schedule(static) private(jj, ji) 169 DO jj = 1, jpj 170 DO ji = 1, jpi 171 e3t_m(ji,jj) = zcoef * e3t_n(ji,jj,1) 172 ! 173 frq_m(ji,jj) = zcoef * fraqsr_1lev(ji,jj) 174 END DO 175 END DO 107 176 ! ! ---------------------------------------- ! 
108 177 ELSEIF( MOD( kt - 2 , nn_fsbc ) == 0 ) THEN ! Initialisation: New mean computation ! 109 178 ! ! ---------------------------------------- ! 110 ssu_m(:,:) = 0._wp ! reset to zero ocean mean sbc fields 111 ssv_m(:,:) = 0._wp 112 sst_m(:,:) = 0._wp 113 sss_m(:,:) = 0._wp 114 ssh_m(:,:) = 0._wp 115 e3t_m(:,:) = 0._wp 116 frq_m(:,:) = 0._wp 179 !$OMP PARALLEL DO schedule(static) private(jj, ji) 180 DO jj = 1, jpj 181 DO ji = 1, jpi 182 ssu_m(ji,jj) = 0._wp ! reset to zero ocean mean sbc fields 183 ssv_m(ji,jj) = 0._wp 184 sst_m(ji,jj) = 0._wp 185 sss_m(ji,jj) = 0._wp 186 ssh_m(ji,jj) = 0._wp 187 e3t_m(ji,jj) = 0._wp 188 frq_m(ji,jj) = 0._wp 189 END DO 190 END DO 117 191 ENDIF 118 192 ! ! ---------------------------------------- ! 119 193 ! ! Cumulate at each time step ! 120 194 ! ! ---------------------------------------- ! 121 ssu_m(:,:) = ssu_m(:,:) + ub(:,:,1) 122 ssv_m(:,:) = ssv_m(:,:) + vb(:,:,1) 123 IF( l_useCT ) THEN ; sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 124 ELSE ; sst_m(:,:) = sst_m(:,:) + zts(:,:,jp_tem) 125 ENDIF 126 sss_m(:,:) = sss_m(:,:) + zts(:,:,jp_sal) 195 !$OMP PARALLEL DO schedule(static) private(jj, ji) 196 DO jj = 1, jpj 197 DO ji = 1, jpi 198 ssu_m(ji,jj) = ssu_m(ji,jj) + ub(ji,jj,1) 199 ssv_m(ji,jj) = ssv_m(ji,jj) + vb(ji,jj,1) 200 END DO 201 END DO 202 IF( l_useCT ) THEN 203 sst_m(:,:) = sst_m(:,:) + eos_pt_from_ct( zts(:,:,jp_tem), zts(:,:,jp_sal) ) 204 ELSE 205 !$OMP PARALLEL DO schedule(static) private(jj, ji) 206 DO jj = 1, jpj 207 DO ji = 1, jpi 208 sst_m(ji,jj) = sst_m(ji,jj) + zts(ji,jj,jp_tem) 209 END DO 210 END DO 211 ENDIF 212 !$OMP PARALLEL DO schedule(static) private(jj, ji) 213 DO jj = 1, jpj 214 DO ji = 1, jpi 215 sss_m(ji,jj) = sss_m(ji,jj) + zts(ji,jj,jp_sal) 216 END DO 217 END DO 127 218 ! ! 
removed inverse barometer ssh when Patm forcing is used (for sea-ice dynamics) 128 IF( ln_apr_dyn ) THEN ; ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) - 0.5 * ( ssh_ib(:,:) + ssh_ibb(:,:) ) 129 ELSE ; ssh_m(:,:) = ssh_m(:,:) + sshn(:,:) 130 ENDIF 131 ! 132 e3t_m(:,:) = e3t_m(:,:) + e3t_n(:,:,1) 133 ! 134 frq_m(:,:) = frq_m(:,:) + fraqsr_1lev(:,:) 219 IF( ln_apr_dyn ) THEN 220 !$OMP PARALLEL DO schedule(static) private(jj, ji) 221 DO jj = 1, jpj 222 DO ji = 1, jpi 223 ssh_m(ji,jj) = ssh_m(ji,jj) + sshn(ji,jj) - 0.5 * ( ssh_ib(ji,jj) + ssh_ibb(ji,jj) ) 224 END DO 225 END DO 226 ELSE 227 !$OMP PARALLEL DO schedule(static) private(jj, ji) 228 DO jj = 1, jpj 229 DO ji = 1, jpi 230 ssh_m(ji,jj) = ssh_m(ji,jj) + sshn(ji,jj) 231 END DO 232 END DO 233 ENDIF 234 ! 235 !$OMP PARALLEL DO schedule(static) private(jj, ji) 236 DO jj = 1, jpj 237 DO ji = 1, jpi 238 e3t_m(ji,jj) = e3t_m(ji,jj) + e3t_n(ji,jj,1) 239 ! 240 frq_m(ji,jj) = frq_m(ji,jj) + fraqsr_1lev(ji,jj) 241 END DO 242 END DO 135 243 136 244 ! ! ---------------------------------------- ! … … 138 246 ! ! ---------------------------------------- ! 139 247 zcoef = 1. / REAL( nn_fsbc, wp ) 140 sst_m(:,:) = sst_m(:,:) * zcoef ! mean SST [Celsius] 141 sss_m(:,:) = sss_m(:,:) * zcoef ! mean SSS [psu] 142 ssu_m(:,:) = ssu_m(:,:) * zcoef ! mean suface current [m/s] 143 ssv_m(:,:) = ssv_m(:,:) * zcoef ! 144 ssh_m(:,:) = ssh_m(:,:) * zcoef ! mean SSH [m] 145 e3t_m(:,:) = e3t_m(:,:) * zcoef ! mean vertical scale factor [m] 146 frq_m(:,:) = frq_m(:,:) * zcoef ! mean fraction of solar net radiation absorbed in the 1st T level [-] 248 !$OMP PARALLEL DO schedule(static) private(jj, ji) 249 DO jj = 1, jpj 250 DO ji = 1, jpi 251 sst_m(ji,jj) = sst_m(ji,jj) * zcoef ! mean SST [Celsius] 252 sss_m(ji,jj) = sss_m(ji,jj) * zcoef ! mean SSS [psu] 253 ssu_m(ji,jj) = ssu_m(ji,jj) * zcoef ! mean suface current [m/s] 254 ssv_m(ji,jj) = ssv_m(ji,jj) * zcoef ! 255 ssh_m(ji,jj) = ssh_m(ji,jj) * zcoef ! mean SSH [m] 256 e3t_m(ji,jj) = e3t_m(ji,jj) * zcoef ! 
mean vertical scale factor [m] 257 frq_m(ji,jj) = frq_m(ji,jj) * zcoef ! mean fraction of solar net radiation absorbed in the 1st T level [-] 258 END DO 259 END DO 147 260 ! 148 261 ENDIF … … 190 303 !!---------------------------------------------------------------------- 191 304 REAL(wp) :: zcoef, zf_sbc ! local scalar 305 INTEGER :: ji, jj ! loop index 192 306 !!---------------------------------------------------------------------- 193 307 ! … … 217 331 CALL iom_get( numror, jpdom_autoglo, 'frq_m' , frq_m ) 218 332 ELSE 219 frq_m(:,:) = 1._wp ! default definition 333 !$OMP PARALLEL DO schedule(static) private(jj, ji) 334 DO jj = 1, jpj 335 DO ji = 1, jpi 336 frq_m(ji,jj) = 1._wp ! default definition 337 END DO 338 END DO 220 339 ENDIF 221 340 ! … … 223 342 IF(lwp) WRITE(numout,*) ' restart with a change in the frequency of mean from ', zf_sbc, ' to ', nn_fsbc 224 343 zcoef = REAL( nn_fsbc - 1, wp ) / zf_sbc 225 ssu_m(:,:) = zcoef * ssu_m(:,:) 226 ssv_m(:,:) = zcoef * ssv_m(:,:) 227 sst_m(:,:) = zcoef * sst_m(:,:) 228 sss_m(:,:) = zcoef * sss_m(:,:) 229 ssh_m(:,:) = zcoef * ssh_m(:,:) 230 e3t_m(:,:) = zcoef * e3t_m(:,:) 231 frq_m(:,:) = zcoef * frq_m(:,:) 344 !$OMP PARALLEL DO schedule(static) private(jj, ji) 345 DO jj = 1, jpj 346 DO ji = 1, jpi 347 ssu_m(ji,jj) = zcoef * ssu_m(ji,jj) 348 ssv_m(ji,jj) = zcoef * ssv_m(ji,jj) 349 sst_m(ji,jj) = zcoef * sst_m(ji,jj) 350 sss_m(ji,jj) = zcoef * sss_m(ji,jj) 351 ssh_m(ji,jj) = zcoef * ssh_m(ji,jj) 352 e3t_m(ji,jj) = zcoef * e3t_m(ji,jj) 353 frq_m(ji,jj) = zcoef * frq_m(ji,jj) 354 END DO 355 END DO 232 356 ELSE 233 357 IF(lwp) WRITE(numout,*) ' mean fields read in the ocean restart file' … … 239 363 ! 
240 364 IF(lwp) WRITE(numout,*) ' default initialisation of ss._m arrays' 241 ssu_m(:,:) = ub(:,:,1) 242 ssv_m(:,:) = vb(:,:,1) 365 !$OMP PARALLEL DO schedule(static) private(jj, ji) 366 DO jj = 1, jpj 367 DO ji = 1, jpi 368 ssu_m(ji,jj) = ub(ji,jj,1) 369 ssv_m(ji,jj) = vb(ji,jj,1) 370 END DO 371 END DO 243 372 IF( l_useCT ) THEN ; sst_m(:,:) = eos_pt_from_ct( tsn(:,:,1,jp_tem), tsn(:,:,1,jp_sal) ) 244 373 ELSE ; sst_m(:,:) = tsn(:,:,1,jp_tem) 245 374 ENDIF 246 sss_m(:,:) = tsn (:,:,1,jp_sal) 247 ssh_m(:,:) = sshn (:,:) 248 e3t_m(:,:) = e3t_n(:,:,1) 249 frq_m(:,:) = 1._wp 375 !$OMP PARALLEL DO schedule(static) private(jj, ji) 376 DO jj = 1, jpj 377 DO ji = 1, jpi 378 sss_m(ji,jj) = tsn (ji,jj,1,jp_sal) 379 ssh_m(ji,jj) = sshn (ji,jj) 380 e3t_m(ji,jj) = e3t_n(ji,jj,1) 381 frq_m(ji,jj) = 1._wp 382 END DO 383 END DO 250 384 ! 251 385 ENDIF -
trunk/NEMOGCM/NEMO/OPA_SRC/SBC/sbcssr.F90
r7646 r7698 93 93 ! 94 94 IF( nn_sstr == 1 ) THEN !* Temperature restoring term 95 !$OMP PARALLEL DO schedule(static) private(jj,ji,zqrp) 95 96 DO jj = 1, jpj 96 97 DO ji = 1, jpi … … 105 106 IF( nn_sssr == 1 ) THEN !* Salinity damping term (salt flux only (sfx)) 106 107 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 108 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 107 109 DO jj = 1, jpj 108 110 DO ji = 1, jpi … … 118 120 zsrp = rn_deds / rday ! from [mm/day] to [kg/m2/s] 119 121 zerp_bnd = rn_sssr_bnd / rday ! - - 122 !$OMP PARALLEL DO schedule(static) private(jj, ji, zerp) 120 123 DO jj = 1, jpj 121 124 DO ji = 1, jpi
Note: See TracChangeset for help on using the changeset viewer.