Changeset 532
Timestamp: 2006-10-20T08:36:42+02:00
Location: trunk/NEMO/OPA_SRC
Files: 5 added, 4 deleted, 4 edited
Legend: in the diffs below, removed lines are prefixed with "-", added lines with "+", and unchanged context lines with a single space.
trunk/NEMO/OPA_SRC/SBC/flxmod.F90
(r473 → r532)

@@ -14 +14 @@
    USE ocfzpt          ! ocean freezing point
 
+#if defined key_oasis3 || defined key_oasis4
+#if defined key_oasis3
+   USE cpl_oasis3      ! OASIS3 coupling (to ECHAM5)
+#else if defined key_oasis4
+   USE cpl_oasis4      ! OASIS4 coupling (to ECHAM5)
+#endif
+   USE flx_oce, only : dqns_ice , &   ! : derivative of non solar heat flux on sea ice
+                       qsr_oce  , &   ! : solar flux over ocean
+                       qnsr_oce , &   ! : total non solar heat flux (longwave downward radiation) over ocean
+                       qsr_ice  , &   ! : solar flux over ice
+                       qnsr_ice , &   ! : total non solar heat flux (Longwave downward radiation) over ice
+                       tn_ice   , &   ! : ice surface temperature
+                       alb_ice  , &   ! : albedo of ice
+                       sprecip  , &   ! : solid (snow) precipitation over water (!) what about ice?
+                       tprecip  , &   ! : total precipitation ( or liquid precip minus evaporation in coupled mode)
+                       calving  , &   ! : calving
+                       rrunoff  , &   ! : monthly runoff (kg/m2/s)
+                       fr1_i0   , &   ! : 1st part of the fraction of sol.rad. which penetrate inside the ice cover
+                       fr2_i0         ! : 2nd part of the fraction of sol.rad. which penetrate inside the ice cover
+
+   USE ice, only : hicif , &          ! : ice thickness
+                   frld  , &          ! : leads fraction = 1-a/totalarea
+                   hsnif , &          ! : snow thickness
+                   u_ice , v_ice      ! : ice velocity
+
+   USE ice_oce, only : sst_io         ! : sea surface temperature
+
+#if defined key_cpl_ocevel
+   USE geo2ocean, only : repere, repcmo
+#endif
+#endif
    IMPLICIT NONE
    PRIVATE
 
+#if defined key_oasis3 || defined key_oasis4
+   REAL(wp), DIMENSION(jpi,jpj) :: catm
+#endif
    !! * Routine accessibility
    PUBLIC flx          ! routine called by step.F90

@@ -65 +99 @@
 # include "flx_forced_daily.h90"
 
-#elif defined key_coupled
-# if defined key_ice_lim
+#elif defined key_oasis3 || defined key_oasis4
 !!----------------------------------------------------------------------
-!!   'key_coupled' and                          Coupled Ocan/Atmosphere
+!!   'key_oasis3' or 'key_oasis4' and           Coupled Ocan/Atmosphere
 !!   'key_ice_lim'                              with LIM sea-ice model
 !!----------------------------------------------------------------------
-# include "flx_coupled_ice.h90"
-
-# else
-!!----------------------------------------------------------------------
-!!   'key_flx_coupled' and                      Coupled Ocan/Atmosphere
-!!   Default option                             without sea-ice model
-!!----------------------------------------------------------------------
-# include "flx_coupled_noice.h90"
-
-# endif
+# include "flx_oasis_ice.h90"
 #else
 !!----------------------------------------------------------------------
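One detail worth flagging in the new guard block: standard C preprocessors spell the second branch `#elif`; a directive written `#else if defined key_oasis4` is read as a bare `#else` with stray tokens (and some compilers reject it outright), so the dispatch only behaves as intended here because the outer `#if` guarantees one of the two keys is defined. A minimal, self-contained check of the intended logic in the standard spelling (the file name and compile line are illustrative, not part of the changeset):

   ! which_coupler.F90 -- tiny check of the coupler-selection guards,
   ! using standard #elif instead of "#else if".
   ! Build with e.g.:  gfortran -cpp -Dkey_oasis4 which_coupler.F90
   PROGRAM which_coupler
      IMPLICIT NONE
   #if defined key_oasis3 || defined key_oasis4
   #if defined key_oasis3
      PRINT *, 'OASIS3 interface selected'
   #elif defined key_oasis4
      PRINT *, 'OASIS4 interface selected'
   #endif
   #else
      PRINT *, 'uncoupled build'
   #endif
   END PROGRAM which_coupler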
trunk/NEMO/OPA_SRC/SBC/taumod.F90
(r434 → r532)

@@ -15 +15 @@
    USE lbclnk          !
 
+#if defined key_oasis3 || defined key_oasis4
+   USE geo2ocean, only : repcmo
+   USE ice, only : frld        ! : leads fraction = 1-a/totalarea
+#if defined key_oasis3
+   USE cpl_oasis3              ! OASIS3 coupling (to ECHAM5)
+#else if defined key_oasis4
+   USE cpl_oasis4              ! OASIS4 coupling (to ECHAM5)
+#endif
+#endif
    IMPLICIT NONE
    PRIVATE

@@ -48 +57 @@
 # include "tau_forced_daily.h90"
 
-#elif defined key_coupled
+#elif defined key_oasis3 || defined key_oasis4
    ! Coupled case : stress at the coupling frequency
-# if defined key_ice_lim
-!!----------------------------------------------------------------------
-!!   'key_coupled'                              Coupled Ocean/Atmosphere
+!!----------------------------------------------------------------------
+!!   'key_oasis3' or 'key_oasis4' and           Coupled Ocean/Atmosphere
 !!   'key_ice_lim'                                           LIM sea-ice
 !!----------------------------------------------------------------------
    ! New way: 3D referential link to the earth (avoid north pole pb)
    ! (3 component stress defined at U- and V-points)
-# include "tau_coupled_ice.h90"
-# else
-!!----------------------------------------------------------------------
-!!   'key_coupled'                              Coupled Ocean/Atmosphere
-!!   Default case                                             NO sea-ice
-!!----------------------------------------------------------------------
-! old fashion: geographical referential
-! (zonal and meridional stress defined at U- and V-points)
-# include "tau_coupled.h90"
-# endif
+# include "tau_oasis_ice.h90"
 #else
 !!----------------------------------------------------------------------
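The comment "New way: 3D referential link to the earth (avoid north pole pb)" refers to the fact that the direction of a geographic (east, north) vector is undefined at the pole, so exchanging three earth-fixed Cartesian stress components sidesteps that singularity. On the model side the stress must still be expressed along the local i/j grid axes, which is why repcmo is now imported from geo2ocean. A self-contained sketch of that kind of rotation (the routine name and angle arrays are hypothetical placeholders, not geo2ocean's actual interface):

   ! Illustrative only: rotate a geographic (zonal, meridional) vector
   ! onto local model grid directions; repcmo plays this role in NEMO.
   SUBROUTINE rot_geo2grid( kni, knj, pu_geo, pv_geo, pcosa, psina, pu_i, pv_j )
      IMPLICIT NONE
      INTEGER, INTENT(in) :: kni, knj                              ! local grid size
      REAL(8), DIMENSION(kni,knj), INTENT(in ) :: pu_geo, pv_geo   ! east/north components
      REAL(8), DIMENSION(kni,knj), INTENT(in ) :: pcosa , psina    ! cos/sin of local grid rotation angle
      REAL(8), DIMENSION(kni,knj), INTENT(out) :: pu_i  , pv_j     ! components along model i/j axes
      pu_i(:,:) =  pu_geo(:,:) * pcosa(:,:) + pv_geo(:,:) * psina(:,:)
      pv_j(:,:) = -pu_geo(:,:) * psina(:,:) + pv_geo(:,:) * pcosa(:,:)
   END SUBROUTINE rot_geo2grid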
trunk/NEMO/OPA_SRC/lib_mpp.F90
(r524 → r532)

@@ -45 +45 @@
 !!        !  04  (R. Bourdalle Badie)  isend option in mpi
 !!        !  05  (G. Madec, S. Masson)  npolj=5,6 F-point & ice cases
+!!        !  05  (R. Redler)  Replacement of MPI_COMM_WORLD except for MPI_Abort
 !!----------------------------------------------------------------------
 !!   OPA 9.0 , LOCEAN-IPSL (2005)

@@ -60 +61 @@
 PUBLIC mpp_lbc_north_e, mpp_minloc, mpp_maxloc, mpp_lnk_3d, mpp_lnk_2d, mpp_lnk_3d_gather, mpp_lnk_2d_e, mpplnks
 PUBLIC mpprecv, mppsend, mppscatter, mppgather, mppobc, mpp_ini_north, mppstop, mppsync
+#if defined key_oasis3 || defined key_oasis4
+PUBLIC size, rank
+#endif
 
 !! * Interfaces

@@ -107 +111 @@
 INTEGER :: &
    size, &          ! number of process
-   rank             ! process number  [ 0 - size-1 ]
+   rank, &          ! process number  [ 0 - size-1 ]
+   mpi_comm_opa     ! opa local communicator
 
 ! variables used in case of north fold condition in mpp_mpi with jpni > 1

@@ -117 +122 @@
    njmppmax          ! value of njmpp for the processors of the northern line
 INTEGER :: &        !
-   north_root       ! number (in the comm_world) of proc 0 in the northern comm
+   north_root       ! number (in the comm_opa) of proc 0 in the northern comm
 INTEGER, DIMENSION(:), ALLOCATABLE :: &
    nrank_north      ! dimension ndim_rank_north, number of the procs belonging to ncomm_north

@@ -272 +277 @@
 CONTAINS
 
-   FUNCTION mynode( )
+   FUNCTION mynode(localComm)
    !!----------------------------------------------------------------------
    !!                  ***  routine mynode  ***

@@ -281 +286 @@
 #if defined key_mpp_mpi
    !! * Local variables   (MPI version)
-   INTEGER ::   mynode, ierr
+   INTEGER ::   mynode, ierr, code
+   LOGICAL ::   mpi_was_called
+   INTEGER, OPTIONAL ::   localComm
    NAMELIST/nam_mpp/ c_mpi_send
    !!----------------------------------------------------------------------

@@ -300 +307 @@
    IF( Agrif_Root() ) THEN
 #endif
-      SELECT CASE ( c_mpi_send )
-      CASE ( 'S' )             ! Standard mpi send (blocking)
-         WRITE(numout,*) '           Standard blocking mpi send (send)'
-         CALL mpi_init( ierr )
-      CASE ( 'B' )             ! Buffer mpi send (blocking)
-         WRITE(numout,*) '           Buffer blocking mpi send (bsend)'
-         CALL mpi_init_opa( ierr )
-      CASE ( 'I' )             ! Immediate mpi send (non-blocking send)
-         WRITE(numout,*) '           Immediate non-blocking send (isend)'
-         l_isend = .TRUE.
-         CALL mpi_init( ierr )
-      CASE DEFAULT
-         WRITE(numout,cform_err)
-         WRITE(numout,*) '           bad value for c_mpi_send = ', c_mpi_send
-         nstop = nstop + 1
-      END SELECT
-
+      CALL mpi_initialized ( mpi_was_called, code )
+      IF( code /= MPI_SUCCESS ) THEN
+         CALL ctl_stop( ' lib_mpp: Error in routine mpi_initialized' )
+         CALL mpi_abort( mpi_comm_world, code, ierr )
+      ENDIF
+
+      IF( PRESENT(localComm) .and. mpi_was_called ) THEN
+         mpi_comm_opa = localComm
+         SELECT CASE ( c_mpi_send )
+         CASE ( 'S' )             ! Standard mpi send (blocking)
+            WRITE(numout,*) '           Standard blocking mpi send (send)'
+         CASE ( 'B' )             ! Buffer mpi send (blocking)
+            WRITE(numout,*) '           Buffer blocking mpi send (bsend)'
+            CALL mpi_init_opa( ierr )
+         CASE ( 'I' )             ! Immediate mpi send (non-blocking send)
+            WRITE(numout,*) '           Immediate non-blocking send (isend)'
+            l_isend = .TRUE.
+         CASE DEFAULT
+            WRITE(numout,cform_err)
+            WRITE(numout,*) '           bad value for c_mpi_send = ', c_mpi_send
+            nstop = nstop + 1
+         END SELECT
+      ELSE IF ( PRESENT(localComm) .and. .not. mpi_was_called ) THEN
+         WRITE(numout,*) ' lib_mpp: You cannot provide a local communicator '
+         WRITE(numout,*) '          without calling MPI_Init before !'
+      ELSE
+         SELECT CASE ( c_mpi_send )
+         CASE ( 'S' )             ! Standard mpi send (blocking)
+            WRITE(numout,*) '           Standard blocking mpi send (send)'
+            CALL mpi_init( ierr )
+         CASE ( 'B' )             ! Buffer mpi send (blocking)
+            WRITE(numout,*) '           Buffer blocking mpi send (bsend)'
+            CALL mpi_init_opa( ierr )
+         CASE ( 'I' )             ! Immediate mpi send (non-blocking send)
+            WRITE(numout,*) '           Immediate non-blocking send (isend)'
+            l_isend = .TRUE.
+            CALL mpi_init( ierr )
+         CASE DEFAULT
+            WRITE(ctmp1,*) '           bad value for c_mpi_send = ', c_mpi_send
+            CALL ctl_stop( ctmp1 )
+         END SELECT
+
+         CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
+         IF( code /= MPI_SUCCESS ) THEN
+            CALL ctl_stop( ' lib_mpp: Error in routine mpi_comm_dup' )
+            CALL mpi_abort( mpi_comm_world, code, ierr )
+         ENDIF
+         !
+      ENDIF
 #if defined key_agrif
-ELSE
+   ELSE
       SELECT CASE ( c_mpi_send )
       CASE ( 'S' )             ! Standard mpi send (blocking)

@@ -335 +374 @@
 #endif
 
-      CALL mpi_comm_rank( mpi_comm_world, rank, ierr )
-      CALL mpi_comm_size( mpi_comm_world, size, ierr )
+      CALL mpi_comm_rank( mpi_comm_opa, rank, ierr )
+      CALL mpi_comm_size( mpi_comm_opa, size, ierr )
      mynode = rank
 #else

@@ -2795 +2834 @@
       CASE ( 'S' )             ! Standard mpi send (blocking)
          CALL mpi_send ( pmess, kbytes, mpi_double_precision, kdest, ktyp,   &
-            &             mpi_comm_world, iflag )
+            &             mpi_comm_opa, iflag )
       CASE ( 'B' )             ! Buffer mpi send (blocking)
          CALL mpi_bsend( pmess, kbytes, mpi_double_precision, kdest, ktyp,   &
-            &             mpi_comm_world, iflag )
+            &             mpi_comm_opa, iflag )
       CASE ( 'I' )             ! Immediate mpi send (non-blocking send)
          ! Be carefull, one more argument here : the mpi request identifier..
          CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest, ktyp,   &
-            &             mpi_comm_world, md_req, iflag )
+            &             mpi_comm_opa, md_req, iflag )
       END SELECT
 #endif

@@ -2830 +2869 @@
 
       CALL mpi_recv( pmess, kbytes, mpi_double_precision, mpi_any_source, ktyp,   &
-         &           mpi_comm_world, istatus, iflag )
+         &           mpi_comm_opa, istatus, iflag )
 #endif

@@ -2864 +2903 @@
       itaille=jpi*jpj
       CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille,   &
-         &             mpi_double_precision, kp , mpi_comm_world, ierror )
+         &             mpi_double_precision, kp , mpi_comm_opa, ierror )
 #endif

@@ -2898 +2937 @@
 
       CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille,   &
-         &              mpi_double_precision, kp, mpi_comm_world, ierror )
+         &              mpi_double_precision, kp, mpi_comm_opa, ierror )
 #endif

@@ -2959 +2998 @@
       CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror )
       CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer   &
-         , mpi_isl, mpi_comm_world, ierror )
+         , mpi_isl, mpi_comm_opa, ierror )
       ktab(:) = iwork(:)
 #endif

@@ -3013 +3052 @@
       CALL mpi_op_create(lc_isl,lcommute,mpi_isl,ierror)
       CALL mpi_allreduce(ktab, iwork, 1,mpi_integer   &
-         ,mpi_isl,mpi_comm_world,ierror)
+         ,mpi_isl,mpi_comm_opa,ierror)
       ktab = iwork
 #endif

@@ -3064 +3103 @@
 
       CALL mpi_allreduce( ktab, iwork, kdim, mpi_integer,   &
-         &                mpi_min, mpi_comm_world, ierror )
+         &                mpi_min, mpi_comm_opa, ierror )
 
       ktab(:) = iwork(:)

@@ -3112 +3151 @@
 
       CALL mpi_allreduce(ktab,iwork, 1,mpi_integer   &
-         &              ,mpi_min,mpi_comm_world,ierror)
+         &              ,mpi_min,mpi_comm_opa,ierror)
 
       ktab = iwork

@@ -3166 +3205 @@
 
       CALL mpi_allreduce(ktab, iwork,kdim,mpi_integer   &
-         ,mpi_sum,mpi_comm_world,ierror)
+         ,mpi_sum,mpi_comm_opa,ierror)
 
       ktab(:) = iwork(:)

@@ -3209 +3248 @@
 
       CALL mpi_allreduce(ktab,iwork, 1,mpi_integer   &
-         ,mpi_sum,mpi_comm_world,ierror)
+         ,mpi_sum,mpi_comm_opa,ierror)
 
       ktab = iwork

@@ -3277 +3316 @@
       CALL mpi_op_create(lc_isl,lcommute,mpi_isl,ierror)
       CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision   &
-         ,mpi_isl,mpi_comm_world,ierror)
+         ,mpi_isl,mpi_comm_opa,ierror)
       ptab(:) = zwork(:)

@@ -3335 +3374 @@
       CALL mpi_op_create( lc_isl, lcommute, mpi_isl, ierror )
       CALL mpi_allreduce( ptab, zwork, 1, mpi_double_precision,   &
-         &                mpi_isl , mpi_comm_world, ierror )
+         &                mpi_isl , mpi_comm_opa, ierror )
       ptab = zwork

@@ -3401 +3440 @@
 
       CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision   &
-         ,mpi_max,mpi_comm_world,ierror)
+         ,mpi_max,mpi_comm_opa,ierror)
       ptab(:) = zwork(:)

@@ -3445 +3484 @@
 
       CALL mpi_allreduce( ptab, zwork , 1 , mpi_double_precision,   &
-         &                mpi_max, mpi_comm_world, ierror )
+         &                mpi_max, mpi_comm_opa, ierror )
       ptab = zwork

@@ -3498 +3537 @@
 
       CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision   &
-         ,mpi_min,mpi_comm_world,ierror)
+         ,mpi_min,mpi_comm_opa,ierror)
       ptab(:) = zwork(:)

@@ -3543 +3582 @@
 
       CALL mpi_allreduce( ptab, zwork, 1,mpi_double_precision   &
-         &               ,mpi_min,mpi_comm_world,ierror)
+         &               ,mpi_min,mpi_comm_opa,ierror)
       ptab = zwork

@@ -3596 +3635 @@
 
       CALL mpi_allreduce(ptab, zwork,kdim,mpi_double_precision   &
-         &              ,mpi_sum,mpi_comm_world,ierror)
+         &              ,mpi_sum,mpi_comm_opa,ierror)
       ptab(:) = zwork(:)

@@ -3640 +3679 @@
 
       CALL mpi_allreduce(ptab, zwork, 1,mpi_double_precision   &
-         &              ,mpi_sum,mpi_comm_world,ierror)
+         &              ,mpi_sum,mpi_comm_opa,ierror)
       ptab = zwork

@@ -3687 +3726 @@
       zain(2,:)=ki+10000.*kj
 
-      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,ierror)
+      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror)
 
      pmin=zaout(1,1)

@@ -3738 +3777 @@
       zain(2,:)=ki+10000.*kj+100000000.*kk
 
-      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,ierror)
+      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_OPA,ierror)
 
      pmin=zaout(1,1)

@@ -3789 +3828 @@
       zain(2,:)=ki+10000.*kj
 
-      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,ierror)
+      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror)
 
      pmax=zaout(1,1)

@@ -3839 +3878 @@
       zain(2,:)=ki+10000.*kj+100000000.*kk
 
-      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,ierror)
+      CALL MPI_ALLREDUCE( zain,zaout, 1, MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_OPA,ierror)
 
      pmax=zaout(1,1)

@@ -3867 +3906 @@
       INTEGER :: ierror
 
-      CALL mpi_barrier(mpi_comm_world,ierror)
+      CALL mpi_barrier(mpi_comm_opa,ierror)
 
 #endif

@@ -4201 +4240 @@
       ! create the world group
       !
-      CALL MPI_COMM_GROUP(mpi_comm_world,ngrp_world,ierr)
+      CALL MPI_COMM_GROUP(mpi_comm_opa,ngrp_world,ierr)
       !
       ! Create the North group from the world group

@@ -4208 +4247 @@
       ! Create the North communicator , ie the pool of procs in the north group
       !
-      CALL MPI_COMM_CREATE(mpi_comm_world,ngrp_north,ncomm_north,ierr)
+      CALL MPI_COMM_CREATE(mpi_comm_opa,ngrp_north,ncomm_north,ierr)
 

@@ -4996 +5035 @@
 !$AGRIF_END_DO_NOT_TREAT
 
-   INTEGER :: code,rang
+   INTEGER :: code,rang,ierr
+   LOGICAL :: mpi_was_called
 
    ! La valeur suivante doit etre au moins egale a la taille

@@ -5007 +5047 @@
 
    ! Le point d'entree dans la bibliotheque MPI elle-meme
-   CALL mpi_init(code)
-
+   CALL mpi_initialized(mpi_was_called, code)
+   IF ( code /= MPI_SUCCESS ) THEN
+      CALL ctl_stop( ' lib_mpp: Error in routine mpi_initialized' )
+      CALL mpi_abort( mpi_comm_world, code, ierr )
+   ENDIF
+
+   IF ( .NOT. mpi_was_called ) THEN
+      CALL mpi_init(code)
+      CALL mpi_comm_dup( mpi_comm_world, mpi_comm_opa, code)
+      IF ( code /= MPI_SUCCESS ) THEN
+         CALL ctl_stop( ' lib_mpp: Error in routine mpi_comm_dup' )
+         CALL mpi_abort( mpi_comm_world, code, ierr )
+      ENDIF
+   ENDIF
    ! La definition de la zone tampon pour les futurs envois
    ! par MPI_BSEND (on alloue une fois pour toute cette zone

@@ -5034 +5086 @@
    ! "data" soit 7 segments, c.-a-d. 1750/8 = 210 Mo
    IF (taille_tampon > 210000000) THEN
-      PRINT *,'Attention la valeur BUFFER_LENGTH doit etre <= 210000000'
+      CALL ctl_stop( ' lib_mpp: Attention la valeur BUFFER_LENGTH doit etre <= 210000000' )
       CALL mpi_abort(MPI_COMM_WORLD,2,code)
    END IF
 
-   CALL mpi_comm_rank(MPI_COMM_WORLD,rang,code)
+   CALL mpi_comm_rank(MPI_COMM_OPA,rang,code)
    IF (rang == 0 ) PRINT *,'Taille du buffer alloue : ',taille_tampon

@@ -5078 +5130 @@
 CONTAINS
 
-   FUNCTION mynode() RESULT (function_value)
+   FUNCTION mynode(localComm) RESULT (function_value)
+      INTEGER, OPTIONAL :: localComm
      function_value = 0
    END FUNCTION mynode
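The pattern behind all of these substitutions is worth spelling out: NEMO no longer assumes it owns MPI_COMM_WORLD. It either adopts a communicator handed in by the coupler, or duplicates the world communicator into a private one (mpi_comm_opa) so its internal messages cannot collide with those of other components sharing the same MPI world. A minimal sketch of the pattern, using only standard MPI calls (the routine name and its argument list here are illustrative, not lib_mpp's actual interface):

   ! Sketch: obtain a model-local communicator, assuming only standard MPI.
   ! "localComm" stands for the communicator a coupler such as OASIS passes in.
   SUBROUTINE init_opa_comm( comm_opa, ierr, localComm )
      IMPLICIT NONE
      INCLUDE 'mpif.h'
      INTEGER,           INTENT(out) :: comm_opa, ierr
      INTEGER, OPTIONAL, INTENT(in ) :: localComm
      LOGICAL :: mpi_was_called
      CALL mpi_initialized( mpi_was_called, ierr )
      IF( PRESENT(localComm) .AND. mpi_was_called ) THEN
         comm_opa = localComm                       ! adopt the coupler's communicator
      ELSE
         IF( .NOT. mpi_was_called )   CALL mpi_init( ierr )
         ! duplicate the world communicator: internal exchanges then live in
         ! their own message context, separate from other coupled components
         CALL mpi_comm_dup( mpi_comm_world, comm_opa, ierr )
      ENDIF
   END SUBROUTINE init_opa_comm

Only mpi_abort keeps MPI_COMM_WORLD, deliberately: as the new history line notes ("Replacement of MPI_COMM_WORLD except for MPI_Abort"), a fatal error in one component should bring down the whole coupled system, not just the ocean.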
trunk/NEMO/OPA_SRC/opa.F90
(r531 → r532)

@@ -28 +28 @@
 !!        !  00-07  (J-M Molines, M. Imbard)  Open Boundary Conditions (CLIPPER)
 !!   9.0  !  02-08  (G. Madec)  F90: Free form and modules
+!!    "   !  04-06  (R. Redler, NEC CCRLE, Germany)  add OASIS[3/4] coupled interfaces
 !!    "   !  04-08  (C. Talandier)  New trends organization
 !!    "   !  05-06  (C. Ethe)  Add the 1D configuration possibility

@@ -68 +69 @@
 
    USE step            ! OPA time-stepping                  (stp routine)
+#if defined key_oasis3
+   USE cpl_oasis3      ! OASIS3 coupling (to ECHAM5)
+#else if defined key_oasis4
+   USE cpl_oasis4      ! OASIS4 coupling (to ECHAM5)
+#endif
+   USE dynspg_oce      ! Control choice of surface pressure gradient schemes
    USE prtctl          ! Print control                 (prt_ctl_init routine)
    USE ini1d           ! re-initialization of u-v mask for the 1D configuration

@@ -149 +156 @@
 
       CALL opa_closefile
+#if defined key_oasis3 || defined key_oasis4
+      call cpl_prism_finalize
+#else
       IF( lk_mpp )   CALL mppstop                          ! Close all files (mpp)
+#endif
 
    END SUBROUTINE opa_model

@@ -163 +174 @@
 #if defined key_coupled
       INTEGER :: itro, istp0        ! ???
+#endif
+#if defined key_oasis3 || defined key_oasis4
+      INTEGER :: localComm
 #endif
       CHARACTER (len=20) :: namelistname

@@ -197 +211 @@
       READ ( numnam, namctl )
 
+#ifdef key_oasis3 || defined key_oasis4
+      call cpl_prism_init(localComm)
+      ! Nodes selection
+      narea = mynode(localComm)
+#else
       ! Nodes selection
       narea = mynode()
+#endif
       narea = narea + 1    ! mynode return the rank of proc (0 --> jpnij -1 )
       lwp = narea == 1

@@ -286 +306 @@
 #endif
 
-#if defined key_coupled
+#if defined key_coupled && ! defined key_oasis3 && ! defined key_oasis4
       itro  = nitend - nit000 + 1           ! Coupled
       istp0 = NINT( rdt )
       CALL cpl_init( itro, nexco, istp0 )   ! Signal processing and process id exchange
+#endif
+
+#if defined key_oasis3 || defined key_oasis4
+      CALL cpl_prism_define
 #endif
 

@@ -398 +422 @@
       SELECT CASE ( cp_cfg )
       CASE ( 'gyre' )
-      CALL ctl_warn( ' The Benchmark is activated ' )
+         CALL ctl_warn( ' The Benchmark is activated ' )
       CASE DEFAULT
-         CALL ctl_stop( ' The Benchmark is based on the GYRE configuration: key_gyre must be used or set nbench = 0' )
+         CALL ctl_stop( ' The Benchmark is based on the GYRE configuration: key_gyre must   &
+            &              be used or set nbench = 0' )
       END SELECT
    ENDIF
 
-   SELECT CASE ( nbit_cmp )
-   CASE ( 1 )
-      CALL ctl_warn( ' Bit comparison enabled Single and multiple processor results should bit compare', &
-         &           ' WARNING: RESULTS ARE NOT PHYSICAL.' )
-   CASE DEFAULT
-      CALL ctl_warn( ' Bit comparison not enabled. Single and multiple processor results will differ.' )
-   END SELECT
+   IF( nbit_cmp == 1 ) THEN
+      CALL ctl_warn( ' Bit comparison enabled. Single and multiple processor results must bit compare', &
+         &           ' WARNING: RESULTS ARE NOT PHYSICAL.' )
+   ENDIF
 
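Taken together, the opa.F90 hooks pin down a strict life-cycle: initialize the coupler first (it returns the model's communicator), build the MPI topology on that communicator, declare the coupling fields, and let the coupler, not mppstop, end the run. (Note in passing that the committed guard `#ifdef key_oasis3 || defined key_oasis4` mixes `#ifdef` with an expression; standard CPP wants `#if defined key_oasis3 || defined key_oasis4`.) A minimal self-contained sketch of the ordering, with stand-in stubs for the cpl_prism_* routines and mynode; only the sequence is the point:

   ! Sketch of the coupled start-up/shutdown order wired in above.
   PROGRAM coupled_lifecycle
      IMPLICIT NONE
      INTEGER :: localComm, narea
      CALL cpl_prism_init( localComm )   ! 1: coupler first, returns NEMO's communicator
      narea = mynode( localComm ) + 1    ! 2: MPI topology built on that communicator
      CALL cpl_prism_define              ! 3: declare coupling fields to OASIS
      ! ... time stepping (stp) would run here ...
      CALL cpl_prism_finalize            ! 4: the coupler, not mppstop, ends the run
   CONTAINS
      SUBROUTINE cpl_prism_init( kcomm )
         INTEGER, INTENT(out) :: kcomm
         kcomm = 0                       ! stand-in for the communicator OASIS returns
      END SUBROUTINE cpl_prism_init
      INTEGER FUNCTION mynode( kcomm )
         INTEGER, INTENT(in) :: kcomm
         mynode = 0                      ! stand-in for the rank within kcomm
      END FUNCTION mynode
      SUBROUTINE cpl_prism_define
      END SUBROUTINE cpl_prism_define
      SUBROUTINE cpl_prism_finalize
      END SUBROUTINE cpl_prism_finalize
   END PROGRAM coupled_lifecycle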