MODULE cpl_oasis3
   !!======================================================================
   !!                    ***  MODULE cpl_oasis  ***
   !! Coupled O/A : coupled ocean-atmosphere case using OASIS3 V. prism_2_4
   !!               special case: NEMO OPA/LIM coupled to ECHAM5
   !!=====================================================================
   !! History :
   !!   9.0  !  04-06  (R. Redler, NEC CCRLE, Germany)  Original code
   !!   " "  !  04-11  (R. Redler, N. Keenlyside)  revision
   !!   " "  !  04-11  (V. Gayler, MPI M&D)  Grid writing
   !!   " "  !  05-08  (R. Redler, W. Park)  frld initialization, paral(2) revision
   !!   " "  !  05-09  (R. Redler)  extended to allow for communication over root only
   !!   " "  !  06-01  (W. Park)  modification of physical part
   !!   " "  !  06-02  (R. Redler, W. Park)  buffer array fix for root exchange
   !!----------------------------------------------------------------------
#if defined key_oasis3
   !!----------------------------------------------------------------------
   !!   'key_oasis3'                    coupled Ocean/Atmosphere via OASIS3
   !!----------------------------------------------------------------------
   !!----------------------------------------------------------------------
   !!   cpl_prism_init     : initialization of coupled mode communication
   !!   cpl_prism_define   : definition of grid and fields
   !!   cpl_prism_send     : send out fields in coupled mode
   !!   cpl_prism_recv     : receive fields in coupled mode
   !!   cpl_prism_finalize : finalize the coupled mode communication
   !!----------------------------------------------------------------------
   !! * Modules used
   !##################### WARNING coupled mode ###############################
   !##################### WARNING coupled mode ###############################
   !   Following lines must be enabled if coupling with OASIS
   !
   !   USE mod_prism_proto              ! OASIS3 prism module
   !   USE mod_prism_def_partition_proto! OASIS3 prism module for partitioning
   !   USE mod_prism_grids_writing      ! OASIS3 prism module for writing grid files
   !   USE mod_prism_put_proto          ! OASIS3 prism module for sending
   !   USE mod_prism_get_proto          ! OASIS3 prism module for receiving
   !   USE mod_prism_grids_writing      ! OASIS3 prism module for writing grids
   !##################### WARNING coupled mode ###############################
   !##################### WARNING coupled mode ###############################
#if defined key_mpp_mpi
   USE lib_mpp, only : mppsize, mpprank  ! message passing
   USE lib_mpp, only : mppsend           ! message passing
   USE lib_mpp, only : mpprecv           ! message passing
#endif
   USE daymod                            ! date and time info
   USE dom_oce                           ! ocean space and time domain
   USE sbc_ice                           ! surface boundary condition: ice
   USE in_out_manager                    ! I/O manager
   USE par_oce                           !
   USE phycst, only : rt0                ! freezing point of sea water
   USE oce,    only : tn, un, vn
   USE ice_2,  only : frld, hicif, hsnif

   IMPLICIT NONE
   !
   ! Exchange parameters for coupling ORCA-LIM with ECHAM5
   !
#if defined key_cpl_ocevel
   INTEGER, PARAMETER         :: nsend =  6
#else
   INTEGER, PARAMETER         :: nsend =  4
#endif

#if defined key_cpl_discharge
   INTEGER, PARAMETER         :: nrecv = 20
#else
   INTEGER, PARAMETER         :: nrecv = 17
#endif

   INTEGER, DIMENSION(nsend)  :: send_id
   INTEGER, DIMENSION(nrecv)  :: recv_id

   CHARACTER(len=32)          :: cpl_send (nsend)
   CHARACTER(len=32)          :: cpl_recv (nrecv)

   PRIVATE

   INTEGER                    :: localRank       ! local MPI rank
   INTEGER                    :: comp_id         ! id returned by prism_init_comp
   INTEGER                    :: range(5)

   INTEGER, PARAMETER         :: localRoot = 0
   INTEGER                    :: localSize       ! local MPI size
   INTEGER                    :: localComm       ! local MPI communicator
   LOGICAL                    :: commRank        ! true for ranks doing OASIS communication

   LOGICAL, SAVE              :: prism_was_initialized
   LOGICAL, SAVE              :: prism_was_terminated
   INTEGER, SAVE              :: write_grid

   INTEGER                    :: ierror          ! return error code

   REAL(wp), DIMENSION(:,:), ALLOCATABLE :: exfld  ! Temporary buffer for receiving

#ifdef key_cpl_rootexchg
   LOGICAL                    :: rootexchg = .true.   ! logical switch
#else
   LOGICAL                    :: rootexchg = .false.  ! logical switch
#endif
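   !
   ! Exchange strategy: with key_cpl_rootexchg every coupling field is
   ! gathered on process localRoot, exchanged with OASIS3 in a single
   ! global put/get and scattered back afterwards; without the key each
   ! process exchanges its own halo-free subdomain directly.  The work
   ! arrays declared below are used in the root-exchange case only:
   ! "ranges" keeps the (global start i, size i, global start j, size j,
   ! total size) quintuple of every rank, "buffer" holds one gathered
   ! subdomain at a time.
   !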
   REAL(wp), DIMENSION(:),   ALLOCATABLE :: buffer  ! Temporary buffer for exchange
   INTEGER,  DIMENSION(:,:), ALLOCATABLE :: ranges  ! Temporary buffer for exchange

   !! Routine accessibility
   PUBLIC cpl_prism_init
   PUBLIC cpl_prism_define
   PUBLIC cpl_prism_send
   PUBLIC cpl_prism_recv
   PUBLIC cpl_prism_finalize

   PUBLIC send_id, recv_id

   !!----------------------------------------------------------------------
   !!   OPA 9.0 , LOCEAN-IPSL (2006)
   !!   $Id$
   !! Software governed by the CeCILL licence (modipsl/doc/NEMO_CeCILL.txt)
   !!----------------------------------------------------------------------

CONTAINS

   SUBROUTINE cpl_prism_init( localCommunicator )

      IMPLICIT NONE

      !!-------------------------------------------------------------------
      !!             ***  ROUTINE cpl_prism_init  ***
      !!
      !! ** Purpose :   Initialize coupled mode communication for ocean
      !!    exchange between AGCM, OGCM and COUPLER. (OASIS3 software)
      !!
      !! ** Method  :   OASIS3 MPI communication
      !!--------------------------------------------------------------------
      !! * Arguments
      !!
      INTEGER, INTENT(OUT)       :: localCommunicator
      !!
      !! * Local declarations
      !!
      CHARACTER(len=4)           :: comp_name   ! name of this PRISM component
      !!
      !!--------------------------------------------------------------------
      !!
      IF(lwp) WRITE(numout,*)
      IF(lwp) WRITE(numout,*) 'cpl_prism_init : initialization in coupled ocean/atmosphere case'
      IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~~~~~'
      IF(lwp) WRITE(numout,*)

#if defined key_flx_bulk_monthly || defined key_flx_bulk_daily || defined key_flx_forced_daily
      IF(lwp) WRITE(numout,cform_err)
      IF(lwp) WRITE(numout,*) ' key_coupled and key_flx_bulk_* / key_flx_forced_daily are incompatible'
      nstop = nstop + 1
#endif

      comp_name = 'opa9'

      !------------------------------------------------------------------
      ! 1st Initialize the PRISM system for the application
      !------------------------------------------------------------------
      CALL prism_init_comp_proto ( comp_id, comp_name, ierror )
      IF ( ierror /= PRISM_Ok ) &
         CALL prism_abort_proto (comp_id, 'cpl_prism_init', 'Failure in prism_init_comp_proto')
      prism_was_initialized = .true.

      !------------------------------------------------------------------
      ! 2nd Get an MPI communicator for OPA local communication
      !------------------------------------------------------------------
      CALL prism_get_localcomm_proto ( localComm, ierror )
      IF ( ierror /= PRISM_Ok ) &
         CALL prism_abort_proto (comp_id, 'cpl_prism_init', 'Failure in prism_get_localcomm_proto' )

      localCommunicator = localComm

   END SUBROUTINE cpl_prism_init
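   !---------------------------------------------------------------------
   !  Typical calling sequence seen from the ocean side (illustrative
   !  sketch only; the caller and its time loop live outside this module,
   !  and sst_field / taux_field are placeholder array names):
   !
   !     CALL cpl_prism_init( localCommunicator )  ! once, at start-up
   !     CALL cpl_prism_define()                   ! once, after the domain decomposition is set
   !     ! ... every coupling time step, with date = time in seconds:
   !     CALL cpl_prism_send( send_id(1), date, sst_field,  info )
   !     CALL cpl_prism_recv( recv_id(1), date, taux_field, info )
   !     ! ... after the time loop:
   !     CALL cpl_prism_finalize()
   !---------------------------------------------------------------------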
   SUBROUTINE cpl_prism_define ()

      IMPLICIT NONE

      !!-------------------------------------------------------------------
      !!             ***  ROUTINE cpl_prism_define  ***
      !!
      !! ** Purpose :   Define grid and field information for ocean
      !!    exchange between AGCM, OGCM and COUPLER. (OASIS3 software)
      !!
      !! ** Method  :   OASIS3 MPI communication
      !!--------------------------------------------------------------------
      !! * Arguments
      !!
      !! * Local declarations
      !!
      INTEGER                    :: grid_id(2)     ! id returned by prism_def_grid
      INTEGER                    :: part_id

      INTEGER                    :: paral(5)       ! OASIS3 box partition
      INTEGER                    :: shape(2,3)     ! shape of arrays passed to PSMILe
      INTEGER                    :: nodim(2)
      INTEGER                    :: data_type      ! data type of transients

      INTEGER                    :: ji, jj         ! local loop indices
      INTEGER                    :: nx, ny, nc     ! local variables
      INTEGER                    :: im1, ip1
      INTEGER                    :: jm1, jp1
      INTEGER                    :: i_grid         ! loop index
      INTEGER                    :: info
      INTEGER                    :: maxlen
      INTEGER                    :: mask(jpi,jpj)
      REAL(kind=wp)              :: area(jpi,jpj)

      CHARACTER(len=4)           :: point_name     ! name of the grid points

      REAL(kind=wp)              :: rclam(jpi,jpj,4)
      REAL(kind=wp)              :: rcphi(jpi,jpj,4)

      REAL(kind=wp)              :: glam_b(jpi,jpj) ! buffer for orca2 grid correction
      REAL(kind=wp)              :: gphi_b(jpi,jpj) ! buffer for orca2 grid correction
      !!
      !!--------------------------------------------------------------------

      IF(lwp) WRITE(numout,*)
      IF(lwp) WRITE(numout,*) 'cpl_prism_define : initialization in coupled ocean/atmosphere case'
      IF(lwp) WRITE(numout,*) '~~~~~~~~~~~~~~~~~'
      IF(lwp) WRITE(numout,*)

#if defined key_flx_bulk_monthly || defined key_flx_bulk_daily || defined key_flx_forced_daily
      IF(lwp) WRITE(numout,cform_err)
      IF(lwp) WRITE(numout,*) ' key_coupled and key_flx_bulk_... are incompatible'
      nstop = nstop + 1
#endif

      ! -----------------------------------------------------------------
      ! ... Some initialisation
      ! -----------------------------------------------------------------
      send_id = 0
      recv_id = 0

#if defined key_mpp_mpi
      ! -----------------------------------------------------------------
      ! ... Some MPI stuff relevant for optional exchange via root only
      ! -----------------------------------------------------------------
      commRank  = .false.
      localRank = mpprank   ! from lib_mpp
      localSize = mppsize   ! from lib_mpp

      IF ( rootexchg ) THEN
         IF ( localRank == localRoot ) commRank = .true.
      ELSE
         commRank = .true.
      ENDIF

      IF ( rootexchg .and. localRank == localRoot ) THEN
         ALLOCATE(ranges(5,0:localSize-1), stat = ierror)
         IF (ierror > 0) THEN
            CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in allocating Integer')
            RETURN
         ENDIF
      ENDIF
#else
      !
      ! For non-parallel configurations the one and only process ("localRoot")
      ! takes part in the communication
      !
      localRank = localRoot
      commRank  = .true.
#endif

      ! -----------------------------------------------------------------
      ! ... If necessary the root process writes the global grid info
      ! -----------------------------------------------------------------
      IF ( localRank == localRoot ) THEN

         WRITE(numout,*)'Opening file SSTOCEAN, unit= 199'

         OPEN (199,STATUS='NEW',FILE="sstocean",FORM='UNFORMATTED',err=310)

         ! In case the sstocean of OASIS3 from a previous run exists
         ! the program jumps to the end of the if-block
         !
         !*    2.0    Write exchange fields to OASIS data file.
         !            -----------------------------------------

         WHERE (tmask(:,:,1) > 0.5 )
            mask(:,:) = 0
         ELSE WHERE
            mask(:,:) = 1
         END WHERE

         ! Initialise ice mask at the very first start only
         frld = 1.

         WRITE(199) 'SSTOCEAN'
         WRITE(199) (tn(:,:,1)*mask(:,:))+rt0

         WRITE(199) 'SICOCEAN'
         WRITE(199) (1.-frld(:,:))*mask(:,:)

#if defined key_cpl_albedo
         tn_ice  = 271.285
         alb_ice = 0.75

         WRITE(199) 'STIOCEAN'
         WRITE(199) tn_ice(:,:)

         WRITE(199) 'SAIOCEAN'
         WRITE(199) alb_ice(:,:)
#else
         hicif = 0.
         hsnif = 0.

         WRITE(199) 'SITOCEAN'
         WRITE(199) hicif(:,:)*mask(:,:)

         WRITE(199) 'SNTOCEAN'
         WRITE(199) hsnif(:,:)*mask(:,:)
#endif

#if defined key_cpl_ocevel
         un(:,:,1) = 0.
         vn(:,:,1) = 0.

         WHERE (umask(:,:,1) > 0.5 )
            mask(:,:) = 0
         ELSE WHERE
            mask(:,:) = 1
         END WHERE

         WRITE(199) 'SUNOCEAN'
         WRITE(199) un(:,:,1)*mask(:,:)

         WHERE (vmask(:,:,1) > 0.5 )
            mask(:,:) = 0
         ELSE WHERE
            mask(:,:) = 1
         END WHERE

         WRITE(199) 'SVNOCEAN'
         WRITE(199) vn(:,:,1)*mask(:,:)
#endif

         WRITE(numout,*)
         WRITE(numout,*)' sstocean written'
         WRITE(numout,*)' ***************'

         CLOSE(199)

 310     CONTINUE

         CALL prism_start_grids_writing ( write_grid )

      ENDIF ! localRank == localRoot

      IF ( localRank == localRoot .and. write_grid == 1 ) THEN

         !------------------------------------------------------------------
         ! 1st write global grid information (ORCA tripolar) characteristics
         !     for surface coupling into an OASIS3 specific grid file. For
         !     surface coupling it is sufficient to specify only one vertical
         !     z-level.
         !------------------------------------------------------------------
         !
         !     ... Treat corners in the horizontal plane
         !
         nx = jpi
         ny = jpj
         nc = 4
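         !
         ! For each of the three grids the four cell corners are taken from
         ! the complementary staggered grid (T cells from F points, U cells
         ! from V points, V cells from U points) and stored anticlockwise
         ! starting at the north-east corner, which is the orientation the
         ! SCRIP-based OASIS3 interpolation expects.
         !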
         DO i_grid = 1, 3

            IF ( i_grid == 1 ) THEN

               ! --------------------------------------------------------
               ! ... Write the grid info for T points
               ! --------------------------------------------------------

               point_name = 'opat'

               glam_b = glamt
               gphi_b = gphit

               DO ji = 1, jpi
                  DO jj = 1, jpj
                     im1 = ji-1
                     jm1 = jj-1
                     IF (ji == 1) im1 = jpi-2
                     IF (jj == 1) jm1 = jj

                     rclam(ji,jj,1) = glamf(ji,jj)
                     rclam(ji,jj,2) = glamf(im1,jj)
                     rclam(ji,jj,3) = glamf(im1,jm1)
                     rclam(ji,jj,4) = glamf(ji,jm1)

                     rcphi(ji,jj,1) = gphif(ji,jj)
                     rcphi(ji,jj,2) = gphif(im1,jj)
                     rcphi(ji,jj,3) = gphif(im1,jm1)
                     rcphi(ji,jj,4) = gphif(ji,jm1)
                  END DO
               END DO

               ! Correction of one (land) grid cell of the orca2 grid.
               ! It was causing problems with the SCRIP interpolation.
               IF (jpiglo == 182 .AND. jpjglo == 149) THEN
                  rclam(145,106,2) = -1.0
                  rcphi(145,106,2) = 41.0
               ENDIF

               WHERE (tmask(:,:,1) > 0.5 )
                  mask(:,:) = 0
               ELSE WHERE
                  mask(:,:) = 1
               END WHERE

               area = e1t * e2t

            ELSE IF ( i_grid == 2 ) THEN

               ! --------------------------------------------------------
               ! ... Write the grid info for u points
               ! --------------------------------------------------------

               point_name = 'opau'

               glam_b = glamu
               gphi_b = gphiu

               DO ji = 1, jpi
                  DO jj = 1, jpj
                     ip1 = ji+1
                     jm1 = jj-1
                     IF (ji == jpiglo) ip1 = 3
                     IF (jj == 1) jm1 = jj

                     rclam(ji,jj,1) = glamv(ip1,jj)
                     rclam(ji,jj,2) = glamv(ji,jj)
                     rclam(ji,jj,3) = glamv(ji,jm1)
                     rclam(ji,jj,4) = glamv(ip1,jm1)

                     rcphi(ji,jj,1) = gphiv(ip1,jj)
                     rcphi(ji,jj,2) = gphiv(ji,jj)
                     rcphi(ji,jj,3) = gphiv(ji,jm1)
                     rcphi(ji,jj,4) = gphiv(ip1,jm1)
                  END DO
               END DO

               ! Correction of three (land) grid cells of the orca2 grid.
               ! They were causing problems with the SCRIP interpolation.
               IF (jpiglo == 182 .AND. jpjglo == 149) THEN
                  glam_b(144,106)   = -1.0
                  gphi_b(144,106)   = 40.5
                  rclam (144,106,2) = -1.5
                  rcphi (144,106,2) = 41.0

                  glam_b(144,107)   = -1.0
                  gphi_b(144,107)   = 41.5
                  rclam (144,107,2) = -1.5
                  rcphi (144,107,2) = 42.0
                  rclam (144,107,3) = -1.5
                  rcphi (144,107,3) = 41.0

                  glam_b(144,108)   = -1.0
                  gphi_b(144,108)   = 42.5
                  rclam (144,108,2) = -1.5
                  rcphi (144,108,2) = 43.0
                  rclam (144,108,3) = -1.5
                  rcphi (144,108,3) = 42.0
               ENDIF

               WHERE (umask(:,:,1) > 0.5 )
                  mask(:,:) = 0
               ELSE WHERE
                  mask(:,:) = 1
               END WHERE

               area = e1u * e2u
            ELSE IF ( i_grid == 3 ) THEN

               ! --------------------------------------------------------
               ! ... Write the grid info for v points
               ! --------------------------------------------------------

               point_name = 'opav'

               glam_b = glamv
               gphi_b = gphiv

               DO ji = 1, jpi
                  DO jj = 1, jpj
                     im1 = ji-1
                     jp1 = jj+1
                     IF (ji == 1) im1 = jpiglo-2
                     IF (jj == jpjglo) jp1 = jj

                     rclam(ji,jj,1) = glamu(ji,jp1)
                     rclam(ji,jj,2) = glamu(im1,jp1)
                     rclam(ji,jj,3) = glamu(im1,jj)
                     rclam(ji,jj,4) = glamu(ji,jj)

                     rcphi(ji,jj,1) = gphiu(ji,jp1)
                     rcphi(ji,jj,2) = gphiu(im1,jp1)
                     rcphi(ji,jj,3) = gphiu(im1,jj)
                     rcphi(ji,jj,4) = gphiu(ji,jj)
                  END DO
               END DO

               ! Correction of one (land) grid cell of the orca2 grid.
               ! It was causing problems with the SCRIP interpolation.
               IF (jpiglo == 182 .AND. jpjglo == 149) THEN
                  rclam(145,105,2) = -1.0
                  rcphi(145,105,2) = 40.5
               ENDIF

               WHERE (vmask(:,:,1) > 0.5 )
                  mask(:,:) = 0
               ELSE WHERE
                  mask(:,:) = 1
               END WHERE

               area = e1v * e2v

            ENDIF ! i_grid

            WHERE (glam_b(:,:) < 0.)
               glam_b(:,:) = glam_b(:,:) + 360.
            END WHERE
            WHERE (glam_b(:,:) > 360.)
               glam_b(:,:) = glam_b(:,:) - 360.
            END WHERE

            WHERE (rclam(:,:,:) < 0.)
               rclam(:,:,:) = rclam(:,:,:) + 360.
            END WHERE
            WHERE (rclam(:,:,:) > 360.)
               rclam(:,:,:) = rclam(:,:,:) - 360.
            END WHERE

            mask(:,jpjglo) = 1

            CALL prism_write_grid   ( point_name, nx, ny, glam_b, gphi_b )
            CALL prism_write_corner ( point_name, nx, ny, nc, rclam, rcphi )
            CALL prism_write_mask   ( point_name, nx, ny, mask )
            CALL prism_write_area   ( point_name, nx, ny, area )

         END DO ! i_grid

         CALL prism_terminate_grids_writing ()

      ENDIF ! localRank == localRoot .and. write_grid == 1

      ! -----------------------------------------------------------------
      ! ... Define the partition
      ! -----------------------------------------------------------------

      IF ( rootexchg ) THEN

         paral(1) = 2              ! box partitioning
         paral(2) = 0              ! NEMO lower left corner global offset
         paral(3) = jpiglo         ! local extent in i
         paral(4) = jpjglo         ! local extent in j
         paral(5) = jpiglo         ! global extent in x

         range(1) = nimpp-1+nldi   ! global start in i
         range(2) = nlei-nldi+1    ! local size in i of valid region
         range(3) = njmpp-1+nldj   ! global start in j
         range(4) = nlej-nldj+1    ! local size in j of valid region
         range(5) = range(2) &
                  * range(4)       ! local horizontal size

         IF(ln_ctl) THEN
            write(numout,*) ' rootexchg: range(1:5)', range
         ENDIF
         !
         ! Collect ranges from all NEMO procs on the local root process
         !
         CALL mpi_gather(range,  5, MPI_INTEGER, &
                         ranges, 5, MPI_INTEGER, localRoot, localComm, ierror)

         IF ( localRank == localRoot ) THEN

            maxlen = maxval(ranges(5,:))

            ALLOCATE(buffer(1:maxlen), stat = ierror)
            IF (ierror > 0) THEN
               CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in allocating buffer')
               RETURN
            ENDIF

         ENDIF

      ELSE

         paral(1) = 2              ! box partitioning
!2dtest  paral(2) = jpiglo &
!2dtest             * (nldj-1+njmpp-1) &
!2dtest             + (nldi-1+nimpp-1)           ! NEMO lower left corner global offset
         paral(2) = jpiglo &
                    * (nldj-1+njmpp-1)           ! NEMO lower left corner global offset
         paral(3) = nlei-nldi+1    ! local extent in i
         paral(4) = nlej-nldj+1    ! local extent in j
         paral(5) = jpiglo         ! global extent in x

         IF(ln_ctl) THEN
            print*, ' multiexchg: paral (1:5)', paral
            print*, ' multiexchg: jpi, jpj =', jpi, jpj
            print*, ' multiexchg: nldi, nlei, nimpp =', nldi, nlei, nimpp
            print*, ' multiexchg: nldj, nlej, njmpp =', nldj, nlej, njmpp
         ENDIF

         IF ( paral(3) /= nlei-nldi+1 ) THEN
            print*, 'WARNING!!! in cpl_oasis3 - cpl_prism_define'
            print*, 'cpl_prism_define: local extent in i is ', paral(3), ' should equal ', nlei-nldi+1
         ENDIF
         IF ( paral(4) /= nlej-nldj+1 ) THEN
            print*, 'WARNING!!! in cpl_oasis3 - cpl_prism_define'
            print*, 'cpl_prism_define: local extent in j is ', paral(4), ' should equal ', nlej-nldj+1
         ENDIF

      ENDIF
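      !
      ! The "paral" vector assembled above follows the OASIS3 box-partition
      ! convention: paral(1)=2 selects the box layout, paral(2) is the 0-based
      ! global offset of the local lower-left corner, paral(3) and paral(4)
      ! are the local extents in i and j, and paral(5) is the global extent
      ! in i.  With root exchange the root simply declares the full
      ! jpiglo x jpjglo grid (offset 0).  Illustrative example with
      ! hypothetical numbers: on a jpiglo = 182 grid, a process whose
      ! halo-free subdomain is 180 x 25 points and starts at global row
      ! njmpp+nldj-1 = 50 would declare
      !    paral = (/ 2, 182*49, 180, 25, 182 /)
      !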
      IF ( commRank ) &
         CALL prism_def_partition_proto ( part_id, paral, ierror )
      grid_id(1) = part_id

      !------------------------------------------------------------------
      ! 3rd Declare the transient variables
      !------------------------------------------------------------------
      !
      ! ... Define symbolic names for the transient fields sent by the ocean
      !     These must be identical to the names specified in the SMIOC file.
      !
      cpl_send( 1)='SSTOCEAN' ! sea surface temperature              -> sst_io
      cpl_send( 2)='SICOCEAN' ! sea ice area fraction                -> 1.-frld
#if defined key_cpl_albedo
      cpl_send( 3)='STIOCEAN' ! surface temperature over sea ice     -> tn_ice
      cpl_send( 4)='SAIOCEAN' ! albedo over sea ice                  -> alb_ice
#else
      cpl_send( 3)='SITOCEAN' ! sea ice thickness                    -> hicif (only 1 layer available!)
      cpl_send( 4)='SNTOCEAN' ! surface snow thickness over sea ice  -> hsnif
#endif
#if defined key_cpl_ocevel
      cpl_send( 5)='SUNOCEAN' ! U-velocity                           -> un
      cpl_send( 6)='SVNOCEAN' ! V-velocity                           -> vn
#endif
      !
      ! ... Define symbolic names for transient fields received by the ocean.
      !     These must be identical to the names specified in the SMIOC file.
      !
      ! ... a) U-Grid fields
      !
      cpl_recv( 1)='TXWOCEWU' ! weighted surface downward eastward stress
      cpl_recv( 2)='TYWOCEWU' ! weighted surface downward northward stress
      cpl_recv( 3)='TXIOCEWU' ! weighted surface downward eastward stress over ice
      cpl_recv( 4)='TYIOCEWU' ! weighted surface downward northward stress over ice
      !
      ! ... b) V-Grid fields
      !
      cpl_recv( 5)='TXWOCEWV' ! weighted surface downward eastward stress
      cpl_recv( 6)='TYWOCEWV' ! weighted surface downward northward stress
      cpl_recv( 7)='TXIOCEWV' ! weighted surface downward eastward stress over ice
      cpl_recv( 8)='TYIOCEWV' ! weighted surface downward northward stress over ice
      !
      ! ... c) T-Grid fields
      !
      cpl_recv( 9)='FRWOCEPE' ! P-E over water                             -> zpew
      cpl_recv(10)='FRIOCEPE' ! P-E over ice                               -> zpei
      cpl_recv(11)='FRROCESN' ! surface downward snow fall                 -> zpsol
      cpl_recv(12)='FRIOCEEV' ! surface upward snow flux where sea ice     -> zevice
      cpl_recv(13)='QSWOCESR' ! surface net downward shortwave flux        -> qsr_oce
      cpl_recv(14)='QSWOCENS' ! surface downward non-solar heat flux in air -> qnsr_oce
      cpl_recv(15)='QSIOCESR' ! solar heat flux on sea ice                 -> qsr_ice
      cpl_recv(16)='QSIOCENS' ! non-solar heat flux on sea ice             -> qnsr_ice
      cpl_recv(17)='QSIOCEDQ' ! non-solar heat flux derivative             -> dqns_ice
#ifdef key_cpl_discharge
      cpl_recv(18)='FRWOCEIS' ! ice discharge into ocean                   -> calving
      cpl_recv(19)='FRWOCERD' ! river discharge into ocean                 -> zrunriv
      cpl_recv(20)='FRWOCECD' ! continental discharge into ocean           -> zruncot
#endif
      !
      ! data_type has to be PRISM_REAL as PRISM_DOUBLE is not supported.
      ! For exchange of double precision fields OASIS3 has to be compiled
      ! with use_realtype_single. (see OASIS3 User Guide prism_2-4, 5th Ed.,
      ! p. 13 and p. 53 for further explanation.)
      !
      data_type = PRISM_REAL

      nodim(1) = 3 ! check
      nodim(2) = 0
      !
      ! ... Define the shape for the area that excludes the halo
      !     For serial configuration (key_mpp_mpi not being active)
      !     nl* is set to the global values 1 and jp*glo.
      !
      IF ( rootexchg ) THEN
         shape(1,1) = 1
         shape(2,1) = jpiglo
         shape(1,2) = 1
         shape(2,2) = jpjglo
         shape(1,3) = 1
         shape(2,3) = 1
      ELSE
         shape(1,1) = 1
         shape(2,1) = nlei-nldi+1 ! jpi
         shape(1,2) = 1
         shape(2,2) = nlej-nldj+1 ! jpj
         shape(1,3) = 1
         shape(2,3) = 1
      ENDIF
      !
      ! -----------------------------------------------------------------
      ! ... Allocate memory for data exchange
      ! -----------------------------------------------------------------
      !
      ALLOCATE(exfld(shape(1,1):shape(2,1),shape(1,2):shape(2,2)), stat = ierror)
      IF (ierror > 0) THEN
         CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in allocating exfld')
         RETURN
      ENDIF
      !
      ! ... Announce send variables, all on T points.
      !
      info = PRISM_Out
      !
      IF ( commRank ) THEN

         DO ji = 1, nsend
!           if ( ji == 2 ) ; then ; nodim(2) = 2 ; else ; nodim(2) = 0 ; endif
            CALL prism_def_var_proto (send_id(ji), cpl_send(ji), grid_id(1), &
                 nodim, info, shape, data_type, ierror)
            IF ( ierror /= PRISM_Ok ) THEN
               PRINT *, 'Failed to define transient ', ji, TRIM(cpl_send(ji))
               CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in prism_def_var')
            ENDIF
         ENDDO
         !
         nodim(1) = 3 ! check
         nodim(2) = 0
         !
         ! ... Announce recv variables.
         !
         info = PRISM_In
         !
         ! ... a) on U points
         !
         DO ji = 1, 4
            CALL prism_def_var_proto (recv_id(ji), cpl_recv(ji), grid_id(1), &
                 nodim, info, shape, data_type, ierror)
            IF ( ierror /= PRISM_Ok ) THEN
               PRINT *, 'Failed to define transient ', ji, TRIM(cpl_recv(ji))
               CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in prism_def_var')
            ENDIF
         ENDDO
         !
         ! ... b) on V points
         !
         DO ji = 5, 8
            CALL prism_def_var_proto (recv_id(ji), cpl_recv(ji), grid_id(1), &
                 nodim, info, shape, data_type, ierror)
            IF ( ierror /= PRISM_Ok ) THEN
               PRINT *, 'Failed to define transient ', TRIM(cpl_recv(ji))
               CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in prism_def_var')
            ENDIF
         ENDDO
         !
         ! ... c) on T points
         !
         DO ji = 9, nrecv
            CALL prism_def_var_proto (recv_id(ji), cpl_recv(ji), grid_id(1), &
                 nodim, info, shape, data_type, ierror)
            IF ( ierror /= PRISM_Ok ) THEN
               PRINT *, 'Failed to define transient ', TRIM(cpl_recv(ji))
               CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in prism_def_var')
            ENDIF
         ENDDO

      ENDIF ! commRank

      !------------------------------------------------------------------
      ! 4th End of definition phase
      !------------------------------------------------------------------
      IF ( commRank ) THEN
         CALL prism_enddef_proto(ierror)
         IF ( ierror /= PRISM_Ok ) &
            CALL prism_abort_proto ( comp_id, 'cpl_prism_define', 'Failure in prism_enddef')
      ENDIF

   END SUBROUTINE cpl_prism_define


   SUBROUTINE cpl_prism_send( var_id, date, data_array, info )

      IMPLICIT NONE

      !!---------------------------------------------------------------------
      !!              ***  ROUTINE cpl_prism_send  ***
      !!
      !! ** Purpose : - At each coupling time-step, this routine sends fields
      !!      like sst or ice cover to the coupler or remote application.
      !!----------------------------------------------------------------------
      !! * Arguments
      !!
      INTEGER, INTENT( IN )  :: var_id    ! variable Id
      INTEGER, INTENT( OUT ) :: info      ! OASIS3 info argument
      INTEGER, INTENT( IN )  :: date      ! ocean time-step in seconds
      REAL(wp)               :: data_array(:,:)
      !!
      !! * Local declarations
      !!
#if defined key_mpp_mpi
      REAL(wp)               :: global_array(jpiglo,jpjglo)
      !
!mpi  INTEGER                :: status(MPI_STATUS_SIZE)
!mpi  INTEGER                :: type       ! MPI data type
      INTEGER                :: request    ! MPI isend request
      INTEGER                :: ji, jj, jn ! local loop indices
#else
      INTEGER                :: ji
#endif
      !!
      !!--------------------------------------------------------------------
      !!
#if defined key_mpp_mpi

      request = 0

      IF ( rootexchg ) THEN
         !
!mpi     IF ( wp == 4 ) type = MPI_REAL
!mpi     IF ( wp == 8 ) type = MPI_DOUBLE_PRECISION
         !
         ! collect data on the local root process
         !
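         ! Each non-root process copies its halo-free subdomain into "exfld"
         ! and sends it to localRoot; the root unpacks every message into
         ! "global_array" using the "ranges" gathered in cpl_prism_define.
         ! The packing is row major over the subdomain, i.e. element (ji,jj)
         ! of the global field sits at buffer index
         !    (jj - ranges(3,jn)) * ranges(2,jn) + ji - ranges(1,jn) + 1
         ! in the message received from rank jn.
         !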
         if ( var_id == 1 .and. localRank == localRoot .and. ln_ctl ) then
            do ji = 0, localSize-1
               WRITE(numout,*) ' rootexchg: ranges for rank ', ji, ' are ', ranges(:,ji)
            enddo
         endif

         IF ( localRank /= localRoot ) THEN

            DO jj = nldj, nlej
               DO ji = nldi, nlei
                  exfld(ji-nldi+1,jj-nldj+1) = data_array(ji,jj)
               ENDDO
            ENDDO

!mpi        CALL mpi_send(exfld, range(5), type, localRoot, localRank, localComm, ierror)
            CALL mppsend (localRank, exfld, range(5), localRoot, request)

            if ( var_id == 1 .and. ln_ctl ) then
               WRITE(numout,*) ' rootexchg: This is process ', localRank
               WRITE(numout,*) ' rootexchg: We have a range of ', range
!              WRITE(numout,*) ' rootexchg: We got SST to process ', data_array
            endif

         ENDIF

         IF ( localRank == localRoot ) THEN

            DO jj = ranges(3,localRoot), ranges(3,localRoot)+ranges(4,localRoot)-1
               DO ji = ranges(1,localRoot), ranges(1,localRoot)+ranges(2,localRoot)-1
                  global_array(ji,jj) = data_array(ji,jj) ! workaround
               ENDDO
            ENDDO

            DO jn = 1, localSize-1

!mpi           CALL mpi_recv(buffer, ranges(5,jn), type, localRoot, jn, localComm, status, ierror)
               CALL mpprecv(jn, buffer, ranges(5,jn))

               if ( var_id == 1 .and. ln_ctl ) then
                  WRITE(numout,*) ' rootexchg: Handling data from process ', jn
!                 WRITE(numout,*) ' rootexchg: We got SST to process ', buffer
               endif

               DO jj = ranges(3,jn), ranges(3,jn)+ranges(4,jn)-1
                  DO ji = ranges(1,jn), ranges(1,jn)+ranges(2,jn)-1
                     global_array(ji,jj) = buffer((jj-ranges(3,jn))*ranges(2,jn) + ji-ranges(1,jn)+1)
                  ENDDO
               ENDDO

            ENDDO

            CALL prism_put_proto ( var_id, date, global_array, info )

         ENDIF

      ELSE

         DO jj = nldj, nlej
            DO ji = nldi, nlei
               exfld(ji-nldi+1,jj-nldj+1) = data_array(ji,jj)
            ENDDO
         ENDDO

         CALL prism_put_proto ( var_id, date, exfld, info )

      ENDIF

#else
      !
      ! send local data from every process to OASIS3
      !
      IF ( commRank ) &
         CALL prism_put_proto ( var_id, date, data_array, info )
#endif

      IF ( commRank ) THEN

         IF (ln_ctl .and. lwp) THEN

            IF ( info == PRISM_Sent     .OR. &
                 info == PRISM_ToRest   .OR. &
                 info == PRISM_SentOut  .OR. &
                 info == PRISM_ToRestOut ) THEN
               WRITE(numout,*) '****************'
               DO ji = 1, nsend
                  IF (var_id == send_id(ji) ) THEN
                     WRITE(numout,*) 'prism_put_proto: Outgoing ', cpl_send(ji)
                     EXIT
                  ENDIF
               ENDDO
               WRITE(numout,*) 'prism_put_proto: var_id ', var_id
               WRITE(numout,*) 'prism_put_proto: date ', date
               WRITE(numout,*) 'prism_put_proto: info ', info
               WRITE(numout,*) ' - Minimum value is ', MINVAL(data_array)
               WRITE(numout,*) ' - Maximum value is ', MAXVAL(data_array)
               WRITE(numout,*) ' - Sum value is ', SUM(data_array)
               WRITE(numout,*) '****************'
            ENDIF

         ENDIF

      ENDIF

   END SUBROUTINE cpl_prism_send
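   !---------------------------------------------------------------------
   !  cpl_prism_recv mirrors cpl_prism_send: with rootexchg the root calls
   !  prism_get_proto for the global field, broadcasts the OASIS3 return
   !  code and redistributes the subdomains with mppsend/mpprecv; the
   !  received field is copied into data_array only when that return code
   !  (PRISM_Recvd, PRISM_FromRest, PRISM_RecvOut, PRISM_FromRestOut)
   !  indicates that the coupler delivered data at this date.
   !---------------------------------------------------------------------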
   SUBROUTINE cpl_prism_recv( var_id, date, data_array, info )

      IMPLICIT NONE

      !!---------------------------------------------------------------------
      !!              ***  ROUTINE cpl_prism_recv  ***
      !!
      !! ** Purpose : - At each coupling time-step, this routine receives
      !!      fields like stresses and fluxes from the coupler or remote
      !!      application.
      !!----------------------------------------------------------------------
      !! * Arguments
      !!
      INTEGER, INTENT( IN )  :: var_id    ! variable Id
      INTEGER, INTENT( OUT ) :: info      ! OASIS3 info argument
      INTEGER, INTENT( IN )  :: date      ! ocean time-step in seconds
      REAL(wp),INTENT( OUT ) :: data_array(:,:)
      !!
      !! * Local declarations
      !!
#if defined key_mpp_mpi
      REAL(wp)               :: global_array(jpiglo,jpjglo)
      !
!     LOGICAL                :: action = .false.
      LOGICAL                :: action
!mpi  INTEGER                :: status(MPI_STATUS_SIZE)
!mpi  INTEGER                :: type       ! MPI data type
      INTEGER                :: request    ! MPI isend request
      INTEGER                :: ji, jj, jn ! local loop indices
#else
      INTEGER                :: ji
#endif
      !!
      !!--------------------------------------------------------------------
      !!
#ifdef key_mpp_mpi

      action  = .false.
      request = 0

      IF ( rootexchg ) THEN
         !
         ! receive data from OASIS3 on local root
         !
         IF ( commRank ) &
            CALL prism_get_proto ( var_id, date, global_array, info )
         CALL MPI_BCAST ( info, 1, MPI_INTEGER, localRoot, localComm, ierror )

      ELSE
         !
         ! receive local data from OASIS3 on every process
         !
         CALL prism_get_proto ( var_id, date, exfld, info )

      ENDIF

      IF ( info == PRISM_Recvd        .OR. &
           info == PRISM_FromRest     .OR. &
           info == PRISM_RecvOut      .OR. &
           info == PRISM_FromRestOut )      action = .true.

      IF (ln_ctl .and. lwp) THEN
         WRITE(numout,*) "info", info, var_id
         WRITE(numout,*) "date", date, var_id
         WRITE(numout,*) "action", action, var_id
      ENDIF

      IF ( rootexchg .and. action ) THEN
         !
!mpi     IF ( wp == 4 ) type = MPI_REAL
!mpi     IF ( wp == 8 ) type = MPI_DOUBLE_PRECISION
         !
         ! distribute data to processes
         !
         IF ( localRank == localRoot ) THEN

            DO jj = ranges(3,localRoot), ranges(3,localRoot)+ranges(4,localRoot)-1
               DO ji = ranges(1,localRoot), ranges(1,localRoot)+ranges(2,localRoot)-1
                  exfld(ji-ranges(1,localRoot)+1,jj-ranges(3,localRoot)+1) = global_array(ji,jj)
               ENDDO
            ENDDO

            DO jn = 1, localSize-1

               DO jj = ranges(3,jn), ranges(3,jn)+ranges(4,jn)-1
                  DO ji = ranges(1,jn), ranges(1,jn)+ranges(2,jn)-1
                     buffer((jj-ranges(3,jn))*ranges(2,jn) + ji-ranges(1,jn)+1) = global_array(ji,jj)
                  ENDDO
               ENDDO

!mpi           CALL mpi_send(buffer, ranges(5,jn), type, jn, jn, localComm, ierror)
               CALL mppsend (jn, buffer, ranges(5,jn), jn, request)

            ENDDO

         ENDIF

         IF ( localRank /= localRoot ) THEN
!mpi        CALL mpi_recv(exfld, range(5), type, localRoot, localRank, localComm, status, ierror)
            CALL mpprecv(localRank, exfld, range(5))
         ENDIF

      ENDIF

      IF ( action ) THEN

         data_array = 0.0

         DO jj = nldj, nlej
            DO ji = nldi, nlei
               data_array(ji,jj) = exfld(ji-nldi+1,jj-nldj+1)
            ENDDO
         ENDDO

         IF (ln_ctl .and. lwp) THEN
            WRITE(numout,*) '****************'
            DO ji = 1, nrecv
               IF (var_id == recv_id(ji) ) THEN
                  WRITE(numout,*) 'prism_get_proto: Incoming ', cpl_recv(ji)
                  EXIT
               ENDIF
            ENDDO
            WRITE(numout,*) 'prism_get_proto: var_id ', var_id
            WRITE(numout,*) 'prism_get_proto: date ', date
            WRITE(numout,*) 'prism_get_proto: info ', info
            WRITE(numout,*) ' - Minimum value is ', MINVAL(data_array)
            WRITE(numout,*) ' - Maximum value is ', MAXVAL(data_array)
            WRITE(numout,*) ' - Sum value is ', SUM(data_array)
            WRITE(numout,*) '****************'
         ENDIF

      ENDIF
#else

      CALL prism_get_proto ( var_id, date, exfld, info)

      IF (info == PRISM_Recvd        .OR. &
          info == PRISM_FromRest     .OR. &
          info == PRISM_RecvOut      .OR. &
          info == PRISM_FromRestOut )      THEN

         data_array = exfld

         IF (ln_ctl .and. lwp ) THEN
            WRITE(numout,*) '****************'
            DO ji = 1, nrecv
               IF (var_id == recv_id(ji) ) THEN
                  WRITE(numout,*) 'prism_get_proto: Incoming ', cpl_recv(ji)
                  EXIT
               ENDIF
            ENDDO
            WRITE(numout,*) 'prism_get_proto: var_id ', var_id
            WRITE(numout,*) 'prism_get_proto: date ', date
            WRITE(numout,*) 'prism_get_proto: info ', info
            WRITE(numout,*) ' - Minimum value is ', MINVAL(data_array)
            WRITE(numout,*) ' - Maximum value is ', MAXVAL(data_array)
            WRITE(numout,*) ' - Sum value is ', SUM(data_array)
            WRITE(numout,*) '****************'
         ENDIF

      ENDIF
#endif

   END SUBROUTINE cpl_prism_recv
   SUBROUTINE cpl_prism_finalize

      IMPLICIT NONE

      !!---------------------------------------------------------------------
      !!              ***  ROUTINE cpl_prism_finalize  ***
      !!
      !! ** Purpose : - Finalizes the coupling. If MPI_init has not been
      !!      called explicitly before cpl_prism_init it will also close
      !!      MPI communication.
      !!----------------------------------------------------------------------

      DEALLOCATE(exfld)

      if ( prism_was_initialized ) then

         if ( prism_was_terminated ) then
            print *, 'prism has already been terminated.'
         else
            call prism_terminate_proto ( ierror )
            prism_was_terminated = .true.
         endif

      else
         print *, 'Initialize prism before terminating it.'
      endif

   END SUBROUTINE cpl_prism_finalize

#endif

END MODULE cpl_oasis3