Changeset 14789 for NEMO/branches/2021/dev_r13747_HPC-11_mcastril_HPDAonline_DiagGPU/src/OCE/LBC/lib_mpp.F90
- Timestamp:
- 2021-05-05T13:18:04+02:00 (3 years ago)
- Location:
- NEMO/branches/2021/dev_r13747_HPC-11_mcastril_HPDAonline_DiagGPU
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
NEMO/branches/2021/dev_r13747_HPC-11_mcastril_HPDAonline_DiagGPU
- Property svn:externals
-
old | new
3  3   ^/utils/build/mk@HEAD mk
4  4   ^/utils/tools@HEAD tools
5  -   ^/vendors/AGRIF/dev_r12970_AGRIF_CMEMS ext/AGRIF
-  5   ^/vendors/AGRIF/dev@HEAD ext/AGRIF
6  6   ^/vendors/FCM@HEAD ext/FCM
7  7   ^/vendors/IOIPSL@HEAD ext/IOIPSL
-  8   ^/vendors/PPR@HEAD ext/PPR
8  9
9  10  # SETTE
10 -   ^/utils/CI/sette@13559 sette
-  11  ^/utils/CI/sette@14244 sette
-
- Property svn:externals
-
NEMO/branches/2021/dev_r13747_HPC-11_mcastril_HPDAonline_DiagGPU/src/OCE/LBC/lib_mpp.F90
r13636 r14789 20 20 !! 4.0 ! 2011 (G. Madec) move ctl_ routines from in_out_manager 21 21 !! 3.5 ! 2012 (S.Mocavero, I. Epicoco) Add mpp_lnk_bdy_3d/2d routines to optimize the BDY comm. 22 !! 3.5 ! 2013 (C. Ethe, G. Madec) message passing arrays as local variables 22 !! 3.5 ! 2013 (C. Ethe, G. Madec) message passing arrays as local variables 23 23 !! 3.5 ! 2013 (S.Mocavero, I.Epicoco - CMCC) north fold optimizations 24 24 !! 3.6 ! 2015 (O. Tintó and M. Castrillo - BSC) Added '_multiple' case for 2D lbc and max … … 55 55 USE dom_oce ! ocean space and time domain 56 56 USE in_out_manager ! I/O manager 57 #if ! defined key_mpi_off 58 USE MPI 59 #endif 57 60 58 61 IMPLICIT NONE … … 66 69 PUBLIC mppscatter, mppgather 67 70 PUBLIC mpp_ini_znl 71 PUBLIC mpp_ini_nc 68 72 PUBLIC mppsend, mpprecv ! needed by TAM and ICB routines 69 73 PUBLIC mppsend_sp, mpprecv_sp ! needed by TAM and ICB routines … … 72 76 PUBLIC mpp_bcast_nml 73 77 PUBLIC tic_tac 74 #if ! defined key_mpp_mpi78 #if defined key_mpp_off 75 79 PUBLIC MPI_wait 76 80 PUBLIC MPI_Wtime 77 81 #endif 78 82 79 83 !! * Interfaces 80 84 !! define generic interface for these routine as they are called sometimes … … 106 110 END INTERFACE 107 111 112 TYPE, PUBLIC :: PTR_4D_sp !: array of 4D pointers (used in lbclnk and lbcnfd) 113 REAL(sp), DIMENSION (:,:,:,:), POINTER :: pt4d 114 END TYPE PTR_4D_sp 115 116 TYPE, PUBLIC :: PTR_4D_dp !: array of 4D pointers (used in lbclnk and lbcnfd) 117 REAL(dp), DIMENSION (:,:,:,:), POINTER :: pt4d 118 END TYPE PTR_4D_dp 119 108 120 !! ========================= !! 109 121 !! MPI variable definition !! 110 122 !! ========================= !! 111 #if defined key_mpp_mpi 112 !$AGRIF_DO_NOT_TREAT 113 INCLUDE 'mpif.h' 114 !$AGRIF_END_DO_NOT_TREAT 123 #if ! defined key_mpi_off 115 124 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .TRUE. 
!: mpp flag 116 #else 125 #else 117 126 INTEGER, PUBLIC, PARAMETER :: MPI_STATUS_SIZE = 1 118 127 INTEGER, PUBLIC, PARAMETER :: MPI_REAL = 4 … … 120 129 LOGICAL, PUBLIC, PARAMETER :: lk_mpp = .FALSE. !: mpp flag 121 130 #endif 122 123 INTEGER, PARAMETER :: nprocmax = 2**10 ! maximun dimension (required to be a power of 2)124 131 125 132 INTEGER, PUBLIC :: mppsize ! number of process … … 131 138 INTEGER :: MPI_SUMDD 132 139 140 ! Neighbourgs informations 141 INTEGER, PARAMETER, PUBLIC :: n_hlsmax = 3 142 INTEGER, DIMENSION( 8), PUBLIC :: mpinei !: 8-neighbourg MPI indexes (starting at 0, -1 if no neighbourg) 143 INTEGER, DIMENSION(n_hlsmax,8), PUBLIC :: mpiSnei !: 8-neighbourg Send MPI indexes (starting at 0, -1 if no neighbourg) 144 INTEGER, DIMENSION(n_hlsmax,8), PUBLIC :: mpiRnei !: 8-neighbourg Recv MPI indexes (starting at 0, -1 if no neighbourg) 145 INTEGER, PARAMETER, PUBLIC :: jpwe = 1 !: WEst 146 INTEGER, PARAMETER, PUBLIC :: jpea = 2 !: EAst 147 INTEGER, PARAMETER, PUBLIC :: jpso = 3 !: SOuth 148 INTEGER, PARAMETER, PUBLIC :: jpno = 4 !: NOrth 149 INTEGER, PARAMETER, PUBLIC :: jpsw = 5 !: South-West 150 INTEGER, PARAMETER, PUBLIC :: jpse = 6 !: South-East 151 INTEGER, PARAMETER, PUBLIC :: jpnw = 7 !: North-West 152 INTEGER, PARAMETER, PUBLIC :: jpne = 8 !: North-East 153 154 LOGICAL, DIMENSION(8), PUBLIC :: l_SelfPerio ! should we explicitely take care of I/J periodicity 155 LOGICAL, PUBLIC :: l_IdoNFold 156 133 157 ! 
variables used for zonal integration 134 INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average135 LOGICAL, PUBLIC :: l_znl_root !: True on the 'left'most processor on the same row136 INTEGER :: ngrp_znl !group ID for the znl processors137 INTEGER :: ndim_rank_znl !number of processors on the same zonal average158 INTEGER, PUBLIC :: ncomm_znl !: communicator made by the processors on the same zonal average 159 LOGICAL, PUBLIC :: l_znl_root !: True on the 'left'most processor on the same row 160 INTEGER :: ngrp_znl !: group ID for the znl processors 161 INTEGER :: ndim_rank_znl !: number of processors on the same zonal average 138 162 INTEGER, DIMENSION(:), ALLOCATABLE, SAVE :: nrank_znl ! dimension ndim_rank_znl, number of the procs into the same znl domain 163 164 ! variables used for MPI3 neighbourhood collectives 165 INTEGER, DIMENSION(n_hlsmax), PUBLIC :: mpi_nc_com4 ! MPI3 neighbourhood collectives communicator 166 INTEGER, DIMENSION(n_hlsmax), PUBLIC :: mpi_nc_com8 ! MPI3 neighbourhood collectives communicator (with diagionals) 139 167 140 168 ! North fold condition in mpp_mpi with jpni > 1 (PUBLIC for TAM) … … 178 206 REAL(dp), DIMENSION(2), PUBLIC :: waiting_time = 0._dp 179 207 REAL(dp) , PUBLIC :: compute_time = 0._dp, elapsed_time = 0._dp 180 208 181 209 REAL(wp), DIMENSION(:), ALLOCATABLE, SAVE :: tampon ! buffer in case of bsend 182 210 183 211 LOGICAL, PUBLIC :: ln_nnogather !: namelist control of northfold comms 184 LOGICAL, PUBLIC :: l_north_nogather = .FALSE. !: internal control of northfold comms 185 212 INTEGER, PUBLIC :: nn_comm !: namelist control of comms 213 214 INTEGER, PUBLIC, PARAMETER :: jpfillnothing = 1 215 INTEGER, PUBLIC, PARAMETER :: jpfillcst = 2 216 INTEGER, PUBLIC, PARAMETER :: jpfillcopy = 3 217 INTEGER, PUBLIC, PARAMETER :: jpfillperio = 4 218 INTEGER, PUBLIC, PARAMETER :: jpfillmpi = 5 219 186 220 !! 
* Substitutions 187 221 # include "do_loop_substitute.h90" … … 204 238 LOGICAL :: llmpi_init 205 239 !!---------------------------------------------------------------------- 206 #if defined key_mpp_mpi240 #if ! defined key_mpi_off 207 241 ! 208 242 CALL mpi_initialized ( llmpi_init, ierr ) … … 218 252 IF( ierr /= MPI_SUCCESS ) CALL ctl_stop( 'STOP', ' lib_mpp: Error in routine mpi_init' ) 219 253 ENDIF 220 254 221 255 IF( PRESENT(localComm) ) THEN 222 256 IF( Agrif_Root() ) THEN … … 260 294 INTEGER , INTENT(in ) :: kdest ! receive process number 261 295 INTEGER , INTENT(in ) :: ktyp ! tag of the message 262 INTEGER , INTENT(in 296 INTEGER , INTENT(inout) :: md_req ! argument for isend 263 297 !! 264 298 INTEGER :: iflag … … 266 300 !!---------------------------------------------------------------------- 267 301 ! 268 #if defined key_mpp_mpi302 #if ! defined key_mpi_off 269 303 IF (wp == dp) THEN 270 304 mpi_working_type = mpi_double_precision … … 289 323 INTEGER , INTENT(in ) :: kdest ! receive process number 290 324 INTEGER , INTENT(in ) :: ktyp ! tag of the message 291 INTEGER , INTENT(in 325 INTEGER , INTENT(inout) :: md_req ! argument for isend 292 326 !! 293 327 INTEGER :: iflag 294 328 !!---------------------------------------------------------------------- 295 329 ! 296 #if defined key_mpp_mpi330 #if ! defined key_mpi_off 297 331 CALL mpi_isend( pmess, kbytes, mpi_double_precision, kdest , ktyp, mpi_comm_oce, md_req, iflag ) 298 332 #endif … … 312 346 INTEGER , INTENT(in ) :: kdest ! receive process number 313 347 INTEGER , INTENT(in ) :: ktyp ! tag of the message 314 INTEGER , INTENT(in 348 INTEGER , INTENT(inout) :: md_req ! argument for isend 315 349 !! 316 350 INTEGER :: iflag 317 351 !!---------------------------------------------------------------------- 318 352 ! 319 #if defined key_mpp_mpi353 #if ! 
defined key_mpi_off 320 354 CALL mpi_isend( pmess, kbytes, mpi_real, kdest , ktyp, mpi_comm_oce, md_req, iflag ) 321 355 #endif … … 342 376 !!---------------------------------------------------------------------- 343 377 ! 344 #if defined key_mpp_mpi378 #if ! defined key_mpi_off 345 379 ! If a specific process number has been passed to the receive call, 346 380 ! use that one. Default is to use mpi_any_source … … 375 409 !!---------------------------------------------------------------------- 376 410 ! 377 #if defined key_mpp_mpi411 #if ! defined key_mpi_off 378 412 ! If a specific process number has been passed to the receive call, 379 413 ! use that one. Default is to use mpi_any_source … … 404 438 !!---------------------------------------------------------------------- 405 439 ! 406 #if defined key_mpp_mpi440 #if ! defined key_mpi_off 407 441 ! If a specific process number has been passed to the receive call, 408 442 ! use that one. Default is to use mpi_any_source … … 432 466 ! 433 467 itaille = jpi * jpj 434 #if defined key_mpp_mpi468 #if ! defined key_mpi_off 435 469 CALL mpi_gather( ptab, itaille, mpi_double_precision, pio, itaille , & 436 470 & mpi_double_precision, kp , mpi_comm_oce, ierror ) … … 459 493 itaille = jpi * jpj 460 494 ! 461 #if defined key_mpp_mpi495 #if ! defined key_mpi_off 462 496 CALL mpi_scatter( pio, itaille, mpi_double_precision, ptab, itaille , & 463 497 & mpi_double_precision, kp , mpi_comm_oce, ierror ) … … 468 502 END SUBROUTINE mppscatter 469 503 470 504 471 505 SUBROUTINE mpp_delay_sum( cdname, cdelay, y_in, pout, ldlast, kcom ) 472 506 !!---------------------------------------------------------------------- … … 488 522 COMPLEX(dp), ALLOCATABLE, DIMENSION(:) :: ytmp 489 523 !!---------------------------------------------------------------------- 490 #if defined key_mpp_mpi524 #if ! 
defined key_mpi_off 491 525 ilocalcomm = mpi_comm_oce 492 526 IF( PRESENT(kcom) ) ilocalcomm = kcom 493 527 494 528 isz = SIZE(y_in) 495 529 496 530 IF( narea == 1 .AND. numcom == -1 ) CALL mpp_report( cdname, ld_dlg = .TRUE. ) 497 531 … … 514 548 END IF 515 549 ENDIF 516 550 517 551 IF( ndelayid(idvar) == -1 ) THEN ! first call without restart: define %y1d and %z1d from y_in with blocking allreduce 518 552 ! -------------------------- … … 542 576 END SUBROUTINE mpp_delay_sum 543 577 544 578 545 579 SUBROUTINE mpp_delay_max( cdname, cdelay, p_in, pout, ldlast, kcom ) 546 580 !!---------------------------------------------------------------------- … … 552 586 CHARACTER(len=*), INTENT(in ) :: cdname ! name of the calling subroutine 553 587 CHARACTER(len=*), INTENT(in ) :: cdelay ! name (used as id) of the delayed operation 554 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in ! 555 REAL(wp), INTENT( out), DIMENSION(:) :: pout ! 588 REAL(wp), INTENT(in ), DIMENSION(:) :: p_in ! 589 REAL(wp), INTENT( out), DIMENSION(:) :: pout ! 556 590 LOGICAL, INTENT(in ) :: ldlast ! true if this is the last time we call this routine 557 591 INTEGER, INTENT(in ), OPTIONAL :: kcom … … 562 596 INTEGER :: MPI_TYPE 563 597 !!---------------------------------------------------------------------- 564 565 #if defined key_mpp_mpi598 599 #if ! defined key_mpi_off 566 600 if( wp == dp ) then 567 601 MPI_TYPE = MPI_DOUBLE_PRECISION … … 570 604 else 571 605 CALL ctl_stop( "Error defining type, wp is neither dp nor sp" ) 572 606 573 607 end if 574 608 … … 624 658 END SUBROUTINE mpp_delay_max 625 659 626 660 627 661 SUBROUTINE mpp_delay_rcv( kid ) 628 662 !!---------------------------------------------------------------------- 629 663 !! *** routine mpp_delay_rcv *** 630 664 !! 631 !! ** Purpose : force barrier for delayed mpp (needed for restart) 632 !! 633 !!---------------------------------------------------------------------- 634 INTEGER,INTENT(in ) :: kid 665 !! 
** Purpose : force barrier for delayed mpp (needed for restart) 666 !! 667 !!---------------------------------------------------------------------- 668 INTEGER,INTENT(in ) :: kid 635 669 INTEGER :: ierr 636 670 !!---------------------------------------------------------------------- 637 #if defined key_mpp_mpi671 #if ! defined key_mpi_off 638 672 IF( ln_timing ) CALL tic_tac( .TRUE., ld_global = .TRUE.) 639 673 ! test on ndelayid(kid) useless as mpi_wait return immediatly if the request handle is MPI_REQUEST_NULL … … 657 691 !!---------------------------------------------------------------------- 658 692 ! 659 #if defined key_mpp_mpi693 #if ! defined key_mpi_off 660 694 call MPI_BCAST(kleng, 1, MPI_INT, 0, mpi_comm_oce, iflag) 661 695 call MPI_BARRIER(mpi_comm_oce, iflag) … … 669 703 END SUBROUTINE mpp_bcast_nml 670 704 671 705 672 706 !!---------------------------------------------------------------------- 673 707 !! *** mppmax_a_int, mppmax_int, mppmax_a_real, mppmax_real *** 674 !! 708 !! 675 709 !!---------------------------------------------------------------------- 676 710 !! … … 724 758 !!---------------------------------------------------------------------- 725 759 !! *** mppmin_a_int, mppmin_int, mppmin_a_real, mppmin_real *** 726 !! 760 !! 727 761 !!---------------------------------------------------------------------- 728 762 !! … … 776 810 !!---------------------------------------------------------------------- 777 811 !! *** mppsum_a_int, mppsum_int, mppsum_a_real, mppsum_real *** 778 !! 812 !! 779 813 !! Global sum of 1D array or a variable (integer, real or complex) 780 814 !!---------------------------------------------------------------------- … … 850 884 !!---------------------------------------------------------------------- 851 885 !! *** mpp_minloc2d, mpp_minloc3d, mpp_maxloc2d, mpp_maxloc3d 852 !! 886 !! 853 887 !!---------------------------------------------------------------------- 854 888 !! 
… … 923 957 !!----------------------------------------------------------------------- 924 958 ! 925 #if defined key_mpp_mpi959 #if ! defined key_mpi_off 926 960 CALL mpi_barrier( mpi_comm_oce, ierror ) 927 961 #endif … … 930 964 931 965 932 SUBROUTINE mppstop( ld_abort ) 966 SUBROUTINE mppstop( ld_abort ) 933 967 !!---------------------------------------------------------------------- 934 968 !! *** routine mppstop *** … … 939 973 LOGICAL, OPTIONAL, INTENT(in) :: ld_abort ! source process number 940 974 LOGICAL :: ll_abort 941 INTEGER :: info 975 INTEGER :: info, ierr 942 976 !!---------------------------------------------------------------------- 943 977 ll_abort = .FALSE. 944 978 IF( PRESENT(ld_abort) ) ll_abort = ld_abort 945 979 ! 946 #if defined key_mpp_mpi980 #if ! defined key_mpi_off 947 981 IF(ll_abort) THEN 948 CALL mpi_abort( MPI_COMM_WORLD )982 CALL mpi_abort( MPI_COMM_WORLD, 123, info ) 949 983 ELSE 950 984 CALL mppsync … … 959 993 SUBROUTINE mpp_comm_free( kcom ) 960 994 !!---------------------------------------------------------------------- 961 INTEGER, INTENT(in ) :: kcom995 INTEGER, INTENT(inout) :: kcom 962 996 !! 963 997 INTEGER :: ierr 964 998 !!---------------------------------------------------------------------- 965 999 ! 966 #if defined key_mpp_mpi1000 #if ! defined key_mpi_off 967 1001 CALL MPI_COMM_FREE(kcom, ierr) 968 1002 #endif … … 996 1030 INTEGER, ALLOCATABLE, DIMENSION(:) :: kwork 997 1031 !!---------------------------------------------------------------------- 998 #if defined key_mpp_mpi999 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_world : ', ngrp_world1000 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_world : ', mpi_comm_world1001 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - mpi_comm_oce : ', mpi_comm_oce1032 #if ! 
defined key_mpi_off 1033 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - ngrp_world : ', ngrp_world 1034 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - mpi_comm_world : ', mpi_comm_world 1035 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - mpi_comm_oce : ', mpi_comm_oce 1002 1036 ! 1003 1037 ALLOCATE( kwork(jpnij), STAT=ierr ) … … 1010 1044 ! 1011 1045 CALL MPI_ALLGATHER ( njmpp, 1, mpi_integer, kwork, 1, mpi_integer, mpi_comm_oce, ierr ) 1012 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - kwork pour njmpp : ', kwork1046 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - kwork pour njmpp : ', kwork 1013 1047 !-$$ CALL flush(numout) 1014 1048 ! … … 1020 1054 ENDIF 1021 1055 END DO 1022 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ndim_rank_znl : ', ndim_rank_znl1056 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - ndim_rank_znl : ', ndim_rank_znl 1023 1057 !-$$ CALL flush(numout) 1024 1058 ! Allocate the right size to nrank_znl … … 1033 1067 ENDIF 1034 1068 END DO 1035 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - nrank_znl : ', nrank_znl1069 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - nrank_znl : ', nrank_znl 1036 1070 !-$$ CALL flush(numout) 1037 1071 1038 1072 ! Create the opa group 1039 1073 CALL MPI_COMM_GROUP(mpi_comm_oce,ngrp_opa,ierr) 1040 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_opa : ', ngrp_opa1074 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - ngrp_opa : ', ngrp_opa 1041 1075 !-$$ CALL flush(numout) 1042 1076 1043 1077 ! Create the znl group from the opa group 1044 1078 CALL MPI_GROUP_INCL ( ngrp_opa, ndim_rank_znl, nrank_znl, ngrp_znl, ierr ) 1045 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ngrp_znl ', ngrp_znl1079 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - ngrp_znl ', ngrp_znl 1046 1080 !-$$ CALL flush(numout) 1047 1081 1048 1082 ! 
Create the znl communicator from the opa communicator, ie the pool of procs in the same row 1049 1083 CALL MPI_COMM_CREATE ( mpi_comm_oce, ngrp_znl, ncomm_znl, ierr ) 1050 !-$$ WRITE (numout,*) 'mpp_ini_znl ', nproc, ' - ncomm_znl ', ncomm_znl1084 !-$$ WRITE (numout,*) 'mpp_ini_znl ', mpprank, ' - ncomm_znl ', ncomm_znl 1051 1085 !-$$ CALL flush(numout) 1052 1086 ! … … 1068 1102 END SUBROUTINE mpp_ini_znl 1069 1103 1104 1105 SUBROUTINE mpp_ini_nc( khls ) 1106 !!---------------------------------------------------------------------- 1107 !! *** routine mpp_ini_nc *** 1108 !! 1109 !! ** Purpose : Initialize special communicators for MPI3 neighbourhood 1110 !! collectives 1111 !! 1112 !! ** Method : - Create graph communicators starting from the processes 1113 !! distribution along i and j directions 1114 ! 1115 !! ** output 1116 !! mpi_nc_com4 = MPI3 neighbourhood collectives communicator 1117 !! mpi_nc_com8 = MPI3 neighbourhood collectives communicator (with diagonals) 1118 !!---------------------------------------------------------------------- 1119 INTEGER, INTENT(in ) :: khls ! halo size, default = nn_hls 1120 ! 1121 INTEGER, DIMENSION(:), ALLOCATABLE :: iSnei4, iRnei4, iSnei8, iRnei8 1122 INTEGER :: iScnt4, iRcnt4, iScnt8, iRcnt8 1123 INTEGER :: ierr 1124 LOGICAL, PARAMETER :: ireord = .FALSE. 1125 !!---------------------------------------------------------------------- 1126 #if ! defined key_mpi_off && ! defined key_mpi2 1127 1128 iScnt4 = COUNT( mpiSnei(khls,1:4) >= 0 ) 1129 iRcnt4 = COUNT( mpiRnei(khls,1:4) >= 0 ) 1130 iScnt8 = COUNT( mpiSnei(khls,1:8) >= 0 ) 1131 iRcnt8 = COUNT( mpiRnei(khls,1:8) >= 0 ) 1132 1133 ALLOCATE( iSnei4(iScnt4), iRnei4(iRcnt4), iSnei8(iScnt8), iRnei8(iRcnt8) ) ! 
ok if icnt4 or icnt8 = 0 1134 1135 iSnei4 = PACK( mpiSnei(khls,1:4), mask = mpiSnei(khls,1:4) >= 0 ) 1136 iRnei4 = PACK( mpiRnei(khls,1:4), mask = mpiRnei(khls,1:4) >= 0 ) 1137 iSnei8 = PACK( mpiSnei(khls,1:8), mask = mpiSnei(khls,1:8) >= 0 ) 1138 iRnei8 = PACK( mpiRnei(khls,1:8), mask = mpiRnei(khls,1:8) >= 0 ) 1139 1140 CALL MPI_Dist_graph_create_adjacent( mpi_comm_oce, iScnt4, iSnei4, MPI_UNWEIGHTED, iRcnt4, iRnei4, MPI_UNWEIGHTED, & 1141 & MPI_INFO_NULL, ireord, mpi_nc_com4(khls), ierr ) 1142 CALL MPI_Dist_graph_create_adjacent( mpi_comm_oce, iScnt8, iSnei8, MPI_UNWEIGHTED, iRcnt8, iRnei8, MPI_UNWEIGHTED, & 1143 & MPI_INFO_NULL, ireord, mpi_nc_com8(khls), ierr) 1144 1145 DEALLOCATE( iSnei4, iRnei4, iSnei8, iRnei8 ) 1146 #endif 1147 END SUBROUTINE mpp_ini_nc 1148 1070 1149 1071 1150 SUBROUTINE mpp_ini_north … … 1082 1161 !! 1083 1162 !! ** output 1084 !! njmppmax = njmpp for northern procs1085 1163 !! ndim_rank_north = number of processors in the northern line 1086 1164 !! nrank_north (ndim_rank_north) = number of the northern procs. … … 1096 1174 !!---------------------------------------------------------------------- 1097 1175 ! 1098 #if defined key_mpp_mpi 1099 njmppmax = MAXVAL( njmppt ) 1176 #if ! defined key_mpi_off 1100 1177 ! 1101 1178 ! Look for how many procs on the northern boundary … … 1178 1255 INTEGER :: ji, jj, jk, jh, jf, jcount ! dummy loop indices 1179 1256 !!---------------------------------------------------------------------- 1180 #if defined key_mpp_mpi1257 #if ! defined key_mpi_off 1181 1258 ! 1182 1259 ll_lbc = .FALSE. 
… … 1248 1325 END DO 1249 1326 IF ( crname_lbc(n_sequence_lbc) /= 'already counted' ) THEN 1250 WRITE(numcom,'(A, I4, A, A)') ' - ', 1,' times by subroutine ', TRIM(crname_lbc(n com_rec_max))1327 WRITE(numcom,'(A, I4, A, A)') ' - ', 1,' times by subroutine ', TRIM(crname_lbc(n_sequence_lbc)) 1251 1328 END IF 1252 1329 WRITE(numcom,*) ' ' … … 1259 1336 jj = 0 1260 1337 END IF 1261 jj = jj + 1 1338 jj = jj + 1 1262 1339 END DO 1263 1340 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_glb(n_sequence_glb)) … … 1275 1352 jj = 0 1276 1353 END IF 1277 jj = jj + 1 1354 jj = jj + 1 1278 1355 END DO 1279 1356 WRITE(numcom,'(A, I4, A, A)') ' - ', jj,' times by subroutine ', TRIM(crname_dlg(n_sequence_dlg)) … … 1291 1368 END SUBROUTINE mpp_report 1292 1369 1293 1370 1294 1371 SUBROUTINE tic_tac (ld_tic, ld_global) 1295 1372 … … 1299 1376 REAL(dp), SAVE :: tic_ct = 0._dp 1300 1377 INTEGER :: ii 1301 #if defined key_mpp_mpi1378 #if ! defined key_mpi_off 1302 1379 1303 1380 IF( ncom_stp <= nit000 ) RETURN … … 1307 1384 IF( ld_global ) ii = 2 1308 1385 END IF 1309 1386 1310 1387 IF ( ld_tic ) THEN 1311 1388 tic_wt(ii) = MPI_Wtime() ! start count tic->tac (waiting time) … … 1316 1393 ENDIF 1317 1394 #endif 1318 1395 1319 1396 END SUBROUTINE tic_tac 1320 1397 1321 #if ! defined key_mpp_mpi1398 #if defined key_mpi_off 1322 1399 SUBROUTINE mpi_wait(request, status, ierror) 1323 1400 INTEGER , INTENT(in ) :: request … … 1326 1403 END SUBROUTINE mpi_wait 1327 1404 1328 1405 1329 1406 FUNCTION MPI_Wtime() 1330 1407 REAL(wp) :: MPI_Wtime … … 1388 1465 ! 1389 1466 IF( cd1 == 'STOP' ) THEN 1390 WRITE(numout,*) 1467 WRITE(numout,*) 1391 1468 WRITE(numout,*) 'huge E-R-R-O-R : immediate stop' 1392 WRITE(numout,*) 1469 WRITE(numout,*) 1393 1470 CALL FLUSH(numout) 1394 1471 CALL SLEEP(60) ! make sure that all output and abort files are written by all cores. 60s should be enough... … … 1487 1564 ENDIF 1488 1565 IF( iost /= 0 .AND. TRIM(clfile) == '/dev/null' ) & ! 
for windows 1489 & OPEN(UNIT=knum,FILE='NUL', FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1566 & OPEN(UNIT=knum,FILE='NUL', FORM=cdform, ACCESS=cdacce, STATUS=cdstat , ERR=100, IOSTAT=iost ) 1490 1567 IF( iost == 0 ) THEN 1491 1568 IF(ldwp .AND. kout > 0) THEN … … 1529 1606 ! 1530 1607 WRITE (clios, '(I5.0)') kios 1531 IF( kios < 0 ) THEN 1608 IF( kios < 0 ) THEN 1532 1609 CALL ctl_warn( 'end of record or file while reading namelist ' & 1533 1610 & // TRIM(cdnam) // ' iostat = ' // TRIM(clios) ) … … 1575 1652 !csp = NEW_LINE('A') 1576 1653 ! a new line character is the best seperator but some systems (e.g.Cray) 1577 ! seem to terminate namelist reads from internal files early if they 1654 ! seem to terminate namelist reads from internal files early if they 1578 1655 ! encounter new-lines. Use a single space for safety. 1579 1656 csp = ' ' … … 1594 1671 iltc = LEN_TRIM(chline) 1595 1672 IF ( iltc.GT.0 ) THEN 1596 inl = INDEX(chline, '!') 1673 inl = INDEX(chline, '!') 1597 1674 IF( inl.eq.0 ) THEN 1598 1675 itot = itot + iltc + 1 ! +1 for the newline character … … 1640 1717 !write(*,'(32A)') cdnambuff 1641 1718 ENDIF 1642 #if defined key_mpp_mpi1719 #if ! defined key_mpi_off 1643 1720 CALL mpp_bcast_nml( cdnambuff, itot ) 1644 1721 #endif
Note: See TracChangeset for help on using the changeset viewer.