Changeset 3598 for trunk/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
- Timestamp: 2012-11-19T14:35:09+01:00
- File: 1 edited
trunk/NEMOGCM/NEMO/OPA_SRC/LBC/lib_mpp.F90
r3435 → r3598

The single functional change makes the point-to-point wrappers mppsend and mpprecv PUBLIC, so that the TAM (the NEMO tangent and adjoint model) can call them from outside lib_mpp:

    68  68     PUBLIC   mppsize
    69  69     PUBLIC   lib_mpp_alloc     ! Called in nemogcm.F90
        70     PUBLIC   mppsend, mpprecv  ! (PUBLIC for TAM)

Every other modified line in the changeset is identical to its r3435 counterpart except for the removal of trailing whitespace. These whitespace-only hunks run through the whole module: the header history comments, the mpp_lnk_3d / mpp_lnk_2d / mpp_lnk_3d_gather / mpp_lnk_2d_e boundary-exchange routines, mppsend and mpprecv themselves, mppgather and mppscatter, the mppmax / mppmin / mppsum reduction wrappers (integer, real and double-double variants), mpp_minloc and mpp_maxloc, mppsync, mppstop, mppobc, the mpp_ini_ice / mpp_ini_znl / mpp_ini_north communicator set-up routines, the mpp_lbc_north_3d / _2d / _e north-fold routines, mpi_init_opa, the dummy versions of these interfaces compiled without key_mpp_mpi, and the ctl_stop / ctl_warn / ctl_opn / get_unit utilities.
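In practice, the mppsend/mpprecv change means any module that USEs lib_mpp can now drive a paired point-to-point exchange itself instead of going through the mpp_lnk_* wrappers. The sketch below is illustrative only: the routine name, buffer names and tag value are hypothetical, while the argument order (tag, buffer, word count, neighbour rank and, for mppsend, a request handle) is the one used throughout lib_mpp.

    SUBROUTINE tam_exchange( psend, precv, knei )
       USE par_kind, ONLY : wp                 ! NEMO working precision
       USE lib_mpp , ONLY : mppsend, mpprecv   ! PUBLIC since r3598
       REAL(wp), DIMENSION(:), INTENT(inout) ::   psend    ! outgoing strip
       REAL(wp), DIMENSION(:), INTENT(  out) ::   precv    ! incoming strip
       INTEGER ,               INTENT(in   ) ::   knei     ! neighbour rank
       INTEGER ::   iml_req    ! request handle; only meaningful for isend
       !
       CALL mppsend( 7, psend, SIZE(psend), knei, iml_req )   ! tagged send
       CALL mpprecv( 7, precv, SIZE(precv), knei )            ! matching receive
    END SUBROUTINE tam_exchange

Note that mppsend honours the cn_mpi_send / nn_buffer namelist settings ('S' standard, 'B' buffered, 'I' immediate), so a caller selecting 'I' is responsible for waiting on the returned request, exactly as the mpp_lnk_* routines do with mpi_wait; and, as in mpp_lnk_3d, the ordering of sends and receives between the two peers must be arranged so that the blocking variants cannot deadlock.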