Changeset 1926
- Timestamp:
- 2010-06-10T13:06:13+02:00 (14 years ago)
- Location:
- branches/DEV_1879_mpp_sca/NEMO/OPA_SRC
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
branches/DEV_1879_mpp_sca/NEMO/OPA_SRC/lib_mpp.F90
r1874 r1926 159 159 REAL(wp), DIMENSION(1-jpr2di:jpi+jpr2di,jprecj+jpr2dj,2) :: tr2ns, tr2sn ! 2d for north-south & south-north + extra outer halo 160 160 REAL(wp), DIMENSION(1-jpr2dj:jpj+jpr2dj,jpreci+jpr2di,2) :: tr2ew, tr2we ! 2d for east-west & west-east + extra outer halo 161 162 ! North fold arrays used to minimise the use of allgather operations. Set in opa_northcomms so need to be public 163 INTEGER, PUBLIC, PARAMETER :: jpmaxngh = 8 ! Assumed maximum number of active neighbours 164 INTEGER, PUBLIC, DIMENSION (jpmaxngh,4) :: isendto 165 INTEGER, PUBLIC, DIMENSION (4) :: nsndto 166 LOGICAL, PUBLIC :: lnorth_nogather = .FALSE. 167 INTEGER, PUBLIC :: ityp 161 168 !!---------------------------------------------------------------------- 162 169 !! NEMO/OPA 3.2 , LOCEAN-IPSL (2009) … … 354 361 CASE ( -1 ) 355 362 CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req1 ) 356 CALL mpprecv( 1, t3ew(1,1,1,2), imigr )363 CALL mpprecv( 1, t3ew(1,1,1,2), imigr, noea ) 357 364 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 358 365 CASE ( 0 ) 359 366 CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 ) 360 367 CALL mppsend( 2, t3we(1,1,1,1), imigr, noea, ml_req2 ) 361 CALL mpprecv( 1, t3ew(1,1,1,2), imigr )362 CALL mpprecv( 2, t3we(1,1,1,2), imigr )368 CALL mpprecv( 1, t3ew(1,1,1,2), imigr, noea ) 369 CALL mpprecv( 2, t3we(1,1,1,2), imigr, nowe ) 363 370 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 364 371 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 365 372 CASE ( 1 ) 366 373 CALL mppsend( 1, t3ew(1,1,1,1), imigr, nowe, ml_req1 ) 367 CALL mpprecv( 2, t3we(1,1,1,2), imigr )374 CALL mpprecv( 2, t3we(1,1,1,2), imigr, nowe ) 368 375 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 369 376 END SELECT … … 407 414 CASE ( -1 ) 408 415 CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req1 ) 409 CALL mpprecv( 3, t3ns(1,1,1,2), imigr )416 CALL mpprecv( 3, t3ns(1,1,1,2), imigr, nono ) 410 417 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 411 418 CASE ( 0 ) 
412 419 CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 ) 413 420 CALL mppsend( 4, t3sn(1,1,1,1), imigr, nono, ml_req2 ) 414 CALL mpprecv( 3, t3ns(1,1,1,2), imigr )415 CALL mpprecv( 4, t3sn(1,1,1,2), imigr )421 CALL mpprecv( 3, t3ns(1,1,1,2), imigr, nono ) 422 CALL mpprecv( 4, t3sn(1,1,1,2), imigr, noso ) 416 423 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 417 424 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 418 425 CASE ( 1 ) 419 426 CALL mppsend( 3, t3ns(1,1,1,1), imigr, noso, ml_req1 ) 420 CALL mpprecv( 4, t3sn(1,1,1,2), imigr )427 CALL mpprecv( 4, t3sn(1,1,1,2), imigr, noso ) 421 428 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 422 429 END SELECT … … 548 555 CASE ( -1 ) 549 556 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 ) 550 CALL mpprecv( 1, t2ew(1,1,2), imigr )557 CALL mpprecv( 1, t2ew(1,1,2), imigr, noea ) 551 558 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 552 559 CASE ( 0 ) 553 560 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 554 561 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 ) 555 CALL mpprecv( 1, t2ew(1,1,2), imigr )556 CALL mpprecv( 2, t2we(1,1,2), imigr )562 CALL mpprecv( 1, t2ew(1,1,2), imigr, noea ) 563 CALL mpprecv( 2, t2we(1,1,2), imigr, nowe ) 557 564 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 558 565 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 559 566 CASE ( 1 ) 560 567 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 561 CALL mpprecv( 2, t2we(1,1,2), imigr )568 CALL mpprecv( 2, t2we(1,1,2), imigr, nowe ) 562 569 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 563 570 END SELECT … … 601 608 CASE ( -1 ) 602 609 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 ) 603 CALL mpprecv( 3, t2ns(1,1,2), imigr )610 CALL mpprecv( 3, t2ns(1,1,2), imigr, nono ) 604 611 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 605 612 CASE ( 0 ) 606 613 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 607 614 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 ) 608 CALL mpprecv( 3, 
t2ns(1,1,2), imigr )609 CALL mpprecv( 4, t2sn(1,1,2), imigr )615 CALL mpprecv( 3, t2ns(1,1,2), imigr, nono ) 616 CALL mpprecv( 4, t2sn(1,1,2), imigr, noso ) 610 617 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 611 618 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 612 619 CASE ( 1 ) 613 620 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 614 CALL mpprecv( 4, t2sn(1,1,2), imigr )621 CALL mpprecv( 4, t2sn(1,1,2), imigr, noso ) 615 622 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 616 623 END SELECT … … 729 736 CASE ( -1 ) 730 737 CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req1 ) 731 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr )738 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr, noea ) 732 739 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 733 740 CASE ( 0 ) 734 741 CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 735 742 CALL mppsend( 2, t4we(1,1,1,1,1), imigr, noea, ml_req2 ) 736 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr )737 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr )743 CALL mpprecv( 1, t4ew(1,1,1,1,2), imigr, noea ) 744 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr, nowe ) 738 745 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 739 746 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 740 747 CASE ( 1 ) 741 748 CALL mppsend( 1, t4ew(1,1,1,1,1), imigr, nowe, ml_req1 ) 742 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr )749 CALL mpprecv( 2, t4we(1,1,1,1,2), imigr, nowe ) 743 750 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 744 751 END SELECT … … 788 795 CASE ( -1 ) 789 796 CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req1 ) 790 CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr )797 CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr, nono ) 791 798 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 792 799 CASE ( 0 ) 793 800 CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 794 801 CALL mppsend( 4, t4sn(1,1,1,1,1), imigr, nono, ml_req2 ) 795 CALL mpprecv( 3, t4ns(1,1,1,1,2), imigr )796 CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr )802 CALL mpprecv( 3, 
t4ns(1,1,1,1,2), imigr, nono ) 803 CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr, noso ) 797 804 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 798 805 IF(l_isend) CALL mpi_wait(ml_req2, ml_stat, ml_err) 799 806 CASE ( 1 ) 800 807 CALL mppsend( 3, t4ns(1,1,1,1,1), imigr, noso, ml_req1 ) 801 CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr )808 CALL mpprecv( 4, t4sn(1,1,1,1,2), imigr, noso ) 802 809 IF(l_isend) CALL mpi_wait(ml_req1, ml_stat, ml_err) 803 810 END SELECT … … 932 939 CASE ( -1 ) 933 940 CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req1 ) 934 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr )941 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr, noea ) 935 942 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 936 943 CASE ( 0 ) 937 944 CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 ) 938 945 CALL mppsend( 2, tr2we(1-jpr2dj,1,1), imigr, noea, ml_req2 ) 939 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr )940 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr )946 CALL mpprecv( 1, tr2ew(1-jpr2dj,1,2), imigr, noea ) 947 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr, nowe ) 941 948 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 942 949 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 943 950 CASE ( 1 ) 944 951 CALL mppsend( 1, tr2ew(1-jpr2dj,1,1), imigr, nowe, ml_req1 ) 945 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr )952 CALL mpprecv( 2, tr2we(1-jpr2dj,1,2), imigr, nowe ) 946 953 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 947 954 END SELECT … … 985 992 CASE ( -1 ) 986 993 CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req1 ) 987 CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr )994 CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr, nono ) 988 995 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 989 996 CASE ( 0 ) 990 997 CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 ) 991 998 CALL mppsend( 4, tr2sn(1-jpr2di,1,1), imigr, nono, ml_req2 ) 992 CALL mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr )993 CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr )999 CALL 
mpprecv( 3, tr2ns(1-jpr2di,1,2), imigr, nono ) 1000 CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr, noso ) 994 1001 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 995 1002 IF(l_isend) CALL mpi_wait(ml_req2,ml_stat,ml_err) 996 1003 CASE ( 1 ) 997 1004 CALL mppsend( 3, tr2ns(1-jpr2di,1,1), imigr, noso, ml_req1 ) 998 CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr )1005 CALL mpprecv( 4, tr2sn(1-jpr2di,1,2), imigr, noso ) 999 1006 IF(l_isend) CALL mpi_wait(ml_req1,ml_stat,ml_err) 1000 1007 END SELECT … … 1051 1058 1052 1059 1053 SUBROUTINE mpprecv( ktyp, pmess, kbytes )1060 SUBROUTINE mpprecv( ktyp, pmess, kbytes, ksource ) 1054 1061 !!---------------------------------------------------------------------- 1055 1062 !! *** routine mpprecv *** … … 1061 1068 INTEGER , INTENT(in ) :: kbytes ! suze of the array pmess 1062 1069 INTEGER , INTENT(in ) :: ktyp ! Tag of the recevied message 1070 INTEGER, OPTIONAL, INTENT(in) :: ksource ! source process number 1063 1071 !! 1064 1072 INTEGER :: istatus(mpi_status_size) 1065 1073 INTEGER :: iflag 1066 !!---------------------------------------------------------------------- 1067 ! 1068 CALL mpi_recv( pmess, kbytes, mpi_double_precision, mpi_any_source, ktyp, mpi_comm_opa, istatus, iflag ) 1074 INTEGER :: use_source 1075 !!---------------------------------------------------------------------- 1076 ! 1077 1078 ! If a specific process number has been passed to the receive call, 1079 ! use that one. Default is to use mpi_any_source 1080 use_source=mpi_any_source 1081 if(present(ksource)) then 1082 use_source=ksource 1083 end if 1084 1085 CALL mpi_recv( pmess, kbytes, mpi_double_precision, use_source, ktyp, mpi_comm_opa, istatus, iflag ) 1069 1086 ! 
1070 1087 END SUBROUTINE mpprecv … … 1681 1698 IF( nbondi == -1 ) THEN 1682 1699 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req1 ) 1683 CALL mpprecv( 1, t2ew(1,1,2), imigr )1700 CALL mpprecv( 1, t2ew(1,1,2), imigr, noea ) 1684 1701 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1685 1702 ELSEIF( nbondi == 0 ) THEN 1686 1703 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1687 1704 CALL mppsend( 2, t2we(1,1,1), imigr, noea, ml_req2 ) 1688 CALL mpprecv( 1, t2ew(1,1,2), imigr )1689 CALL mpprecv( 2, t2we(1,1,2), imigr )1705 CALL mpprecv( 1, t2ew(1,1,2), imigr, noea ) 1706 CALL mpprecv( 2, t2we(1,1,2), imigr, nowe ) 1690 1707 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1691 1708 IF(l_isend) CALL mpi_wait( ml_req2, ml_stat, ml_err ) 1692 1709 ELSEIF( nbondi == 1 ) THEN 1693 1710 CALL mppsend( 1, t2ew(1,1,1), imigr, nowe, ml_req1 ) 1694 CALL mpprecv( 2, t2we(1,1,2), imigr )1711 CALL mpprecv( 2, t2we(1,1,2), imigr, nowe ) 1695 1712 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1696 1713 ENDIF … … 1727 1744 IF( nbondj == -1 ) THEN 1728 1745 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req1 ) 1729 CALL mpprecv( 3, t2ns(1,1,2), imigr )1746 CALL mpprecv( 3, t2ns(1,1,2), imigr, nono ) 1730 1747 IF(l_isend) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1731 1748 ELSEIF( nbondj == 0 ) THEN 1732 1749 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 1733 1750 CALL mppsend( 4, t2sn(1,1,1), imigr, nono, ml_req2 ) 1734 CALL mpprecv( 3, t2ns(1,1,2), imigr )1735 CALL mpprecv( 4, t2sn(1,1,2), imigr )1751 CALL mpprecv( 3, t2ns(1,1,2), imigr, nono ) 1752 CALL mpprecv( 4, t2sn(1,1,2), imigr, noso ) 1736 1753 IF( l_isend ) CALL mpi_wait( ml_req1, ml_stat, ml_err ) 1737 1754 IF( l_isend ) CALL mpi_wait( ml_req2, ml_stat, ml_err ) 1738 1755 ELSEIF( nbondj == 1 ) THEN 1739 1756 CALL mppsend( 3, t2ns(1,1,1), imigr, noso, ml_req1 ) 1740 CALL mpprecv( 4, t2sn(1,1,2), imigr )1757 CALL mpprecv( 4, t2sn(1,1,2), imigr, noso) 1741 1758 IF( l_isend ) CALL mpi_wait( 
ml_req1, ml_stat, ml_err ) 1742 1759 ENDIF … … 2045 2062 REAL(wp), DIMENSION(jpi ,4,jpk) :: znorthloc 2046 2063 REAL(wp), DIMENSION(jpi ,4,jpk,jpni) :: znorthgloio 2064 REAL(wp), DIMENSION(jpi, 4,jpk) :: zfoldwrk ! Workspace for message transfers avoiding mpi_allgather 2065 INTEGER, DIMENSION (jpmaxngh) :: ml_req5 ! for mpi_isend when avoiding mpi_allgather 2066 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2067 INTEGER, DIMENSION(MPI_STATUS_SIZE) :: ml_stat ! for mpi_isend when avoiding mpi_allgather 2047 2068 !!---------------------------------------------------------------------- 2048 2069 ! 2049 2070 ijpj = 4 2071 ityp = -1 2050 2072 ijpjm1 = 3 2051 2073 ztab(:,:,:) = 0.e0 … … 2058 2080 ! ! Build in procs of ncomm_north the znorthgloio 2059 2081 itaille = jpi * jpk * ijpj 2060 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2061 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2082 IF ( lnorth_nogather ) THEN 2083 ! 2084 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified (in opa_northcomms) 2085 ! as being involved in this process' northern boundary exchange 2086 ! 2087 ! First put local values into the global arraay 2088 DO jj = nlcj-ijpj+1, nlcj 2089 ij = jj - nlcj + ijpj 2090 DO ji = 1, nlci 2091 ztab(ji+nimpp-1,ij,:) = pt3d(ji,jj,:) 2092 END DO 2093 END DO 2094 2095 ! 2096 ! Set the exchange type in order to access the correct list of active neighbours 2097 ! 2098 SELECT CASE ( cd_type ) 2099 CASE ( 'T' , 'W' ) 2100 ityp = 1 2101 CASE ( 'U' ) 2102 ityp = 2 2103 CASE ( 'V' ) 2104 ityp = 3 2105 CASE ( 'F' ) 2106 ityp = 4 2107 CASE DEFAULT 2108 ! 2109 ! Set a default value for unsupported types which will cause a fallback to 2110 ! the mpi_allgather method 2111 ! 2112 ityp = -1 2113 END SELECT 2114 IF ( ityp .gt. 
0 ) THEN 2115 2116 DO jr = 1,nsndto(ityp) 2117 CALL mppsend(5, znorthloc, itaille, isendto(jr,ityp), ml_req5(jr) ) 2118 END DO 2119 DO jr = 1,nsndto(ityp) 2120 CALL mpprecv(5, zfoldwrk, itaille, isendto(jr,ityp)) 2121 iproc = isendto(jr,ityp) + 1 2122 ildi=nldit (iproc) 2123 ilei=nleit (iproc) 2124 iilb=nimppt(iproc) 2125 DO jj = 1, 4 2126 DO ji = ildi, ilei 2127 ztab(ji+iilb-1,jj,:) = zfoldwrk(ji,jj,:) 2128 END DO 2129 END DO 2130 END DO 2131 IF(l_isend) THEN 2132 DO jr = 1,nsndto(ityp) 2133 CALL mpi_wait(ml_req5(jr), ml_stat, ml_err) 2134 END DO 2135 ENDIF 2136 2137 ENDIF 2138 2139 ENDIF 2140 2141 IF ( ityp .lt. 0 ) THEN 2142 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2143 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2062 2144 ! 2063 2145 ! ! recover the global north array 2064 DO jr = 1, ndim_rank_north 2065 iproc = nrank_north(jr) + 1 2066 ildi = nldit (iproc) 2067 ilei = nleit (iproc) 2068 iilb = nimppt(iproc) 2069 DO jj = 1, 4 2070 DO ji = ildi, ilei 2071 ztab(ji+iilb-1,jj,:) = znorthgloio(ji,jj,:,jr) 2146 DO jr = 1, ndim_rank_north 2147 iproc = nrank_north(jr) + 1 2148 ildi = nldit (iproc) 2149 ilei = nleit (iproc) 2150 iilb = nimppt(iproc) 2151 DO jj = 1, 4 2152 DO ji = ildi, ilei 2153 ztab(ji+iilb-1,jj,:) = znorthgloio(ji,jj,:,jr) 2154 END DO 2072 2155 END DO 2073 2156 END DO 2074 END DO 2157 ENDIF 2158 ! 2159 ! The ztab array has been either: 2160 ! a. Fully populated by the mpi_allgather operation or 2161 ! b. Had the active points for this domain and northern neighbours populated by peer to peer exchanges 2162 ! Either way the array may be folded by lbc_nfd and the result for the span of this domain will be identical. 2075 2163 ! 2076 2164 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition … … 2111 2199 REAL(wp), DIMENSION(jpi ,4) :: znorthloc 2112 2200 REAL(wp), DIMENSION(jpi ,4,jpni) :: znorthgloio 2201 REAL(wp), DIMENSION(jpi, 4) :: zfoldwrk ! 
Workspace for message transfers avoiding mpi_allgather 2202 INTEGER, DIMENSION (jpmaxngh) :: ml_req5 ! for mpi_isend when avoiding mpi_allgather 2203 INTEGER :: ml_err ! for mpi_isend when avoiding mpi_allgather 2204 INTEGER, DIMENSION(MPI_STATUS_SIZE):: ml_stat ! for mpi_isend when avoiding mpi_allgather 2113 2205 !!---------------------------------------------------------------------- 2114 2206 ! 2115 2207 ijpj = 4 2208 ityp = -1 2116 2209 ijpjm1 = 3 2117 2210 ztab(:,:) = 0.e0 … … 2124 2217 ! ! Build in procs of ncomm_north the znorthgloio 2125 2218 itaille = jpi * ijpj 2126 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2127 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2128 ! 2129 DO jr = 1, ndim_rank_north ! recover the global north array 2130 iproc = nrank_north(jr) + 1 2131 ildi=nldit (iproc) 2132 ilei=nleit (iproc) 2133 iilb=nimppt(iproc) 2134 DO jj = 1, 4 2135 DO ji = ildi, ilei 2136 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 2219 IF ( lnorth_nogather ) THEN 2220 ! 2221 ! Avoid the use of mpi_allgather by exchanging only with the processes already identified (in opa_northcomms) 2222 ! as being involved in this process' northern boundary exchange 2223 ! 2224 ! First put local values into the global array 2225 ! 2226 DO jj = nlcj-ijpj+1, nlcj 2227 ij = jj - nlcj + ijpj 2228 DO ji = 1, nlci 2229 ztab(ji+nimpp-1,ij) = pt2d(ji,jj) 2230 END DO 2231 END DO 2232 2233 ! 2234 ! Set the exchange type in order to access the correct list of active neighbours 2235 ! 2236 SELECT CASE ( cd_type ) 2237 CASE ( 'T' , 'W' ) 2238 ityp = 1 2239 CASE ( 'U' ) 2240 ityp = 2 2241 CASE ( 'V' ) 2242 ityp = 3 2243 CASE ( 'F' ) 2244 ityp = 4 2245 CASE DEFAULT 2246 ! 2247 ! Set a default value for unsupported types which will cause a fallback to 2248 ! the mpi_allgather method 2249 ! 2250 ityp = -1 2251 END SELECT 2252 2253 IF ( ityp .gt. 
0 ) THEN 2254 2255 DO jr = 1,nsndto(ityp) 2256 CALL mppsend(5, znorthloc, itaille, isendto(jr,ityp), ml_req5(jr) ) 2257 END DO 2258 DO jr = 1,nsndto(ityp) 2259 CALL mpprecv(5, zfoldwrk, itaille, isendto(jr,ityp)) 2260 iproc = isendto(jr,ityp) + 1 2261 ildi=nldit (iproc) 2262 ilei=nleit (iproc) 2263 iilb=nimppt(iproc) 2264 DO jj = 1, 4 2265 DO ji = ildi, ilei 2266 ztab(ji+iilb-1,jj) = zfoldwrk(ji,jj) 2267 END DO 2137 2268 END DO 2138 END DO 2139 END DO 2140 ! 2141 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2142 ! 2143 ! 2144 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2145 ij = jj - nlcj + ijpj 2146 DO ji = 1, nlci 2147 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2148 END DO 2149 END DO 2269 END DO 2270 IF(l_isend) THEN 2271 DO jr = 1,nsndto(ityp) 2272 CALL mpi_wait(ml_req5(jr), ml_stat, ml_err) 2273 END DO 2274 ENDIF 2275 2276 ENDIF 2277 2278 ENDIF 2279 2280 IF ( ityp .lt. 0 ) THEN 2281 CALL MPI_ALLGATHER( znorthloc , itaille, MPI_DOUBLE_PRECISION, & 2282 & znorthgloio, itaille, MPI_DOUBLE_PRECISION, ncomm_north, ierr ) 2283 ! 2284 DO jr = 1, ndim_rank_north ! recover the global north array 2285 iproc = nrank_north(jr) + 1 2286 ildi=nldit (iproc) 2287 ilei=nleit (iproc) 2288 iilb=nimppt(iproc) 2289 DO jj = 1, 4 2290 DO ji = ildi, ilei 2291 ztab(ji+iilb-1,jj) = znorthgloio(ji,jj,jr) 2292 END DO 2293 END DO 2294 END DO 2295 ENDIF 2296 ! 2297 ! The ztab array has been either: 2298 ! a. Fully populated by the mpi_allgather operation or 2299 ! b. Had the active points for this domain and northern neighbours populated by peer to peer exchanges 2300 ! Either way the array may be folded by lbc_nfd and the result for the span of this domain will be identical. 2301 ! 2302 CALL lbc_nfd( ztab, cd_type, psgn ) ! North fold boundary condition 2303 ! 2304 ! 2305 DO jj = nlcj-ijpj+1, nlcj ! Scatter back to pt2d 2306 ij = jj - nlcj + ijpj 2307 DO ji = 1, nlci 2308 pt2d(ji,jj) = ztab(ji+nimpp-1,ij) 2309 END DO 2310 END DO 2150 2311 ! 
2151 2312 END SUBROUTINE mpp_lbc_north_2d -
branches/DEV_1879_mpp_sca/NEMO/OPA_SRC/opa.F90
r1793 r1926 257 257 ENDIF 258 258 !!gm c1d end 259 260 CALL opa_northcomms ! Initialise the northfold neighbour lists (must be done after the masks are defined) 259 261 260 262 IF( ln_ctl ) CALL prt_ctl_init ! Print control … … 407 409 END SUBROUTINE opa_closefile 408 410 411 SUBROUTINE opa_northcomms 412 !!====================================================================== 413 !! *** ROUTINE opa_northcomms *** 414 !! opa_northcomms : Setup for north fold exchanges with explicit peer to peer messaging 415 !!===================================================================== 416 !!---------------------------------------------------------------------- 417 !! 418 !! ** Purpose : Initialization of the northern neighbours lists. 419 !!---------------------------------------------------------------------- 420 421 INTEGER :: ji, jj, jk, ij ! dummy loop indices 422 INTEGER :: ijpj ! ??? 423 INTEGER, DIMENSION (jpi,4,4) :: ifoldnbrs 424 REAL(wp), DIMENSION (jpi,jpj) :: znnbrs ! workspace 425 LOGICAL, DIMENSION (jpnij) :: lrankset ! workspace 426 427 IF(lwp) WRITE(numout,*) 428 IF(lwp) WRITE(numout,*) 'opa_northcomms : Initialization of the northern neighbours lists' 429 IF(lwp) WRITE(numout,*) '~~~~~~~~~~' 430 431 !!---------------------------------------------------------------------- 432 nsndto = 0 433 isendto = -1 434 ijpj = 4 435 ! 436 ! Exchange and store ranks on northern rows 437 438 lrankset = .FALSE. 439 znnbrs = narea * tmask(:,:,1) 440 CALL lbc_lnk( znnbrs, 'T', 1. ) 441 442 IF ( njmppt(narea) .eq. MAXVAL( njmppt )) THEN 443 do jj = nlcj-ijpj+1, nlcj 444 ij = jj - nlcj + ijpj 445 ifoldnbrs(:,ij,1) = int(znnbrs(:,jj)) 446 do ji = 1,jpi 447 if(int(znnbrs(ji,jj)) .ne. 0 .and. int(znnbrs(ji,jj)) .ne. narea ) & 448 & lrankset(int(znnbrs(ji,jj))) = .true. 449 end do 450 end do 451 452 do jj = 1,jpnij 453 IF (lrankset(jj)) THEN 454 nsndto(1) = nsndto(1) + 1 455 IF(nsndto(1) .gt. 
jpmaxngh ) THEN 456 CALL ctl_stop( ' Too many neighbours in opa_northcomms ', & 457 & ' jpmaxngh will need to be increased ') 458 ENDIF 459 isendto(nsndto(1),1) = jj-1 ! narea converted to MPI rank 460 ENDIF 461 end do 462 ENDIF 463 464 lrankset = .FALSE. 465 znnbrs = narea * umask(:,:,1) 466 CALL lbc_lnk( znnbrs, 'U', 1. ) 467 468 IF ( njmppt(narea) .eq. MAXVAL( njmppt )) THEN 469 do jj = nlcj-ijpj+1, nlcj 470 ij = jj - nlcj + ijpj 471 ifoldnbrs(:,ij,2) = int(znnbrs(:,jj)) 472 do ji = 1,jpi 473 if(int(znnbrs(ji,jj)) .ne. 0 .and. int(znnbrs(ji,jj)) .ne. narea ) & 474 & lrankset(int(znnbrs(ji,jj))) = .true. 475 end do 476 end do 477 478 do jj = 1,jpnij 479 IF (lrankset(jj)) THEN 480 nsndto(2) = nsndto(2) + 1 481 IF(nsndto(2) .gt. jpmaxngh ) THEN 482 CALL ctl_stop( ' Too many neighbours in opa_northcomms ', & 483 & ' jpmaxngh will need to be increased ') 484 ENDIF 485 isendto(nsndto(2),2) = jj-1 ! narea converted to MPI rank 486 ENDIF 487 end do 488 ENDIF 489 490 lrankset = .FALSE. 491 znnbrs = narea * vmask(:,:,1) 492 CALL lbc_lnk( znnbrs, 'V', 1. ) 493 494 IF ( njmppt(narea) .eq. MAXVAL( njmppt )) THEN 495 do jj = nlcj-ijpj+1, nlcj 496 ij = jj - nlcj + ijpj 497 ifoldnbrs(:,ij,3) = int(znnbrs(:,jj)) 498 do ji = 1,jpi 499 if(int(znnbrs(ji,jj)) .ne. 0 .and. int(znnbrs(ji,jj)) .ne. narea ) & 500 & lrankset(int(znnbrs(ji,jj))) = .true. 501 end do 502 end do 503 504 do jj = 1,jpnij 505 IF (lrankset(jj)) THEN 506 nsndto(3) = nsndto(3) + 1 507 IF(nsndto(3) .gt. jpmaxngh ) THEN 508 CALL ctl_stop( ' Too many neighbours in opa_northcomms ', & 509 & ' jpmaxngh will need to be increased ') 510 ENDIF 511 isendto(nsndto(3),3) = jj-1 ! narea converted to MPI rank 512 ENDIF 513 end do 514 ENDIF 515 516 lrankset = .FALSE. 517 znnbrs = narea * fmask(:,:,1) 518 ! 519 ! filter top rows to counter any strong slip conditions 520 ! 521 do jj = nlcj-ijpj+1, nlcj 522 do ji = 1,jpi 523 znnbrs(ji,jj) = narea * MIN(1.0,fmask(ji,jj,1)) 524 end do 525 enddo 526 CALL lbc_lnk( znnbrs, 'F', 1. 
) 527 528 IF ( njmppt(narea) .eq. MAXVAL( njmppt )) THEN 529 do jj = nlcj-ijpj+1, nlcj 530 ij = jj - nlcj + ijpj 531 ifoldnbrs(:,ij,4) = int(znnbrs(:,jj)) 532 do ji = 1,jpi 533 if(int(znnbrs(ji,jj)) .ne. 0 .and. int(znnbrs(ji,jj)) .ne. narea ) & 534 & lrankset(int(znnbrs(ji,jj))) = .true. 535 end do 536 end do 537 538 do jj = 1,jpnij 539 IF (lrankset(jj)) THEN 540 nsndto(4) = nsndto(4) + 1 541 IF(nsndto(4) .gt. jpmaxngh ) THEN 542 CALL ctl_stop( ' Too many neighbours in opa_northcomms ', & 543 & ' jpmaxngh will need to be increased ') 544 ENDIF 545 isendto(nsndto(4),4) = jj-1 ! narea converted to MPI rank 546 ENDIF 547 end do 548 549 lnorth_nogather = .TRUE. 550 ENDIF 551 552 END SUBROUTINE opa_northcomms 409 553 !!====================================================================== 410 554 END MODULE opa
Note: See TracChangeset for help on using the changeset viewer.