Changeset 1176 for XIOS/dev/branch_yushan_merged/src
- Timestamp: 06/21/17 09:09:59
- Location: XIOS/dev/branch_yushan_merged/src
- Files: 8 edited
Legend:
- Unmodified lines carry no marker
- Added lines are marked "+"
- Removed lines are marked "-"
XIOS/dev/branch_yushan_merged/src/buffer_client.cpp
(r1134 → r1176)

      {
        MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
+       #pragma omp critical (_output)
        pending = true;
        if (current == 1) current = 0;
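The added pragma relies on OpenMP named critical sections: every block that carries the same name (here _output) is serialised against all other blocks with that name. A minimal standalone sketch, not XIOS code, with purely illustrative work inside the section:

    #include <cstdio>
    #include <omp.h>

    int main()
    {
      #pragma omp parallel num_threads(4)
      {
        // Blocks sharing the critical name (_output) are mutually exclusive:
        // only one thread at a time executes this printf.
        #pragma omp critical (_output)
        {
          std::printf("thread %d inside the _output critical section\n",
                      omp_get_thread_num());
        }
      }
      return 0;
    }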
XIOS/dev/branch_yushan_merged/src/client.cpp
(r1164 → r1176)

        MPI_Comm_rank(intraComm,&intraCommRank) ;

-       #pragma omp critical(_output)
+       /*#pragma omp critical(_output)
        {
          info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" serverLeader "<< serverLeader
                  <<" globalComm : "<< &(CXios::globalComm) << endl ;
-       }
+       }*/
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
(r1172 → r1176)

        int clientRank;
        MPI_Comm_rank(commLevel,&clientRank);
+       ep_lib::MPI_Barrier(commLevel);
        int groupRankBegin = this->getGroupBegin()[level];
        int nbClient = this->getNbInGroup()[level];
…
        int currentIndex = 0;
        int nbRecvClient = recvRankClient.size();
-       for (int idx = 0; idx < nbRecvClient; ++idx)
-       {
-         if (0 != recvNbIndexClientCount[idx])
-         {
-           recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
-         }
-         currentIndex += recvNbIndexClientCount[idx];
-       }

        boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                      iteIndex = client2ClientIndex.end();
…
          sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);

+       for (int idx = 0; idx < nbRecvClient; ++idx)
+       {
+         if (0 != recvNbIndexClientCount[idx])
+         {
+           recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+         }
+         currentIndex += recvNbIndexClientCount[idx];
+       }
+
        std::vector<ep_lib::MPI_Status> status(request.size());
        MPI_Waitall(request.size(), &request[0], &status[0]);

        CArray<size_t,1>* tmpGlobalIndex;
…
          --level;
          computeIndexInfoMappingLevel(*tmpGlobalIndex, this->internalComm_, level);
+
        }
        else // Now, we are in the last level where necessary mappings are.
…
        MPI_Comm_rank(commLevel,&clientRank);
        computeSendRecvRank(level, clientRank);
+       ep_lib::MPI_Barrier(commLevel);

        int groupRankBegin = this->getGroupBegin()[level];
…

        int nRequest = 0;
+
+       for (int idx = 0; idx < sendNbRank.size(); ++idx)
+       {
+         MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
+                   sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
+         ++nRequest;
+       }
+
        for (int idx = 0; idx < recvNbRank.size(); ++idx)
        {
          MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
                    recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
-         ++nRequest;
-       }
-
-       for (int idx = 0; idx < sendNbRank.size(); ++idx)
-       {
-         MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
-                   sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
          ++nRequest;
        }
…

        int nRequest = 0;
-       for (int idx = 0; idx < recvBuffSize; ++idx)
-       {
-         MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
-                   recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-         ++nRequest;
-       }

        for (int idx = 0; idx < sendBuffSize; ++idx)
…
          MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
                    sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
+         ++nRequest;
+       }
+
+       for (int idx = 0; idx < recvBuffSize; ++idx)
+       {
+         MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
+                   recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
          ++nRequest;
        }
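The bulk of this change swaps the order in which the non-blocking operations are posted: the MPI_Isend calls (and sendIndexToClients) now come first, the matching MPI_Irecv calls second, and a single MPI_Waitall completes the whole exchange; an ep_lib::MPI_Barrier is also added right after the rank query at the two entry points. A minimal standalone sketch of that post-sends-then-receives pattern, using a hypothetical ring exchange rather than the XIOS DHT buffers:

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Hypothetical ring exchange: one neighbour to send to, one to receive from.
      int next = (rank + 1) % size;
      int prev = (rank - 1 + size) % size;

      int sendVal = rank, recvVal = -1;
      std::vector<MPI_Request> request(2);

      // Post the non-blocking send first, then the matching receive,
      // mirroring the Isend-before-Irecv ordering introduced by the changeset.
      MPI_Isend(&sendVal, 1, MPI_INT, next, 0, MPI_COMM_WORLD, &request[0]);
      MPI_Irecv(&recvVal, 1, MPI_INT, prev, 0, MPI_COMM_WORLD, &request[1]);

      // Complete both operations in one call, as the code above does with MPI_Waitall.
      std::vector<MPI_Status> status(request.size());
      MPI_Waitall(request.size(), request.data(), status.data());

      MPI_Finalize();
      return 0;
    }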
XIOS/dev/branch_yushan_merged/src/data_output.cpp
(r1096 → r1176)

        #include "group_template.hpp"
        #include "context.hpp"
        //mpi.hpp
        namespace xios
        {
-       /// ////////////////////// Définitions ////////////////////// ///
+       /// ////////////////////// Dfinitions ////////////////////// ///

        CDataOutput::~CDataOutput(void)
XIOS/dev/branch_yushan_merged/src/io/nc4_data_input.cpp
(r1172 → r1176)

        #ifdef _usingEP
          SuperClass::type = ONE_FILE;
-         //printf("SuperClass::type = %d\n", SuperClass::type);
+         printf("SuperClass::type = %d\n", SuperClass::type);
        #endif
        switch (SuperClass::type)
XIOS/dev/branch_yushan_merged/src/io/nc4_data_output.cpp
(r1172 → r1176)

        SuperClassWriter::definition_end();
-       printf("SuperClass::type = %d \n", SuperClass::type);
+       printf("SuperClass::type = %d, typePrec = %d\n", SuperClass::type, typePrec);
        switch (SuperClass::type)
XIOS/dev/branch_yushan_merged/src/test/test_complete_omp.f90
(r1134 → r1176)

          jbegin=jbegin+nj
        ENDDO
+
+       if((ni.LE.0) .OR. (nj.LE.0)) call MPI_Abort()

        iend=ibegin+ni-1 ; jend=jbegin+nj-1
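The new guard aborts the whole job as soon as a rank ends up with an empty block after the domain decomposition, instead of letting the test fail later on a zero-sized domain. A minimal sketch of the same guard, not the test code, with a hypothetical decomposition and the standard MPI_Abort signature (communicator plus error code) rather than the bare call used in the Fortran test:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      const int nj_glo = 100;           // hypothetical global size
      int nj = nj_glo / size;           // local block size
      if (rank < nj_glo % size) ++nj;   // distribute the remainder

      if (nj <= 0)                      // no valid local block: give up early
      {
        std::fprintf(stderr, "rank %d: empty decomposition\n", rank);
        MPI_Abort(MPI_COMM_WORLD, 1);
      }

      MPI_Finalize();
      return 0;
    }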
XIOS/dev/branch_yushan_merged/src/test/test_remap_omp.f90
(r1153 → r1176)

        if(rank < size-2) then

-       !$omp parallel default(firstprivate) firstprivate(dtime)
+       !$omp parallel default(private) firstprivate(dtime)

        !!! XIOS Initialization (get the local communicator)
…

        ierr=NF90_INQ_VARID(ncid,"bounds_lon",varid)
-       ierr=NF90_INQUIRE_VARIABLE(ncid, varid, dimids=dimids)
+       ierr=NF90_INQUIRE_VARIABLE(ncid, varid, dimids=dimids)
        ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(1), len=src_nvertex)
        ierr=NF90_INQUIRE_DIMENSION(ncid, dimids(2), len=src_ni_glo)
…
          src_ibegin= remain * (div+1) + (rank-remain) * div ;
        ENDIF
+
+       if(src_ni .LE. 0) CALL MPI_ABORT()
+

        ALLOCATE(src_lon(src_ni), src_lon_tmp(src_ni))
…
        ALLOCATE(lval1(interpolatedLlm))
        ALLOCATE(lval2(llm2))
+       lval2 = 0
+       lval=0
+       lval1=0

        ierr=NF90_INQ_VARID(ncid,"lon",varid)
…
        ENDIF

+       if(dst_ni .LE. 0) CALL MPI_ABORT()
+
        ALLOCATE(dst_lon(dst_ni))
        ALLOCATE(dst_lat(dst_ni))
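Besides the empty-decomposition guards and the zero-initialisation of the lval buffers, the parallel region now uses default(private) so that, apart from dtime, each thread works on its own copies of the test variables. C and C++ OpenMP only accept default(shared) or default(none), so the sketch below, which is not the test code, uses default(none) with explicit clauses to show the same idea of stating each variable's sharing; the variable names are illustrative only:

    #include <cstdio>
    #include <omp.h>

    int main()
    {
      double dtime = 3600.0;   // set once before the region, copied per thread
      int    nstep = 0;        // explicitly shared counter

      #pragma omp parallel default(none) firstprivate(dtime) shared(nstep)
      {
        // Each thread starts from the same dtime but owns its copy, so this
        // change is not visible to other threads or after the region ends.
        dtime *= 0.5;

        #pragma omp atomic
        ++nstep;

        std::printf("thread %d: local dtime = %g\n", omp_get_thread_num(), dtime);
      }

      std::printf("threads: %d, dtime after region: %g\n", nstep, dtime);
      return 0;
    }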