- Timestamp: 06/21/17 09:09:59
- File: 1 edited
Legend:
- Unmodified (no prefix)
- Added ("+")
- Removed ("-")
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
r1172 → r1176

(r1172 lines 104-107 / r1176 lines 104-108)
     int clientRank;
     MPI_Comm_rank(commLevel,&clientRank);
+    ep_lib::MPI_Barrier(commLevel);
     int groupRankBegin = this->getGroupBegin()[level];
     int nbClient = this->getNbInGroup()[level];

(r1172 lines 180-192 / r1176 lines 181-185)
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
-    for (int idx = 0; idx < nbRecvClient; ++idx)
-    {
-      if (0 != recvNbIndexClientCount[idx])
-      {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
-      }
-      currentIndex += recvNbIndexClientCount[idx];
-    }

     boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
                                                   iteIndex = client2ClientIndex.end();

(r1172 lines 194-199 / r1176 lines 187-205)
       sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);

+
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+      {
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+      }
+      currentIndex += recvNbIndexClientCount[idx];
+    }
+
+
     std::vector<ep_lib::MPI_Status> status(request.size());
     MPI_Waitall(request.size(), &request[0], &status[0]);
+

     CArray<size_t,1>* tmpGlobalIndex;

(r1172 lines 208-211 / r1176 lines 214-218)
       --level;
       computeIndexInfoMappingLevel(*tmpGlobalIndex, this->internalComm_, level);
+
     }
     else // Now, we are in the last level where necessary mappings are.

(r1172 lines 372-375 / r1176 lines 379-383)
     MPI_Comm_rank(commLevel,&clientRank);
     computeSendRecvRank(level, clientRank);
+    ep_lib::MPI_Barrier(commLevel);

     int groupRankBegin = this->getGroupBegin()[level];

(r1172 lines 666-680 / r1176 lines 674-690)

     int nRequest = 0;
+
+
+    for (int idx = 0; idx < sendNbRank.size(); ++idx)
+    {
+      MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
+                sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
+      ++nRequest;
+    }
+
     for (int idx = 0; idx < recvNbRank.size(); ++idx)
     {
       MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
                 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
-      ++nRequest;
-    }
-
-    for (int idx = 0; idx < sendNbRank.size(); ++idx)
-    {
-      MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
-                sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }

(r1172 lines 714-723 / r1176 lines 724-728)

     int nRequest = 0;
-    for (int idx = 0; idx < recvBuffSize; ++idx)
-    {
-      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
-                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-      ++nRequest;
-    }
+

     for (int idx = 0; idx < sendBuffSize; ++idx)

(r1172 lines 734-737 / r1176 lines 739-749)
       MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
                 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
+      ++nRequest;
+    }
+
+    for (int idx = 0; idx < recvBuffSize; ++idx)
+    {
+      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
+                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }
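Taken together, the changeset does two things in client_client_dht_template_impl.hpp: it inserts ep_lib::MPI_Barrier(commLevel) calls after the rank queries, and it reorders the non-blocking point-to-point exchanges so that the MPI_Isend loops are posted before the matching MPI_Irecv loops, with a single MPI_Waitall completing both. The following is a minimal, standalone sketch of that send-before-receive pattern; it uses plain MPI rather than the branch's ep_lib wrappers, and the variable names only loosely mirror those in the diff.

// Minimal sketch (plain MPI, not XIOS/ep_lib code) of the ordering the
// changeset adopts: post every MPI_Isend first, then the matching
// MPI_Irecv calls, and complete all requests with one MPI_Waitall.
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Each rank sends one int to every other rank and receives one back.
  std::vector<int> sendNbElements(size, rank), recvNbElements(size, -1);
  std::vector<MPI_Request> request(2 * (size - 1));
  int nRequest = 0;

  // 1) Post all non-blocking sends first (the r1176 ordering).
  for (int idx = 0; idx < size; ++idx)
  {
    if (idx == rank) continue;
    MPI_Isend(&sendNbElements[idx], 1, MPI_INT, idx, /*tag*/ 1,
              MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;
  }

  // 2) Then post the matching non-blocking receives.
  for (int idx = 0; idx < size; ++idx)
  {
    if (idx == rank) continue;
    MPI_Irecv(&recvNbElements[idx], 1, MPI_INT, idx, /*tag*/ 1,
              MPI_COMM_WORLD, &request[nRequest]);
    ++nRequest;
  }

  // 3) Complete every outstanding request at once.
  std::vector<MPI_Status> status(nRequest);
  MPI_Waitall(nRequest, request.data(), status.data());

  if (rank == 0 && size > 1)
    std::printf("rank 0 received %d from rank 1\n", recvNbElements[1]);

  MPI_Finalize();
  return 0;
}

With standard MPI non-blocking calls either ordering of Isend and Irecv is legal, so the swap (and the added barriers) presumably matters for the ep_lib endpoint-MPI layer used in this branch; the excerpt above carries no log message stating the motivation.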