Timestamp: 05/16/17 17:54:30 (7 years ago)
File (1 edited):
Legend:
  ' '  unmodified line
  '+'  added line
  '-'  removed line
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
--- src/client_client_dht_template_impl.hpp (r892)
+++ src/client_client_dht_template_impl.hpp (r1134)

@@ r892:10 r1134:10 @@
 #include "utils.hpp"
 #include "mpi_tag.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
+
 
 namespace xios
 {
 template<typename T, typename H>
-CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
 {

@@ r892:34 r1134:38 @@
 template<typename T, typename H>
 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
-                                                         const MPI_Comm& clientIntraComm)
+                                                         const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
 {

@@ r892:59 r1134:63 @@
 template<typename T, typename H>
 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                         const MPI_Comm& clientIntraComm)
+                                                         const ep_lib::MPI_Comm& clientIntraComm)
   : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
 {

@@ r892:95 r1134:99 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                                                  const MPI_Comm& commLevel,
+                                                                  const ep_lib::MPI_Comm& commLevel,
                                                                   int level)
 {

@@ r892:169 r1134:173 @@
   recvIndexBuff = new unsigned long[recvNbIndexCount];
 
-  std::vector<MPI_Request> request;
+  std::vector<ep_lib::MPI_Request> request;
   std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                              iteRecvIndex = recvRankClient.end(),

@@ r892:179 r1134:183 @@
   {
     if (0 != recvNbIndexClientCount[idx])
+    {
       recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+    }
     currentIndex += recvNbIndexClientCount[idx];
   }

@@ r892:188 r1134:194 @@
     sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
 
-  std::vector<MPI_Status> status(request.size());
+  std::vector<ep_lib::MPI_Status> status(request.size());
   MPI_Waitall(request.size(), &request[0], &status[0]);
 

@@ r892:242 r1134:248 @@
   }
 
-  std::vector<MPI_Request> requestOnReturn;
+  std::vector<ep_lib::MPI_Request> requestOnReturn;
   currentIndex = 0;
   for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)

@@ r892:293 r1134:299 @@
   }
 
-  std::vector<MPI_Status> statusOnReturn(requestOnReturn.size());
+  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
   MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
 

@@ r892:360 r1134:366 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                             const MPI_Comm& commLevel,
+                                                             const ep_lib::MPI_Comm& commLevel,
                                                              int level)
 {

@@ r892:412 r1134:418 @@
     {
       client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
-      // ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
       ProcessDHTElement<InfoType>::packElement(infoTmp[idx], client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
       ++sendNbIndexBuff[indexClient];

@@ r892:439 r1134:444 @@
   // it will send a message to the correct clients.
   // Contents of the message are index and its corresponding informatioin
-  std::vector<MPI_Request> request;
+  std::vector<ep_lib::MPI_Request> request;
   int currentIndex = 0;
   int nbRecvClient = recvRankClient.size();

@@ r892:458 r1134:463 @@
                                                      iteIndex = client2ClientIndex.end();
   for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
+  {
     sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+  }
+
   boost::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                        iteInfo = client2ClientInfo.end();
   for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
+  {
     sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
 
-  std::vector<MPI_Status> status(request.size());
+  }
+
+  std::vector<ep_lib::MPI_Status> status(request.size());
+
   MPI_Waitall(request.size(), &request[0], &status[0]);
 

@@ r892:518 r1134:530 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                        const MPI_Comm& clientIntraComm,
-                                                        std::vector<MPI_Request>& requestSendIndex)
-{
-  MPI_Request request;
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        std::vector<ep_lib::MPI_Request>& requestSendIndex)
+{
+  ep_lib::MPI_Request request;
   requestSendIndex.push_back(request);
   MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,

@@ r892:536 r1134:548 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                          const MPI_Comm& clientIntraComm,
-                                                          std::vector<MPI_Request>& requestRecvIndex)
-{
-  MPI_Request request;
+                                                          const ep_lib::MPI_Comm& clientIntraComm,
+                                                          std::vector<ep_lib::MPI_Request>& requestRecvIndex)
+{
+  ep_lib::MPI_Request request;
   requestRecvIndex.push_back(request);
   MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,

@@ r892:555 r1134:567 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                       const MPI_Comm& clientIntraComm,
-                                                       std::vector<MPI_Request>& requestSendInfo)
-{
-  MPI_Request request;
+                                                       const ep_lib::MPI_Comm& clientIntraComm,
+                                                       std::vector<ep_lib::MPI_Request>& requestSendInfo)
+{
+  ep_lib::MPI_Request request;
   requestSendInfo.push_back(request);
-
   MPI_Isend(info, infoSize, MPI_CHAR,
             clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));

@@ r892:575 r1134:586 @@
 template<typename T, typename H>
 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                         const MPI_Comm& clientIntraComm,
-                                                         std::vector<MPI_Request>& requestRecvInfo)
-{
-  MPI_Request request;
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         std::vector<ep_lib::MPI_Request>& requestRecvInfo)
+{
+  ep_lib::MPI_Request request;
   requestRecvInfo.push_back(request);
 

@@ r892:651 r1134:662 @@
   {
     recvNbElements.resize(recvNbRank.size());
-    std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size());
-    std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
 
     int nRequest = 0;

@@ r892:696 r1134:707 @@
   std::vector<int> recvBuff(recvBuffSize*2,0);
 
-  std::vector<MPI_Request> request(sendBuffSize+recvBuffSize);
-  std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
+  std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
+  std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
 
   int nRequest = 0;

@@ r892:721 r1134:732 @@
   }
 
+  //MPI_Barrier(this->internalComm_);
+
   MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
+
   int nbRecvRank = 0, nbRecvElements = 0;
   recvNbRank.clear();
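The substance of the change is uniform: every MPI_Comm, MPI_Request and MPI_Status handled by the client-client DHT template now goes through the ep_lib:: wrapper types, and ep_declaration.hpp is pulled in when the endpoint build flag _usingEP is set. For readers unfamiliar with this pattern, the sketch below shows one way such a wrapper namespace can stay source-compatible when endpoints are disabled; it is a hypothetical illustration under that assumption, not the actual contents of the XIOS ep_declaration.hpp.

    // Hypothetical sketch: fall back to the plain MPI handle types when the
    // endpoint layer is not compiled in, so ep_lib::MPI_Comm etc. always exist.
    #include <mpi.h>

    #ifdef _usingEP
      #include "ep_declaration.hpp"  // endpoint-aware MPI_Comm/MPI_Request/MPI_Status
    #else
    namespace ep_lib
    {
      // Plain aliases: code written against ep_lib::MPI_Comm compiles unchanged
      // and behaves exactly like standard MPI when endpoints are off.
      typedef ::MPI_Comm    MPI_Comm;
      typedef ::MPI_Request MPI_Request;
      typedef ::MPI_Status  MPI_Status;
    }
    #endif

With a fallback of that kind in place, rewriting the template's signatures and request/status vectors to the ep_lib:: names is a pure renaming in the non-endpoint build, while an endpoint-enabled build can substitute its own communicator and request implementations behind the same names.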