- Timestamp: 11/19/18 15:52:54
- File: 1 edited
Legend:
- Unmodified lines have no prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/dev_trunk_omp/src/client_client_dht_template_impl.hpp
--- client_client_dht_template_impl.hpp (r1542)
+++ client_client_dht_template_impl.hpp (r1601)

@@ -14,8 +14,8 @@
 {
  template<typename T, typename H>
-  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
+  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
    : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
  {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
    this->computeMPICommLevel();
    int nbLvl = this->getNbLevel();

@@ -34,8 +34,8 @@
  template<typename T, typename H>
  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
-                                                          const MPI_Comm& clientIntraComm)
+                                                          const ep_lib::MPI_Comm& clientIntraComm)
    : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
  {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
    this->computeMPICommLevel();
    int nbLvl = this->getNbLevel();

@@ -59,8 +59,8 @@
  template<typename T, typename H>
  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                          const MPI_Comm& clientIntraComm)
+                                                          const ep_lib::MPI_Comm& clientIntraComm)
    : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
  {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
    this->computeMPICommLevel();
    int nbLvl = this->getNbLevel();

@@ -95,9 +95,9 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                                                   const MPI_Comm& commLevel,
+                                                                   const ep_lib::MPI_Comm& commLevel,
                                                                    int level)
  {
    int clientRank;
-    MPI_Comm_rank(commLevel,&clientRank);
+    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
    int groupRankBegin = this->getGroupBegin()[level];
    int nbClient = this->getNbInGroup()[level];

@@ -169,5 +169,15 @@
    recvIndexBuff = new unsigned long[recvNbIndexCount];

-    std::vector<MPI_Request> request;
+    int request_size = 0;
+    for (int idx = 0; idx < recvRankClient.size(); ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+        request_size ++;
+    }
+
+    request_size += client2ClientIndex.size;
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
+
    std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                               iteRecvIndex = recvRankClient.end(),

@@ -176,8 +186,9 @@
    int currentIndex = 0;
    int nbRecvClient = recvRankClient.size();
+    int request_position = 0;
    for (int idx = 0; idx < nbRecvClient; ++idx)
    {
      if (0 != recvNbIndexClientCount[idx])
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
      currentIndex += recvNbIndexClientCount[idx];
    }

@@ -186,8 +197,8 @@
                                              iteIndex = client2ClientIndex.end();
    for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);

-    std::vector<MPI_Status> status(request.size());
-    MPI_Waitall(request.size(), &request[0], &status[0]);
+    std::vector<ep_lib::MPI_Status> status(request.size());
+    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);

    CArray<size_t,1>* tmpGlobalIndex;

@@ -242,5 +253,24 @@
    }

-    std::vector<MPI_Request> requestOnReturn;
+    int requestOnReturn_size=0;
+    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
+    {
+      if (0 != recvNbIndexOnReturn[idx])
+      {
+        requestOnReturn_size += 2;
+      }
+    }
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != sendNbIndexOnReturn[idx])
+      {
+        requestOnReturn_size += 2;
+      }
+    }
+
+    int requestOnReturn_position=0;
+
+    std::vector<ep_lib::MPI_Request> requestOnReturn(requestOnReturn_size);
    currentIndex = 0;
    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)

@@ -248,9 +278,9 @@
      if (0 != recvNbIndexOnReturn[idx])
      {
-        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
+        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
        recvInfoFromClients(recvRankOnReturn[idx],
                            recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
                            recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, requestOnReturn);
+                            commLevel, &requestOnReturn[requestOnReturn_position++]);
      }
      currentIndex += recvNbIndexOnReturn[idx];

@@ -286,13 +316,13 @@

        sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
-                           sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
+                           sendNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
        sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
-                          sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
+                          sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, &requestOnReturn[requestOnReturn_position++]);
      }
      currentIndex += recvNbIndexClientCount[idx];
    }

-    std::vector<MPI_Status> statusOnReturn(requestOnReturn.size());
-    MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
+    std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
+    ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);

    Index2VectorInfoTypeMap indexToInfoMapping;

@@ -360,9 +390,9 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                              const MPI_Comm& commLevel,
+                                                              const ep_lib::MPI_Comm& commLevel,
                                                               int level)
  {
    int clientRank;
-    MPI_Comm_rank(commLevel,&clientRank);
+    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
    computeSendRecvRank(level, clientRank);

@@ -439,16 +469,29 @@
    // it will send a message to the correct clients.
    // Contents of the message are index and its corresponding informatioin
-    std::vector<MPI_Request> request;
+    int request_size = 0;
+    for (int idx = 0; idx < recvRankClient.size(); ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+      {
+        request_size += 2;
+      }
+    }
+
+    request_size += client2ClientIndex.size();
+    request_size += client2ClientInfo.size();
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
    int currentIndex = 0;
    int nbRecvClient = recvRankClient.size();
+    int request_position=0;
    for (int idx = 0; idx < nbRecvClient; ++idx)
    {
      if (0 != recvNbIndexClientCount[idx])
      {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
-        recvInfoFromClients(recvRankClient[idx],
-                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
-                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, request);
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
+        recvInfoFromClients(recvRankClient[idx],
+                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
+                            commLevel, &request[request_position++]);
      }
      currentIndex += recvNbIndexClientCount[idx];

@@ -458,12 +501,12 @@
                                              iteIndex = client2ClientIndex.end();
    for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+      sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
    std::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                      iteInfo = client2ClientInfo.end();
    for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
-      sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
-
-    std::vector<MPI_Status> status(request.size());
-    MPI_Waitall(request.size(), &request[0], &status[0]);
+      sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, &request[request_position++]);
+
+    std::vector<ep_lib::MPI_Status> status(request.size());
+    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);

    Index2VectorInfoTypeMap indexToInfoMapping;

@@ -518,10 +561,27 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                         const MPI_Comm& clientIntraComm,
-                                                         std::vector<MPI_Request>& requestSendIndex)
-  {
-    MPI_Request request;
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         std::vector<ep_lib::MPI_Request>& requestSendIndex)
+  {
+    ep_lib::MPI_Request request;
    requestSendIndex.push_back(request);
-    MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
+    ep_lib::MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
              clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back()));
+  }
+
+  /*!
+    Send message containing index to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] indices index to send
+    \param [in] indiceSize size of index array to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestSendIndex sending request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         ep_lib::MPI_Request* requestSendIndex)
+  {
+    ep_lib::MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
+                      clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex);
  }

@@ -536,10 +596,26 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                           const MPI_Comm& clientIntraComm,
-                                                           std::vector<MPI_Request>& requestRecvIndex)
-  {
-    MPI_Request request;
+                                                           const ep_lib::MPI_Comm& clientIntraComm,
+                                                           std::vector<ep_lib::MPI_Request>& requestRecvIndex)
+  {
+    ep_lib::MPI_Request request;
    requestRecvIndex.push_back(request);
-    MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,
+    ep_lib::MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,
              clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back()));
+  }
+
+  /*!
+    Receive message containing index to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] indices index to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestRecvIndex receiving request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
+                                                           const ep_lib::MPI_Comm& clientIntraComm,
+                                                           ep_lib::MPI_Request *requestRecvIndex)
+  {
+    ep_lib::MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,
+                      clientSrcRank, MPI_DHT_INDEX, clientIntraComm, requestRecvIndex);
  }

@@ -555,11 +631,28 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                        const MPI_Comm& clientIntraComm,
-                                                        std::vector<MPI_Request>& requestSendInfo)
-  {
-    MPI_Request request;
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        std::vector<ep_lib::MPI_Request>& requestSendInfo)
+  {
+    ep_lib::MPI_Request request;
    requestSendInfo.push_back(request);

-    MPI_Isend(info, infoSize, MPI_CHAR,
+    ep_lib::MPI_Isend(info, infoSize, MPI_CHAR,
              clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
+  }
+
+  /*!
+    Send message containing information to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] info info array to send
+    \param [in] infoSize info array size to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestSendInfo sending request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        ep_lib::MPI_Request *requestSendInfo)
+  {
+    ep_lib::MPI_Isend(info, infoSize, MPI_CHAR,
+                      clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo);
  }

@@ -575,11 +668,28 @@
  template<typename T, typename H>
  void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                          const MPI_Comm& clientIntraComm,
-                                                          std::vector<MPI_Request>& requestRecvInfo)
-  {
-    MPI_Request request;
+                                                          const ep_lib::MPI_Comm& clientIntraComm,
+                                                          std::vector<ep_lib::MPI_Request>& requestRecvInfo)
+  {
+    ep_lib::MPI_Request request;
    requestRecvInfo.push_back(request);

-    MPI_Irecv(info, infoSize, MPI_CHAR,
+    ep_lib::MPI_Irecv(info, infoSize, MPI_CHAR,
              clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back()));
+  }
+
+  /*!
+    Receive message containing information from other clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] info info array to receive
+    \param [in] infoSize info array size to receive
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestRecvInfo list of receiving request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
+                                                          const ep_lib::MPI_Comm& clientIntraComm,
+                                                          ep_lib::MPI_Request* requestRecvInfo)
+  {
+    ep_lib::MPI_Irecv(info, infoSize, MPI_CHAR,
+                      clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo);
  }

@@ -651,11 +761,11 @@
  {
    recvNbElements.resize(recvNbRank.size());
-    std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size());
-    std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());

    int nRequest = 0;
    for (int idx = 0; idx < recvNbRank.size(); ++idx)
    {
-      MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
+      ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
                recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
      ++nRequest;

@@ -664,9 +774,9 @@
    for (int idx = 0; idx < sendNbRank.size(); ++idx)
    {
-      MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
+      ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
                sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
      ++nRequest;
    }

-    MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
+    ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
  }

@@ -696,12 +806,10 @@
    std::vector<int> recvBuff(recvBuffSize*2,0);

-    std::vector<MPI_Request> request(sendBuffSize+recvBuffSize);
-    std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
-
+    std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
+    std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
    int nRequest = 0;
    for (int idx = 0; idx < recvBuffSize; ++idx)
    {
-      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
-                recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-      ++nRequest;
+      ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
+                        recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]);
    }

@@ -716,10 +824,9 @@
    for (int idx = 0; idx < sendBuffSize; ++idx)
    {
-      MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
-                sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
-      ++nRequest;
-    }
-
-    MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
+      ep_lib::MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
+                        sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]);
+    }
+
+    ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
    int nbRecvRank = 0, nbRecvElements = 0;
    recvNbRank.clear();
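The recurring pattern in this changeset is to count the non-blocking calls first, size the request array once, and then hand each Isend/Irecv a pointer to its own slot through a running position index, instead of declaring an empty std::vector<MPI_Request> and pushing a request before every call as r1542 did. The sketch below is illustrative only: it uses the standard MPI C API rather than the ep_lib:: wrappers (assumed here to mirror those calls), and the function name, parameters and tag are invented for the example.

// Illustrative sketch only -- not part of the changeset. It shows the
// "pre-size the request array, then fill it through a running position"
// pattern that r1601 applies, written against plain MPI; ep_lib:: is
// assumed to wrap these calls with the same signatures.
#include <mpi.h>
#include <cstddef>
#include <vector>

// Hypothetical helper: post one receive per rank in recvRanks and one send
// per rank in sendRanks, then wait for all of them to complete.
void exchangeCounts(const std::vector<int>& sendRanks, std::vector<int>& sendCounts,
                    const std::vector<int>& recvRanks, std::vector<int>& recvCounts,
                    MPI_Comm comm)
{
  recvCounts.resize(recvRanks.size());

  // Size the request array once, up front, instead of push_back-ing a
  // request before each non-blocking call.
  std::vector<MPI_Request> request(sendRanks.size() + recvRanks.size());
  int requestPosition = 0;

  // Each call writes its request handle into its own dedicated slot.
  for (std::size_t idx = 0; idx < recvRanks.size(); ++idx)
    MPI_Irecv(&recvCounts[idx], 1, MPI_INT, recvRanks[idx], /*tag=*/0, comm,
              &request[requestPosition++]);

  for (std::size_t idx = 0; idx < sendRanks.size(); ++idx)
    MPI_Isend(&sendCounts[idx], 1, MPI_INT, sendRanks[idx], /*tag=*/0, comm,
              &request[requestPosition++]);

  std::vector<MPI_Status> status(request.size());
  MPI_Waitall(static_cast<int>(request.size()), request.data(), status.data());
}

The changeset itself does not state the motivation; mechanically, the pre-sized array gives every request a dedicated slot that the new pointer-taking overloads (sendIndexToClients, recvIndexFromClients, sendInfoToClients and recvInfoFromClients with an ep_lib::MPI_Request* parameter) can write into directly, while the original std::vector-reference overloads are kept for callers that still push_back.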