Changeset 1638 for XIOS/trunk/src/client_client_dht_template_impl.hpp
- Timestamp: 01/22/19 16:15:03
- File: XIOS/trunk/src/client_client_dht_template_impl.hpp (1 edited)
XIOS/trunk/src/client_client_dht_template_impl.hpp
Changes from r1542 to r1638. The changeset ports the DHT template implementation from direct MPI calls to the ep_lib endpoint wrappers (ep_lib::MPI_Comm, ep_lib::MPI_Request, EP_* datatype constants) and replaces request vectors grown with push_back by vectors sized up front and filled by position, adding pointer-based overloads of the send/receive helpers for that purpose.

@@ -14,8 +14,8 @@
 {
   template<typename T, typename H>
-  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
+  CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();

@@ -34,8 +34,8 @@
   template<typename T, typename H>
   CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap,
-                                                          const MPI_Comm& clientIntraComm)
+                                                          const ep_lib::MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();

@@ -59,8 +59,8 @@
   template<typename T, typename H>
   CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                          const MPI_Comm& clientIntraComm)
+                                                          const ep_lib::MPI_Comm& clientIntraComm)
     : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
   {
-    MPI_Comm_size(clientIntraComm, &nbClient_);
+    ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
     this->computeMPICommLevel();
     int nbLvl = this->getNbLevel();

@@ -95,9 +95,9 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
-                                                                   const MPI_Comm& commLevel,
+                                                                   const ep_lib::MPI_Comm& commLevel,
                                                                    int level)
   {
     int clientRank;
-    MPI_Comm_rank(commLevel,&clientRank);
+    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
     int groupRankBegin = this->getGroupBegin()[level];
     int nbClient = this->getNbInGroup()[level];
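Throughout the diff, the ep_lib:: prefix reroutes each call through XIOS's endpoint MPI layer rather than calling MPI directly, and MPI_* datatype constants become EP_* ones. The wrapper itself is outside this changeset, so the following is only a hypothetical sketch of the forwarding shape such a layer can take, assuming the endpoint types simply mirror the plain MPI handles:

```cpp
// Minimal sketch only: the real ep_lib defines its own endpoint types; here we
// just alias the plain MPI handles and forward the two calls this diff touches.
#include <mpi.h>

namespace ep_lib
{
  typedef ::MPI_Comm MPI_Comm;
  typedef ::MPI_Request MPI_Request;
  typedef ::MPI_Status MPI_Status;

  inline int MPI_Comm_size(MPI_Comm comm, int* size) { return ::MPI_Comm_size(comm, size); }
  inline int MPI_Comm_rank(MPI_Comm comm, int* rank) { return ::MPI_Comm_rank(comm, rank); }
}
```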
@@ -169,5 +169,14 @@
     recvIndexBuff = new unsigned long[recvNbIndexCount];
 
-    std::vector<MPI_Request> request;
+    int request_size = 0;
+    for (int idx = 0; idx < recvRankClient.size(); ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+        request_size ++;
+    }
+
+    request_size += client2ClientIndex.size();
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
     std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex,
                                iteRecvIndex = recvRankClient.end(),

@@ -176,8 +185,9 @@
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
+    int request_position = 0;
     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
       currentIndex += recvNbIndexClientCount[idx];
     }

@@ -186,8 +196,8 @@
                                               iteIndex = client2ClientIndex.end();
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+      sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
 
-    std::vector<MPI_Status> status(request.size());
-    MPI_Waitall(request.size(), &request[0], &status[0]);
+    std::vector<ep_lib::MPI_Status> status(request.size());
+    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);
 
     CArray<size_t,1>* tmpGlobalIndex;

@@ -242,5 +252,24 @@
     }
 
-    std::vector<MPI_Request> requestOnReturn;
+    int requestOnReturn_size=0;
+    for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)
+    {
+      if (0 != recvNbIndexOnReturn[idx])
+      {
+        requestOnReturn_size += 2;
+      }
+    }
+
+    for (int idx = 0; idx < nbRecvClient; ++idx)
+    {
+      if (0 != sendNbIndexOnReturn[idx])
+      {
+        requestOnReturn_size += 2;
+      }
+    }
+
+    int requestOnReturn_position=0;
+
+    std::vector<ep_lib::MPI_Request> requestOnReturn(requestOnReturn_size);
     currentIndex = 0;
     for (int idx = 0; idx < recvRankOnReturn.size(); ++idx)

@@ -248,9 +277,9 @@
       if (0 != recvNbIndexOnReturn[idx])
       {
-        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);
+        recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
         recvInfoFromClients(recvRankOnReturn[idx],
                             recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
                             recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, requestOnReturn);
+                            commLevel, &requestOnReturn[requestOnReturn_position++]);
       }
       currentIndex += recvNbIndexOnReturn[idx];

@@ -286,13 +315,13 @@
 
       sendIndexToClients(rank, client2ClientIndexOnReturn[rank],
-                         sendNbIndexOnReturn[idx], commLevel, requestOnReturn);
+                         sendNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]);
       sendInfoToClients(rank, client2ClientInfoOnReturn[rank],
-                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);
+                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, &requestOnReturn[requestOnReturn_position++]);
     }
     currentIndex += recvNbIndexClientCount[idx];
   }
 
-  std::vector<MPI_Status> statusOnReturn(requestOnReturn.size());
-  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
+  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size());
+  ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);
 
   Index2VectorInfoTypeMap indexToInfoMapping;

@@ -360,8 +389,8 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap,
-                                                              const MPI_Comm& commLevel,
+                                                              const ep_lib::MPI_Comm& commLevel,
                                                               int level)
   {
     int clientRank;
-    MPI_Comm_rank(commLevel,&clientRank);
+    ep_lib::MPI_Comm_rank(commLevel,&clientRank);
     computeSendRecvRank(level, clientRank);
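The recurring pattern above is to count the nonblocking operations before any is posted, size the request vector once, and hand out slots by position, skipping ranks with nothing to receive. A minimal self-contained sketch of the same pattern against plain MPI (every name here is illustrative, not the XIOS API):

```cpp
#include <mpi.h>
#include <cstddef>
#include <vector>

// Sketch of the pre-sized request pattern introduced in this changeset.
void exchangeIndices(const std::vector<int>& recvRanks,
                     const std::vector<int>& recvCounts,   // element count per source rank
                     std::vector<unsigned long>& recvBuf,  // caller sizes it to the total count
                     const std::vector<int>& sendRanks,
                     std::vector<std::vector<unsigned long> >& sendBufs,
                     MPI_Comm comm)
{
  // 1) Count the requests before posting any: one per nonempty receive, one per send.
  int request_size = 0;
  for (std::size_t i = 0; i < recvRanks.size(); ++i)
    if (recvCounts[i] != 0) ++request_size;
  request_size += static_cast<int>(sendRanks.size());

  // 2) Size the vector once, so no request slot moves after a call is posted.
  std::vector<MPI_Request> request(request_size);
  int request_position = 0;

  // 3) Post receives into consecutive slots, skipping zero-length messages.
  int offset = 0;
  for (std::size_t i = 0; i < recvRanks.size(); ++i)
  {
    if (recvCounts[i] != 0)
      MPI_Irecv(&recvBuf[offset], recvCounts[i], MPI_UNSIGNED_LONG,
                recvRanks[i], /*tag=*/0, comm, &request[request_position++]);
    offset += recvCounts[i];
  }

  // 4) Post the sends the same way.
  for (std::size_t i = 0; i < sendRanks.size(); ++i)
    MPI_Isend(sendBufs[i].data(), static_cast<int>(sendBufs[i].size()), MPI_UNSIGNED_LONG,
              sendRanks[i], /*tag=*/0, comm, &request[request_position++]);

  // 5) Complete exactly the requests that were posted.
  std::vector<MPI_Status> status(request.size());
  MPI_Waitall(static_cast<int>(request.size()), request.data(), status.data());
}
```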
@@ -439,16 +468,32 @@
     // it will send a message to the correct clients.
     // Contents of the message are index and its corresponding information
-    std::vector<MPI_Request> request;
+    int request_size = 0;
+    for (int idx = 0; idx < recvRankClient.size(); ++idx)
+    {
+      if (0 != recvNbIndexClientCount[idx])
+      {
+        request_size += 2;
+      }
+    }
+
+    request_size += client2ClientIndex.size();
+    request_size += client2ClientInfo.size();
+
+    std::vector<ep_lib::MPI_Request> request(request_size);
+
     int currentIndex = 0;
     int nbRecvClient = recvRankClient.size();
+    int request_position=0;
     for (int idx = 0; idx < nbRecvClient; ++idx)
     {
       if (0 != recvNbIndexClientCount[idx])
       {
-        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
+        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
+        //if(clientRank==0) printf("recv index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));
         recvInfoFromClients(recvRankClient[idx],
-                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
-                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
-                            commLevel, request);
+                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(),
+                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(),
+                            commLevel, &request[request_position++]);
+        //if(clientRank==0) printf("recv info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));
       }
       currentIndex += recvNbIndexClientCount[idx];

@@ -458,12 +503,15 @@
                                               iteIndex = client2ClientIndex.end();
     for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
-      sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
+    { sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
+    } //if(clientRank==0) printf("send index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));}
     std::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
                                                       iteInfo = client2ClientInfo.end();
     for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
-      sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request);
-
-    std::vector<MPI_Status> status(request.size());
-    MPI_Waitall(request.size(), &request[0], &status[0]);
+    { sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, &request[request_position++]);
+    }// if(clientRank==0) printf("send info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));}
+
+    std::vector<ep_lib::MPI_Status> status(request.size());
+
+    ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);
 
     Index2VectorInfoTypeMap indexToInfoMapping;
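Note the byte arithmetic above: info payloads travel as unsigned char buffers, and element counts become byte counts via ProcessDHTElement<InfoType>::typeSize(). That trait is defined elsewhere in XIOS; purely as a hypothetical illustration of the shape such a helper can take for trivially copyable types:

```cpp
// Hypothetical sketch, not the XIOS ProcessDHTElement class: a typeSize-style
// trait plus a pack helper showing how an info buffer of n elements ends up
// n * typeSize() bytes long.
#include <cstring>

template<typename T>
struct ProcessElementSketch
{
  static int typeSize() { return static_cast<int>(sizeof(T)); }

  // Pack one element into a byte buffer at the current offset.
  static void pack(const T& value, unsigned char* buffer, int& offset)
  {
    std::memcpy(buffer + offset, &value, sizeof(T));
    offset += static_cast<int>(sizeof(T));
  }
};

// A receive buffer for n elements is then allocated as:
//   unsigned char* recvInfoBuff = new unsigned char[n * ProcessElementSketch<InfoType>::typeSize()];
```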
@@ -518,10 +566,27 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                         const MPI_Comm& clientIntraComm,
-                                                         std::vector<MPI_Request>& requestSendIndex)
-  {
-    MPI_Request request;
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         std::vector<ep_lib::MPI_Request>& requestSendIndex)
+  {
+    ep_lib::MPI_Request request;
     requestSendIndex.push_back(request);
-    MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
+    ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
               clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back()));
   }
+
+  /*!
+    Send message containing index to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] indices index to send
+    \param [in] indiceSize size of index array to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestSendIndex sending request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
+                                                         const ep_lib::MPI_Comm& clientIntraComm,
+                                                         ep_lib::MPI_Request* requestSendIndex)
+  {
+    ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
+                      clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex);
+  }

@@ -536,10 +601,26 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
-                                                           const MPI_Comm& clientIntraComm,
-                                                           std::vector<MPI_Request>& requestRecvIndex)
-  {
-    MPI_Request request;
+                                                           const ep_lib::MPI_Comm& clientIntraComm,
+                                                           std::vector<ep_lib::MPI_Request>& requestRecvIndex)
+  {
+    ep_lib::MPI_Request request;
     requestRecvIndex.push_back(request);
-    MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,
+    ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG,
               clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back()));
   }
+
+  /*!
+    Receive message containing index to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] indices index to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestRecvIndex receiving request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
+                                                           const ep_lib::MPI_Comm& clientIntraComm,
+                                                           ep_lib::MPI_Request *requestRecvIndex)
+  {
+    ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG,
+                      clientSrcRank, MPI_DHT_INDEX, clientIntraComm, requestRecvIndex);
+  }
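Each helper now exists in two forms: the original one, which push_backs into a caller-owned request vector, and a new one that writes through a raw request-slot pointer supplied by a caller that pre-sized its vector. Reduced to free functions over plain MPI, the pair looks roughly like this (a sketch; the tag is illustrative, where the real code uses MPI_DHT_INDEX and the ep_lib types):

```cpp
#include <mpi.h>
#include <cstddef>
#include <vector>

// Old style: grow the vector, then post into its last slot.
void sendIndexSketch(int dest, size_t* indices, size_t n, MPI_Comm comm,
                     std::vector<MPI_Request>& requests)
{
  requests.push_back(MPI_Request());
  MPI_Isend(indices, static_cast<int>(n), MPI_UNSIGNED_LONG,
            dest, /*tag=*/1, comm, &requests.back());
}

// New style: the caller owns slot placement; the callee only posts the send.
void sendIndexSketch(int dest, size_t* indices, size_t n, MPI_Comm comm,
                     MPI_Request* request)
{
  MPI_Isend(indices, static_cast<int>(n), MPI_UNSIGNED_LONG,
            dest, /*tag=*/1, comm, request);
}
```

With the push_back form, later growth of the vector can relocate earlier request objects; that is harmless for plain MPI handles, which are copied by value, but presumably the motivation here is that the endpoint request objects should not move once a nonblocking call has been posted.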
@@ -555,11 +636,28 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                        const MPI_Comm& clientIntraComm,
-                                                        std::vector<MPI_Request>& requestSendInfo)
-  {
-    MPI_Request request;
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        std::vector<ep_lib::MPI_Request>& requestSendInfo)
+  {
+    ep_lib::MPI_Request request;
     requestSendInfo.push_back(request);
 
-    MPI_Isend(info, infoSize, MPI_CHAR,
+    ep_lib::MPI_Isend(info, infoSize, EP_CHAR,
               clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
   }
+
+  /*!
+    Send message containing information to clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] info info array to send
+    \param [in] infoSize info array size to send
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestSendInfo sending request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
+                                                        const ep_lib::MPI_Comm& clientIntraComm,
+                                                        ep_lib::MPI_Request *requestSendInfo)
+  {
+    ep_lib::MPI_Isend(info, infoSize, EP_CHAR,
+                      clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo);
+  }

@@ -575,11 +673,28 @@
   template<typename T, typename H>
   void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
-                                                          const MPI_Comm& clientIntraComm,
-                                                          std::vector<MPI_Request>& requestRecvInfo)
-  {
-    MPI_Request request;
+                                                          const ep_lib::MPI_Comm& clientIntraComm,
+                                                          std::vector<ep_lib::MPI_Request>& requestRecvInfo)
+  {
+    ep_lib::MPI_Request request;
     requestRecvInfo.push_back(request);
 
-    MPI_Irecv(info, infoSize, MPI_CHAR,
+    ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,
               clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back()));
   }
+
+  /*!
+    Receive message containing information from other clients
+    \param [in] clientDestRank rank of destination client
+    \param [in] info info array to receive
+    \param [in] infoSize info array size to receive
+    \param [in] clientIntraComm communication group of client
+    \param [in] requestRecvInfo list of receiving request
+  */
+  template<typename T, typename H>
+  void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
+                                                          const ep_lib::MPI_Comm& clientIntraComm,
+                                                          ep_lib::MPI_Request* requestRecvInfo)
+  {
+    ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,
+                      clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo);
+  }

@@ -651,11 +766,11 @@
   {
     recvNbElements.resize(recvNbRank.size());
-    std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size());
-    std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());
+    std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());
 
     int nRequest = 0;
     for (int idx = 0; idx < recvNbRank.size(); ++idx)
     {
-      MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,
+      ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, EP_INT,
                 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
       ++nRequest;

@@ -664,9 +779,9 @@
     for (int idx = 0; idx < sendNbRank.size(); ++idx)
     {
-      MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,
+      ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, EP_INT,
                 sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }
 
-    MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
+    ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);
   }
@@ -696,11 +811,11 @@
     std::vector<int> recvBuff(recvBuffSize*2,0);
 
-    std::vector<MPI_Request> request(sendBuffSize+recvBuffSize);
-    std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
+    std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);
+    std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);
 
     int nRequest = 0;
     for (int idx = 0; idx < recvBuffSize; ++idx)
     {
-      MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,
+      ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, EP_INT,
                 recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
       ++nRequest;

@@ -716,10 +831,10 @@
     for (int idx = 0; idx < sendBuffSize; ++idx)
     {
-      MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
+      ep_lib::MPI_Isend(&sendBuff[idx*2], 2, EP_INT,
                 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]);
       ++nRequest;
     }
 
-    MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
+    ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);
     int nbRecvRank = 0, nbRecvElements = 0;
     recvNbRank.clear();
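The last two hunks follow the same handshake: each rank posts one small MPI_Irecv per expected source and one small MPI_Isend per destination, then completes everything with a single MPI_Waitall. A self-contained sketch of that handshake against plain MPI (names and tag are illustrative; the real code uses the MPI_DHT_INDEX_0 / MPI_DHT_INDEX_1 tags on internalComm_):

```cpp
#include <mpi.h>
#include <cstddef>
#include <vector>

// Every rank tells each partner how many elements to expect, one int per
// message, all completed by a single MPI_Waitall.
void exchangeCounts(const std::vector<int>& sendRanks,
                    const std::vector<int>& sendCounts,
                    const std::vector<int>& recvRanks,
                    std::vector<int>& recvCounts,
                    MPI_Comm comm)
{
  recvCounts.resize(recvRanks.size());
  std::vector<MPI_Request> request(sendRanks.size() + recvRanks.size());
  std::vector<MPI_Status> status(request.size());

  int nRequest = 0;
  for (std::size_t idx = 0; idx < recvRanks.size(); ++idx)
    MPI_Irecv(&recvCounts[idx], 1, MPI_INT,
              recvRanks[idx], /*tag=*/1, comm, &request[nRequest++]);

  for (std::size_t idx = 0; idx < sendRanks.size(); ++idx)
    MPI_Isend(const_cast<int*>(&sendCounts[idx]), 1, MPI_INT,  // const_cast for pre-MPI-3 signatures
              sendRanks[idx], /*tag=*/1, comm, &request[nRequest++]);

  MPI_Waitall(nRequest, request.data(), status.data());
}
```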