Changeset 1638 for XIOS/trunk/src/transformation/grid_transformation.cpp
- Timestamp: 01/22/19 16:15:03
- File: 1 edited
XIOS/trunk/src/transformation/grid_transformation.cpp
r1637 → r1638

@@ -514 +514 @@
     sendRankSizeMap[itIndex->first] = sendSize;
   }
-  MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+  ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

   displ[0]=0 ;
@@ -521 +521 @@
   int* recvRankBuff=new int[recvSize];
   int* recvSizeBuff=new int[recvSize];
-  MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-  MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+  ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);
+  ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);
   for (int i = 0; i < nbClient; ++i)
   {
@@ -534 +534 @@

   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);
+  std::vector<ep_lib::MPI_Status> status;
   std::unordered_map<int, unsigned char* > recvMaskDst;
   std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
+  int requests_position = 0;
   for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
   {
@@ -545 +546 @@
     recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

-    requests.push_back(MPI_Request());
-    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
-    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
+    ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
+    ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
+
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
   }

@@ -583 +587 @@

     // Send global index source and mask
-    requests.push_back(MPI_Request());
-    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
-    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
+    ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
+    ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
   }

   status.resize(requests.size());
-  MPI_Waitall(requests.size(), &requests[0], &status[0]);
+  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

   // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size());
+  requests_position = 0;
+  std::vector<ep_lib::MPI_Status>().swap(status);
   // Okie, on destination side, we will wait for information of masked index of source
   for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
@@ -601 +608 @@
     int recvSize = itSend->second;

-    requests.push_back(MPI_Request());
-    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+    ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }

@@ -638 +646 @@

     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
-    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+    ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
+    //requests.push_back(ep_lib::MPI_Request());
+    //ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
   }
   status.resize(requests.size());
-  MPI_Waitall(requests.size(), &requests[0], &status[0]);
+  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

   // Cool, now we can fill in local index of grid destination (counted for masked index)
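For context, the request-handling pattern this changeset converts (pre-sizing the request vector and filling it through a running position index before a single Waitall) can be sketched in isolation as below. The sketch is written against the plain MPI C API rather than XIOS's ep_lib wrappers (ep_lib::MPI_Request, EP_INT, EP_UNSIGNED_LONG, ...), and the function name exchangeGlobalIndex, the rank-to-size maps, and the buffer maps are hypothetical stand-ins for the variables used in grid_transformation.cpp; tag 46 mirrors the global-index tag in the diff.

// Minimal sketch of the pattern above, assuming the plain MPI C API instead of
// XIOS's ep_lib wrappers. exchangeGlobalIndex, the rank->size maps and the
// buffer maps are hypothetical stand-ins, not names from the XIOS source.
#include <mpi.h>
#include <map>
#include <vector>

void exchangeGlobalIndex(const std::map<int, int>& recvRankSizeMap,   // source rank -> element count
                         const std::map<int, int>& sendRankSizeMap,   // destination rank -> element count
                         std::map<int, std::vector<unsigned long>>& recvBuf,
                         std::map<int, std::vector<unsigned long>>& sendBuf,  // assumed already filled
                         MPI_Comm comm)
{
  // Pre-size the request vector (one slot per pending operation) instead of
  // growing it with push_back() before every Irecv/Isend, then fill the slots
  // through a running position index, as the new code in the diff does.
  std::vector<MPI_Request> requests(recvRankSizeMap.size() + sendRankSizeMap.size());
  int requestsPosition = 0;

  for (const auto& [rank, size] : recvRankSizeMap)
  {
    recvBuf[rank].resize(size);
    MPI_Irecv(recvBuf[rank].data(), size, MPI_UNSIGNED_LONG, rank, 46, comm,
              &requests[requestsPosition++]);
  }
  for (const auto& [rank, size] : sendRankSizeMap)
  {
    MPI_Isend(sendBuf.at(rank).data(), size, MPI_UNSIGNED_LONG, rank, 46, comm,
              &requests[requestsPosition++]);
  }

  // One Waitall over every pending request, with a matching status array.
  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());
}

A plausible reason for replacing push_back()/&requests.back() with a pre-sized vector is that every request slot then keeps a stable address for the whole exchange: with plain MPI handles the two variants behave the same, but a wrapper layer such as ep_lib may hold pointers to the pending request objects, in which case a reallocation triggered by push_back would invalidate them. The changeset itself does not state the motivation, so this reading is an assumption.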