- Timestamp: 11/19/18 15:52:54
- File: 1 edited
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp
r1542 → r1601

@@ -500,5 +500,5 @@
       sendRankSizeMap[itIndex->first] = sendSize;
     }
-    MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+    ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);

     displ[0]=0 ;
@@ -507,6 +507,6 @@
     int* recvRankBuff=new int[recvSize];
     int* recvSizeBuff=new int[recvSize];
-    MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-    MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+    ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
+    ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
     for (int i = 0; i < nbClient; ++i)
     {
@@ -520,8 +520,9 @@

     // Sending global index of grid source to corresponding process as well as the corresponding mask
-    std::vector<MPI_Request> requests;
-    std::vector<MPI_Status> status;
+    std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);
+    std::vector<ep_lib::MPI_Status> status;
     std::unordered_map<int, unsigned char* > recvMaskDst;
     std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
+    int requests_position = 0;
     for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
     {
@@ -531,7 +532,5 @@
       recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

-      requests.push_back(MPI_Request());
-      MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-      requests.push_back(MPI_Request());
-      MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
+      ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
+      ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
     }
@@ -569,16 +568,15 @@

       // Send global index source and mask
-      requests.push_back(MPI_Request());
-      MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-      requests.push_back(MPI_Request());
-      MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
+      ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
+      ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
     }

     status.resize(requests.size());
-    MPI_Waitall(requests.size(), &requests[0], &status[0]);
+    ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

     // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-    std::vector<MPI_Request>().swap(requests);
-    std::vector<MPI_Status>().swap(status);
+    requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size());
+    requests_position = 0;
+    std::vector<ep_lib::MPI_Status>().swap(status);
     // Okie, on destination side, we will wait for information of masked index of source
     for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
@@ -587,5 +585,4 @@
       int recvSize = itSend->second;

-      requests.push_back(MPI_Request());
-      MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+      ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
     }
@@ -624,9 +621,8 @@

       // Okie, now inform the destination which source index are masked
-      requests.push_back(MPI_Request());
-      MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+      ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
     }
     status.resize(requests.size());
-    MPI_Waitall(requests.size(), &requests[0], &status[0]);
+    ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

     // Cool, now we can fill in local index of grid destination (counted for masked index)
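What changed, conceptually: every MPI call and every MPI_Request/MPI_Status type in this routine is now routed through the ep_lib ("endpoint") layer used by this OpenMP-enabled branch (dev_trunk_omp), which allows multiple threads within one process to act as separate communication endpoints. Below is a minimal sketch of what such a forwarding layer can look like; the member names and pass-through bodies are assumptions for illustration, not the actual ep_lib sources.

// Hypothetical sketch of an endpoint-style wrapper namespace; names and
// pass-through bodies are illustrative assumptions, not the XIOS code.
#include <mpi.h>

namespace ep_lib
{
  // The layer defines its own request type so it can carry per-endpoint
  // bookkeeping; that is why r1601 has to switch the request/status types
  // and the calls to their ep_lib:: versions together.
  struct MPI_Request
  {
    ::MPI_Request native;  // underlying MPI handle (assumed layout)
  };

  inline int MPI_Irecv(void* buf, int count, ::MPI_Datatype type,
                       int src, int tag, ::MPI_Comm comm, MPI_Request* req)
  {
    // Pure pass-through shown here; the real layer multiplexes
    // thread-level endpoints onto the process rank before calling MPI.
    return ::MPI_Irecv(buf, count, type, src, tag, comm, &req->native);
  }
}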
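The other pattern in the diff: the requests vector is no longer grown with requests.push_back(MPI_Request()) and filled through &requests.back(); it is sized once up front and indexed with a running requests_position. With a pre-sized vector, the address handed to each Irecv/Isend can never be invalidated by a later reallocation while the operation is still pending. A standalone sketch of the pattern against plain MPI follows; the function name, peer lists, and tag value are illustrative assumptions.

// Standalone sketch of the pre-sized request-vector pattern from r1601,
// written against plain MPI (ep_lib mirrors these signatures in XIOS).
#include <mpi.h>
#include <cstddef>
#include <vector>

void exchange(const std::vector<int>& recvPeers,   // ranks we receive from
              const std::vector<int>& sendPeers,   // ranks we send to
              std::vector<unsigned long>& recvBuf, // pre-sized: recvPeers.size()*count
              std::vector<unsigned long>& sendBuf, // pre-sized: sendPeers.size()*count
              int count, MPI_Comm comm)
{
  // One request per Irecv plus one per Isend, allocated up front. The
  // vector never grows afterwards, so &requests[i] stays valid for the
  // whole lifetime of the pending operations, whereas push_back could
  // reallocate and move the requests while they were still in flight.
  std::vector<MPI_Request> requests(recvPeers.size() + sendPeers.size());
  int requests_position = 0;

  for (std::size_t i = 0; i < recvPeers.size(); ++i)
    MPI_Irecv(&recvBuf[i * count], count, MPI_UNSIGNED_LONG,
              recvPeers[i], /*tag=*/46, comm, &requests[requests_position++]);

  for (std::size_t i = 0; i < sendPeers.size(); ++i)
    MPI_Isend(&sendBuf[i * count], count, MPI_UNSIGNED_LONG,
              sendPeers[i], /*tag=*/46, comm, &requests[requests_position++]);

  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());
}

With a stock MPI implementation the old idiom was arguably safe, since the request handle is written before Irecv/Isend returns and afterwards is just a value that may be moved; pre-sizing presumably matters here because ep_lib's request objects are heavier and their addresses must stay stable while the exchange is in flight. The two sizing expressions in the diff appear to mirror the message counts: two requests per peer (global index plus mask) in the first exchange, one per peer in the mask-return exchange.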