Timestamp:
11/19/18 15:52:54
Author:
yushan
Message:

branch_openmp merged with trunk r1597

File:
1 edited

  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp

--- XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp (r1542)
+++ XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp (r1601)
@@ -500,5 +500,5 @@
     sendRankSizeMap[itIndex->first] = sendSize;
   }
-  MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
+  ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
 
   displ[0]=0 ;
@@ -507,6 +507,6 @@
   int* recvRankBuff=new int[recvSize];
   int* recvSizeBuff=new int[recvSize];
-  MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
-  MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
+  ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
+  ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
   for (int i = 0; i < nbClient; ++i)
   {
@@ -520,8 +520,9 @@
 
   // Sending global index of grid source to corresponding process as well as the corresponding mask
-  std::vector<MPI_Request> requests;
-  std::vector<MPI_Status> status;
+  std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);
+  std::vector<ep_lib::MPI_Status> status;
   std::unordered_map<int, unsigned char* > recvMaskDst;
   std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
+  int requests_position = 0;
   for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
   {
@@ -531,8 +532,6 @@
     recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
 
-    requests.push_back(MPI_Request());
-    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
-    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
+    ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
+    ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
   }
 
@@ -569,16 +568,15 @@
 
     // Send global index source and mask
-    requests.push_back(MPI_Request());
-    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-    requests.push_back(MPI_Request());
-    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
+    ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
+    ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
   }
 
   status.resize(requests.size());
-  MPI_Waitall(requests.size(), &requests[0], &status[0]);
+  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
 
   // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-  std::vector<MPI_Request>().swap(requests);
-  std::vector<MPI_Status>().swap(status);
+  requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size());
+  requests_position = 0;
+  std::vector<ep_lib::MPI_Status>().swap(status);
   // Okie, on destination side, we will wait for information of masked index of source
   for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
@@ -587,6 +585,5 @@
     int recvSize = itSend->second;
 
-    requests.push_back(MPI_Request());
-    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+    ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
   }
 
@@ -624,9 +621,8 @@
 
     // Okie, now inform the destination which source index are masked
-    requests.push_back(MPI_Request());
-    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
+    ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
   }
   status.resize(requests.size());
-  MPI_Waitall(requests.size(), &requests[0], &status[0]);
+  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
 
   // Cool, now we can fill in local index of grid destination (counted for masked index)
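
The first recurring edit re-routes every MPI entry point through the ep_lib namespace while leaving the argument lists untouched, which only works because ep_lib (the endpoint-MPI layer used by the OpenMP branch) exposes wrappers whose signatures mirror the MPI C API. A hypothetical forwarding stub below illustrates why the ep_lib:: prefix is the entire migration at a call site; the real ep_lib implements endpoint semantics over threads and its types are richer than these typedefs, so this is illustrative only, not XIOS's implementation.

#include <mpi.h>

namespace ep_lib
{
  // In the real library these are dedicated types; plain aliases suffice
  // to show why call sites compile unchanged apart from the prefix.
  typedef ::MPI_Request MPI_Request;
  typedef ::MPI_Status  MPI_Status;

  inline int MPI_Waitall(int count, MPI_Request* reqs, MPI_Status* stats)
  {
    return ::MPI_Waitall(count, reqs, stats);  // forward to the underlying MPI
  }
}

With such a namespace in place, switching a translation unit between plain MPI and the endpoint layer reduces to the ep_lib:: qualification seen throughout the diff.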
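The second recurring edit replaces request vectors grown with push_back by vectors sized once up front (two requests per receiving peer plus two per sending peer in the first pass) and filled through a running requests_position index; the second pass then reuses the same vector via resize() and a reset of the index, which is safe because MPI_Waitall has already completed every outstanding request. Below is a minimal sketch of the receive half of this pattern against standard MPI; the exchange() helper and buffer layout are assumptions, tag 46 is taken from the diff, and each peer is assumed to post a matching send.

#include <mpi.h>
#include <map>
#include <vector>

void exchange(const std::map<int, int>& recvRankSizeMap,
              std::map<int, std::vector<unsigned long> >& recvGlobalIndexSrc,
              MPI_Comm comm)
{
  // One request per receiving peer, allocated before any request is posted
  // (assumes recvRankSizeMap is non-empty with positive sizes).
  std::vector<MPI_Request> requests(recvRankSizeMap.size());
  int requests_position = 0;

  for (std::map<int, int>::const_iterator it = recvRankSizeMap.begin();
       it != recvRankSizeMap.end(); ++it)
  {
    int recvRank = it->first;
    int recvSize = it->second;
    recvGlobalIndexSrc[recvRank].resize(recvSize);
    // Fill the next free slot instead of push_back: &requests[i] stays valid
    // for the lifetime of the nonblocking receive.
    MPI_Irecv(&recvGlobalIndexSrc[recvRank][0], recvSize, MPI_UNSIGNED_LONG,
              recvRank, 46, comm, &requests[requests_position++]);
  }

  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
}

Pre-sizing guarantees that no reallocation moves a request after its address was handed to MPI_Irecv. With standard MPI's opaque handles a push_back-triggered reallocation would be largely harmless, so the likely motivation is ep_lib, where MPI_Request is plausibly a heavier object whose address must stay stable until completion; avoiding repeated reallocation is a side benefit either way.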