Timestamp: 01/22/19 16:15:03 (5 years ago)
Author: yushan
Message: dev on ADA
Location: XIOS/trunk/src/transformation
Files: 7 edited

Legend (markers used in the diffs below):
  (no prefix)  Unmodified
  +            Added
  -            Removed
  • XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp

    r1622 → r1638

      int* recvCount=new int[nbClient];
    - MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
    + ep_lib::MPI_Allgather(&numValue,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      int* displ=new int[nbClient];
    ...
      // Each client have enough global info of axis
    - MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,client->intraComm);
    - MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendIndexBuff,numValue,EP_INT,recvIndexBuff,recvCount,displ,EP_INT,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendValueBuff,numValue,EP_DOUBLE,&(recvBuff[0]),recvCount,displ,EP_DOUBLE,client->intraComm);

      for (int idx = 0; idx < srcSize; ++idx)
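
    The change in this file is a one-for-one substitution: the collective pattern (an Allgather of
    per-rank counts, prefix-sum displacements, then an Allgatherv of the values) is untouched; only
    the entry points move into the ep_lib namespace and the MPI_* datatypes become EP_*. A minimal,
    self-contained sketch of that count-then-Allgatherv exchange against plain MPI (buffer names such
    as numValue and sendValueBuff are placeholders, not XIOS code):

      // Count-then-Allgatherv: every rank ends up with the full, variable-length global view.
      #include <mpi.h>
      #include <vector>

      int main(int argc, char** argv)
      {
        MPI_Init(&argc, &argv);
        MPI_Comm comm = MPI_COMM_WORLD;              // stands in for client->intraComm
        int nbClient = 0, rank = 0;
        MPI_Comm_size(comm, &nbClient);
        MPI_Comm_rank(comm, &rank);

        int numValue = rank + 1;                     // local contribution size (arbitrary here)
        std::vector<double> sendValueBuff(numValue, static_cast<double>(rank));

        // 1) every rank learns how much every other rank will contribute
        std::vector<int> recvCount(nbClient);
        MPI_Allgather(&numValue, 1, MPI_INT, recvCount.data(), 1, MPI_INT, comm);

        // 2) displacements are prefix sums of the counts
        std::vector<int> displ(nbClient, 0);
        for (int i = 1; i < nbClient; ++i) displ[i] = displ[i - 1] + recvCount[i - 1];

        // 3) gather the variable-length payloads
        std::vector<double> recvBuff(displ[nbClient - 1] + recvCount[nbClient - 1]);
        MPI_Allgatherv(sendValueBuff.data(), numValue, MPI_DOUBLE,
                       recvBuff.data(), recvCount.data(), displ.data(), MPI_DOUBLE, comm);

        MPI_Finalize();
        return 0;
      }

    In the changeset the same calls are spelled ep_lib::MPI_Allgather / ep_lib::MPI_Allgatherv with
    EP_INT and EP_DOUBLE, but the arguments and their order are unchanged.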
  • XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp

    r1622 → r1638

        sendRankSizeMap[itIndex->first] = sendSize;
      }
    - MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
    + ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      displ[0]=0 ;
    ...
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
    - MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
    - MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);
      for (int i = 0; i < nbClient; ++i)
      {
    ...

      // Sending global index of grid source to corresponding process as well as the corresponding mask
    - std::vector<MPI_Request> requests;
    - std::vector<MPI_Status> status;
    + std::vector<ep_lib::MPI_Request> requests;
    + std::vector<ep_lib::MPI_Status> status;
      std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
      std::unordered_map<int, double* > sendValueToDest;
    ...
        sendValueToDest[recvRank] = new double [recvSize];

    -   requests.push_back(MPI_Request());
    -   MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
    +   requests.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
      }

    ...

        // Send global index source and mask
    -   requests.push_back(MPI_Request());
    -   MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
    +   requests.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
      }

      status.resize(requests.size());
    - MPI_Waitall(requests.size(), &requests[0], &status[0]);
    -
    -
    - std::vector<MPI_Request>().swap(requests);
    - std::vector<MPI_Status>().swap(status);
    + ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
    +
    +
    + std::vector<ep_lib::MPI_Request>().swap(requests);
    + std::vector<ep_lib::MPI_Status>().swap(status);

      // Okie, on destination side, we will wait for information of masked index of source
    ...
        int recvSize = itSend->second;

    -   requests.push_back(MPI_Request());
    -   MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
    +   requests.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Irecv(recvValueFromSrc[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
      }

    ...
        }
        // Okie, now inform the destination which source index are masked
    -   requests.push_back(MPI_Request());
    -   MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
    +   requests.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Isend(sendValueToDest[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
      }
      status.resize(requests.size());
    - MPI_Waitall(requests.size(), &requests[0], &status[0]);
    + ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);
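
    Here the nonblocking exchange keeps its push_back-then-&requests.back() idiom; only the
    request/status types and the calls are switched to their ep_lib counterparts. A sketch of that
    idiom in plain MPI, under the assumption that the caller fills the send buffers and that the two
    maps describe who sends/receives how much (all names are placeholders):

      // Post all receives, post all sends, then complete everything with one Waitall.
      #include <mpi.h>
      #include <map>
      #include <vector>

      void exchange(MPI_Comm comm,
                    const std::map<int,int>& recvSizeMap,                    // source rank -> count
                    const std::map<int,int>& sendSizeMap,                    // dest rank -> count
                    std::map<int, std::vector<unsigned long> >& recvBuf,
                    std::map<int, std::vector<unsigned long> >& sendBuf)
      {
        std::vector<MPI_Request> requests;

        // One nonblocking receive per source rank (tag 46, as in the diff above).
        for (std::map<int,int>::const_iterator it = recvSizeMap.begin(); it != recvSizeMap.end(); ++it)
        {
          recvBuf[it->first].resize(it->second);
          requests.push_back(MPI_Request());
          MPI_Irecv(recvBuf[it->first].data(), it->second, MPI_UNSIGNED_LONG,
                    it->first, 46, comm, &requests.back());
        }

        // One nonblocking send per destination rank.
        for (std::map<int,int>::const_iterator it = sendSizeMap.begin(); it != sendSizeMap.end(); ++it)
        {
          requests.push_back(MPI_Request());
          MPI_Isend(sendBuf[it->first].data(), it->second, MPI_UNSIGNED_LONG,
                    it->first, 46, comm, &requests.back());
        }

        // Complete every posted operation in one shot.
        std::vector<MPI_Status> status(requests.size());
        if (!requests.empty())
          MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
      }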
  • XIOS/trunk/src/transformation/domain_algorithm_generate_rectilinear.cpp

    r1622 → r1638

        StdSize hashValue = hashFunc.hashVec(globalAxisIndex);
        std::vector<StdSize> recvBuff(client->clientSize);
    -   MPI_Gather(&hashValue, 1, MPI_UNSIGNED_LONG,
    -              &recvBuff[0], 1, MPI_UNSIGNED_LONG,
    +   ep_lib::MPI_Gather(&hashValue, 1, EP_UNSIGNED_LONG,
    +              &recvBuff[0], 1, EP_UNSIGNED_LONG,
                   0,
                   client->intraComm);
    ...
        }

    -   MPI_Bcast(&nbLocalAxis[0], nbAxis, MPI_INT,
    +   ep_lib::MPI_Bcast(&nbLocalAxis[0], nbAxis, EP_INT,
                  0, client->intraComm);
      }
  • XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp

    r1622 → r1638

      CContextClient* client=context->client;

    - MPI_Comm poleComme(MPI_COMM_NULL);
    + ep_lib::MPI_Comm poleComme(EP_COMM_NULL);
    + #ifdef _usingMPI
      MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
    - if (MPI_COMM_NULL != poleComme)
    + #elif _usingEP
    + ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme);
    + #endif
    + if (EP_COMM_NULL != poleComme)
      {
        int nbClientPole;
    -   MPI_Comm_size(poleComme, &nbClientPole);
    +   ep_lib::MPI_Comm_size(poleComme, &nbClientPole);

        std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,
    ...
        std::vector<int> recvCount(nbClientPole,0);
        std::vector<int> displ(nbClientPole,0);
    -   MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ;
    +   ep_lib::MPI_Allgather(&nbWeight,1,EP_INT,&recvCount[0],1,EP_INT,poleComme) ;

        displ[0]=0;
    ...
        // Gather all index and weight for pole
    -   MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);
    -   MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);
    +   ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,EP_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],EP_INT,poleComme);
    +   ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,EP_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],EP_DOUBLE,poleComme);

        std::map<int,double> recvTemp;
    ...

    - MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);
    + ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, EP_INT, EP_SUM, client->intraComm);

      int* sendIndexDestBuff = new int [sendBuffSize];
    ...
      double* sendWeightBuff = new double [sendBuffSize];

    - std::vector<MPI_Request> sendRequest;
    + std::vector<ep_lib::MPI_Request> sendRequest;

      int sendOffSet = 0, l = 0;
    ...
        }

    -   sendRequest.push_back(MPI_Request());
    -   MPI_Isend(sendIndexDestBuff + sendOffSet,
    +   sendRequest.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,
                  k,
    -             MPI_INT,
    +             EP_INT,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
                  client->intraComm,
                  &sendRequest.back());
    -   sendRequest.push_back(MPI_Request());
    -   MPI_Isend(sendIndexSrcBuff + sendOffSet,
    +   sendRequest.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,
                  k,
    -             MPI_INT,
    +             EP_INT,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_SRC_INDEX,
                  client->intraComm,
                  &sendRequest.back());
    -   sendRequest.push_back(MPI_Request());
    -   MPI_Isend(sendWeightBuff + sendOffSet,
    +   sendRequest.push_back(ep_lib::MPI_Request());
    +   ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,
                  k,
    -             MPI_DOUBLE,
    +             EP_DOUBLE,
                  itMap->first,
                  MPI_DOMAIN_INTERPOLATION_WEIGHT,
    ...
      while (receivedSize < recvBuffSize)
      {
    -   MPI_Status recvStatus;
    +   ep_lib::MPI_Status recvStatus;
    +   #ifdef _usingMPI
        MPI_Recv((recvIndexDestBuff + receivedSize),
                 recvBuffSize,
    -            MPI_INT,
    +            EP_INT,
                 MPI_ANY_SOURCE,
                 MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
                 client->intraComm,
                 &recvStatus);
    +   #elif _usingEP
    +   ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize),
    +            recvBuffSize,
    +            EP_INT,
    +            -2,
    +            MPI_DOMAIN_INTERPOLATION_DEST_INDEX,
    +            client->intraComm,
    +            &recvStatus);
    +   #endif

        int countBuff = 0;
    -   MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
    +   ep_lib::MPI_Get_count(&recvStatus, EP_INT, &countBuff);
    +   #ifdef _usingMPI
        clientSrcRank = recvStatus.MPI_SOURCE;
    -
    -   MPI_Recv((recvIndexSrcBuff + receivedSize),
    +   #elif _usingEP
    +   clientSrcRank = recvStatus.ep_src;
    +   #endif
    +
    +   ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize),
                 recvBuffSize,
    -            MPI_INT,
    +            EP_INT,
                 clientSrcRank,
                 MPI_DOMAIN_INTERPOLATION_SRC_INDEX,
    ...
                 &recvStatus);

    -   MPI_Recv((recvWeightBuff + receivedSize),
    +   ep_lib::MPI_Recv((recvWeightBuff + receivedSize),
                 recvBuffSize,
    -            MPI_DOUBLE,
    +            EP_DOUBLE,
                 clientSrcRank,
                 MPI_DOMAIN_INTERPOLATION_WEIGHT,
    ...
      }

    - std::vector<MPI_Status> requestStatus(sendRequest.size());
    + std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
    + #ifdef _usingMPI
      MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
    + #elif _usingEP
    + std::vector<ep_lib::MPI_Status> waitstat(sendRequest.size());
    + ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &waitstat[0]);
    + #endif

      delete [] sendIndexDestBuff;
    ...
    /*! Redefined some functions of CONetCDF4 to make use of them */
    - CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm)
    + CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm)
        : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {}
    int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name,
    ...
      }

    - MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
    - MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
    + ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, EP_LONG, EP_SUM, client->intraComm);
    + ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, EP_LONG, EP_SUM, client->intraComm);

      if (0 == globalNbWeight)
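
    Most hunks in this file are again direct ep_lib substitutions, but where the two paths genuinely
    differ (the communicator split, receiving from "any source", and reading the sender's rank out of
    the status) the change switches to #ifdef _usingMPI / #elif _usingEP blocks. A condensed sketch of
    that pattern, reduced to a single-int receive: the function name is hypothetical, the -2 "any
    source" sentinel and the ep_src status field are taken from the diff, the _usingEP branch assumes
    the declarations provided by XIOS's ep_lib headers, and the signature below uses the plain-MPI
    communicator type.

      #include <mpi.h>

      // Receive one int from whichever rank sends first and report who sent it.
      int receiveOneIntFromAnySource(MPI_Comm comm, int tag, int& srcRank)
      {
        int value = 0;
      #ifdef _usingMPI
        MPI_Status recvStatus;
        MPI_Recv(&value, 1, MPI_INT, MPI_ANY_SOURCE, tag, comm, &recvStatus);
        srcRank = recvStatus.MPI_SOURCE;       // plain MPI: the source rank lives in MPI_SOURCE
      #elif _usingEP
        ep_lib::MPI_Status recvStatus;
        ep_lib::MPI_Recv(&value, 1, EP_INT, -2 /* any source */, tag, comm, &recvStatus);
        srcRank = recvStatus.ep_src;           // ep_lib exposes the sender as ep_src
      #endif
        return value;
      }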
  • XIOS/trunk/src/transformation/domain_algorithm_interpolate.hpp

    r1480 → r1638

      {
      public:
    -   WriteNetCdf(const StdString& filename, const MPI_Comm comm);
    +   WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm);
        int addDimensionWrite(const StdString& name, const StdSize size = UNLIMITED_DIM);
        int addVariableWrite(const StdString& name, nc_type type,
  • XIOS/trunk/src/transformation/generic_algorithm_transformation.cpp

    r1637 → r1638

          {
            distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ;
    -       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
    +       ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;

          }
    ...
          {
            distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ;
    -       MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;
    +       ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;
          }
          else //it's a scalar
    ...
      int sendValue = (computeGlobalIndexOnProc) ? 1 : 0;
      int recvValue = 0;
    - MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);
    + ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, EP_INT, EP_SUM, client->intraComm);
      computeGlobalIndexOnProc = (0 < recvValue);
  • XIOS/trunk/src/transformation/grid_transformation.cpp

    r1637 → r1638

        sendRankSizeMap[itIndex->first] = sendSize;
      }
    - MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
    + ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);

      displ[0]=0 ;
    ...
      int* recvRankBuff=new int[recvSize];
      int* recvSizeBuff=new int[recvSize];
    - MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
    - MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);
    + ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);
      for (int i = 0; i < nbClient; ++i)
      {
    ...

      // Sending global index of grid source to corresponding process as well as the corresponding mask
    - std::vector<MPI_Request> requests;
    - std::vector<MPI_Status> status;
    + std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);
    + std::vector<ep_lib::MPI_Status> status;
      std::unordered_map<int, unsigned char* > recvMaskDst;
      std::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
    + int requests_position = 0;
      for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
      {
    ...
        recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];

    -   requests.push_back(MPI_Request());
    -   MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
    -   requests.push_back(MPI_Request());
    -   MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
    +   ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]);
    +   ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]);
    +
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
      }

    ...

        // Send global index source and mask
    -   requests.push_back(MPI_Request());
    -   MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
    -   requests.push_back(MPI_Request());
    -   MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
    +   ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]);
    +   ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]);
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
      }

      status.resize(requests.size());
    - MPI_Waitall(requests.size(), &requests[0], &status[0]);
    + ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

      // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
    - std::vector<MPI_Request>().swap(requests);
    - std::vector<MPI_Status>().swap(status);
    + requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size());
    + requests_position = 0;
    + std::vector<ep_lib::MPI_Status>().swap(status);
      // Okie, on destination side, we will wait for information of masked index of source
      for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
    ...
        int recvSize = itSend->second;

    -   requests.push_back(MPI_Request());
    -   MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +   ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
      }

    ...

        // Okie, now inform the destination which source index are masked
    -   requests.push_back(MPI_Request());
    -   MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
    +   ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]);
    +   //requests.push_back(ep_lib::MPI_Request());
    +   //ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
      }
      status.resize(requests.size());
    - MPI_Waitall(requests.size(), &requests[0], &status[0]);
    + ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);

      // Cool, now we can fill in local index of grid destination (counted for masked index)
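
    grid_transformation.cpp gets the same ep_lib substitutions, plus one structural change: the
    requests vector is now sized up front (two requests per receiving peer and two per sending peer,
    then resized for the second round) and filled through a running requests_position index, with the
    old push_back/&requests.back() lines kept as comments. A plain-MPI sketch of the pre-sized variant
    (the map and buffer names are placeholders; one plausible reason for the change is to keep each
    request object at a fixed address while it is in flight, though the changeset itself does not say):

      // Pre-size the request array, fill it with a running index, then Waitall once.
      #include <mpi.h>
      #include <map>
      #include <vector>

      void exchangeIndexAndMask(MPI_Comm comm,
                                std::map<int, std::vector<unsigned long> >& recvIndex,  // per-source buffers
                                std::map<int, std::vector<unsigned char> >& recvMask,
                                std::map<int, std::vector<unsigned long> >& sendIndex,  // per-destination buffers
                                std::map<int, std::vector<unsigned char> >& sendMask)
      {
        // Exactly two requests per receiving peer and two per sending peer.
        std::vector<MPI_Request> requests(recvIndex.size() * 2 + sendIndex.size() * 2);
        int requests_position = 0;

        for (std::map<int, std::vector<unsigned long> >::iterator it = recvIndex.begin(); it != recvIndex.end(); ++it)
        {
          int rank = it->first;
          int size = static_cast<int>(it->second.size());
          recvMask[rank].resize(size);
          MPI_Irecv(it->second.data(), size, MPI_UNSIGNED_LONG, rank, 46, comm, &requests[requests_position++]);
          MPI_Irecv(recvMask[rank].data(), size, MPI_UNSIGNED_CHAR, rank, 47, comm, &requests[requests_position++]);
        }

        for (std::map<int, std::vector<unsigned long> >::iterator it = sendIndex.begin(); it != sendIndex.end(); ++it)
        {
          int rank = it->first;
          int size = static_cast<int>(it->second.size());
          MPI_Isend(it->second.data(), size, MPI_UNSIGNED_LONG, rank, 46, comm, &requests[requests_position++]);
          MPI_Isend(sendMask[rank].data(), size, MPI_UNSIGNED_CHAR, rank, 47, comm, &requests[requests_position++]);
        }

        std::vector<MPI_Status> status(requests.size());
        if (!requests.empty())
          MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
      }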