Changeset 833


Timestamp: 04/08/16 15:00:15
Author: mhnguyen
Message:

Improvements for dht

+) Implement an adaptive hierarchy for the dht; the number of hierarchy levels depends on the number of processes (see the sketch below)
+) Remove some redundant code

Test
+) On Curie
+) All tests pass
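
A minimal sketch of the sizing rule behind the adaptive hierarchy (the full logic is in DivideAdaptiveComm::computeMPICommLevel in policy.cpp below; mpiSize is hard-coded here in place of MPI_Comm_size, purely for illustration):

    #include <cstdio>

    int main()
    {
      int mpiSize = 1024; // stand-in for the size of the client intracommunicator

      // The fan-out maxChild is the smallest value (>= 2) with maxChild^maxChild >= mpiSize.
      int maxChild = 1, m;
      do
      {
        m = 1;
        ++maxChild;
        for (int i = 0; i < maxChild; ++i) m *= maxChild; // m = maxChild^maxChild
      } while (m < mpiSize);

      // Each level splits a group into at most maxChild subgroups, so the depth
      // grows logarithmically with the number of processes.
      int maxLevel = 0;
      for (int size = 1; size <= mpiSize; size *= maxChild) ++maxLevel;

      std::printf("mpiSize=%d -> maxChild=%d, maxLevel=%d\n", mpiSize, maxChild, maxLevel);
      return 0;
    }

For 1024 processes this gives maxChild = 5 and maxLevel = 5; for a handful of processes the hierarchy collapses to one or two levels.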

Location: XIOS/trunk/src
Files: 16 edited

  • XIOS/trunk/src/client_client_dht_template.hpp (r830 → r833)

     namespace xios
     {
    -template<typename T, class HierarchyPolicy = DivideCommByTwo> class CClientClientDHTTemplate;
    +template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;

     /*!
    …
                                           int level);

    +    void computeSendRecvRank(int level, int rank);
    +
    +    void sendRecvRank(int level,
    +                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
    +                      int& recvNbRank, int& recvNbElements);
    +
       protected:
         void probeIndexMessageFromClients(unsigned long* recvIndexGlobalBuff,
    …

         // Send global index to clients
    -    void sendIndexToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
    +    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                                 const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    …
         Index2InfoTypeMap indexToInfoMappingLevel_;

    -    //! intracommuntion of clients
    -    MPI_Comm intraCommRoot_;
    +    std::vector<std::vector<int> > sendRank_;
    +
    +    std::vector<std::vector<int> > recvRank_;

         //! Flag to specify whether data is distributed or not
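
    The two methods declared above replace the communicator-wide MPI_Allreduce used previously with targeted point-to-point traffic. A standalone toy of that exchange pattern (the ring neighbour choice here is made up; in the real code the peers come from computeSendRecvRank, and the payload mirrors the {number of ranks, number of elements} pair that sendRecvRank ships):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Each rank tells one peer how many messages/elements to expect from it.
      int sendBuff[2] = { 1, 100 + rank }; // {nb of sending ranks, nb of elements}
      int recvBuff[2];
      MPI_Request request;
      MPI_Isend(sendBuff, 2, MPI_INT, (rank + 1) % size, 27, MPI_COMM_WORLD, &request);
      MPI_Recv(recvBuff, 2, MPI_INT, (rank - 1 + size) % size, 27, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Wait(&request, MPI_STATUS_IGNORE);

      std::printf("rank %d expects %d message(s) and %d element(s)\n", rank, recvBuff[0], recvBuff[1]);
      MPI_Finalize();
      return 0;
    }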
  • XIOS/trunk/src/client_client_dht_template_impl.hpp (r832 → r833)

                                                             const MPI_Comm& clientIntraComm,
                                                             int hierarLvl)
    -  : index2InfoMapping_(), indexToInfoMappingLevel_()
    -{
    -  this->computeMPICommLevel(clientIntraComm, hierarLvl);
    -  int lvl = this->commLevel_.size() - 1;
    -  computeDistributedIndex(indexInfoMap, this->commLevel_[lvl], lvl);
    +  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_()
    +{
    +  this->computeMPICommLevel();
    +  int nbLvl = this->getNbLevel();
    +  sendRank_.resize(nbLvl);
    +  recvRank_.resize(nbLvl);
    +  computeDistributedIndex(indexInfoMap, clientIntraComm, nbLvl-1);
     }

    …
     void CClientClientDHTTemplate<T,H>::computeIndexInfoMapping(const CArray<size_t,1>& indices)
     {
    -  int lvl = this->commLevel_.size() - 1;
    -  computeIndexInfoMappingLevel(indices, this->commLevel_[lvl], lvl);
    +  int nbLvl = this->getNbLevel();
    +  computeIndexInfoMappingLevel(indices, this->internalComm_, nbLvl-1);
     }

    …
                                                                      int level)
     {
    -  int nbClient, clientRank;
    -  MPI_Comm_size(commLevel,&nbClient);
    +  int clientRank;
       MPI_Comm_rank(commLevel,&clientRank);
    +  int groupRankBegin = this->getGroupBegin()[level];
    +  int nbClient = this->getNbInGroup()[level];
       std::vector<size_t> hashedIndex;
       computeHashIndex(hashedIndex, nbClient);
    …
       std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash,
                                           iteClientHash = hashedIndex.end();
    -  std::map<int, std::vector<size_t> > client2ClientIndex;
    +  std::vector<int> sendBuff(nbClient,0);
    +  std::vector<int> sendNbIndexBuff(nbClient,0);

       // Number of global index whose mapping server are on other clients
       int nbIndexToSend = 0;
    +  size_t index;
       HashXIOS<size_t> hashGlobalIndex;
       for (int i = 0; i < ssize; ++i)
       {
    -    size_t index = indices(i);
    +    index = indices(i);
         hashedVal  = hashGlobalIndex(index);
         itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedVal);
    -    if (iteClientHash != itClientHash)
    +    int indexClient = std::distance(itbClientHash, itClientHash)-1;
    +    ++sendNbIndexBuff[indexClient];
    +  }
    +
    +  std::map<int, size_t* > client2ClientIndex;
    +  for (int idx = 0; idx < nbClient; ++idx)
    +  {
    +    if (0 != sendNbIndexBuff[idx])
    +    {
    +      client2ClientIndex[idx+groupRankBegin] = new unsigned long [sendNbIndexBuff[idx]];
    +      nbIndexToSend += sendNbIndexBuff[idx];
    +      sendBuff[idx] = 1;
    +      sendNbIndexBuff[idx] = 0;
    +    }
    +  }
    +
    +  for (int i = 0; i < ssize; ++i)
    +  {
    +    index = indices(i);
    +    hashedVal  = hashGlobalIndex(index);
    +    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedVal);
         {
           int indexClient = std::distance(itbClientHash, itClientHash)-1;
           {
    -        client2ClientIndex[indexClient].push_back(index);
    -        ++nbIndexToSend;
    +        client2ClientIndex[indexClient+groupRankBegin][sendNbIndexBuff[indexClient]] = index;;
    +        ++sendNbIndexBuff[indexClient];
           }
         }
       }

    -  int* sendBuff = new int[nbClient];
    -  for (int i = 0; i < nbClient; ++i) sendBuff[i] = 0;
    -  std::map<int, std::vector<size_t> >::iterator itb  = client2ClientIndex.begin(), it,
    -                                                ite  = client2ClientIndex.end();
    -  for (it = itb; it != ite; ++it) sendBuff[it->first] = 1;
    -  int* recvBuff = new int[nbClient];
    -  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
    +  int recvNbClient, recvNbIndexCount;
    +  sendRecvRank(level, sendBuff, sendNbIndexBuff,
    +               recvNbClient, recvNbIndexCount);
    +
    +  std::map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
    +                                    iteIndex = client2ClientIndex.end();

       std::list<MPI_Request> sendIndexRequest;
    -  if (0 != nbIndexToSend)
    -      for (it = itb; it != ite; ++it)
    -         sendIndexToClients(it->first, it->second, commLevel, sendIndexRequest);
    -
    -  int nbDemandingClient = recvBuff[clientRank], nbSendBuffInfoReceived = 0;
    +  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
    +     sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, sendIndexRequest);
    +
    +  int nbDemandingClient = recvNbClient; //recvBuff[clientRank],
    +  int nbSendBuffInfoReceived = 0;

       // Receiving demand as well as the responds from other clients
    …
       // Buffer to receive demand from other clients, it can be allocated or not depending whether it has demand(s)
       // There are some cases we demand duplicate index so need to determine maxium size of demanding buffer
    -  for (it = itb; it != ite; ++it) sendBuff[it->first] = (it->second).size();
    -  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
    -
       unsigned long* recvBuffIndex = 0;
    -  int maxNbIndexDemandedFromOthers = recvBuff[clientRank];
    -
    +  int maxNbIndexDemandedFromOthers = recvNbIndexCount;
       if (0 != maxNbIndexDemandedFromOthers)
         recvBuffIndex = new unsigned long[maxNbIndexDemandedFromOthers];

       // Buffer to receive respond from other clients, it can be allocated or not depending whether it demands other clients
    -//  InfoType* recvBuffInfo = 0;
       unsigned char* recvBuffInfo = 0;
       int nbIndexReceivedFromOthers = nbIndexToSend;
    …
       {
         --level;
    -    computeIndexInfoMappingLevel(tmpGlobalIndexOnClient, this->commLevel_[level], level);
    +    computeIndexInfoMappingLevel(tmpGlobalIndexOnClient, this->internalComm_, level);
       }
       else
    …
             int clientSourceRank = statusInfo.MPI_SOURCE;
             unsigned char* beginBuff = infoBuffBegin[clientSourceRank];
    -        std::vector<size_t>& indexTmp = client2ClientIndex[clientSourceRank];
    +        size_t* indexTmp = client2ClientIndex[clientSourceRank];
             int infoIndex = 0;
             for (int i = 0; i < actualCountInfo; ++i)
    …
       if (0 != maxNbIndexDemandedFromOthers) delete [] recvBuffIndex;
       if (0 != nbIndexReceivedFromOthers) delete [] recvBuffInfo;
    +  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) delete [] itIndex->second;
       for (int idx = 0; idx < infoToSend.size(); ++idx) delete [] infoToSend[idx];
    -  delete [] sendBuff;
    -  delete [] recvBuff;
     }

    …
                                                                 int level)
     {
    -  int nbClient, clientRank;
    -  MPI_Comm_size(commLevel,&nbClient);
    +  int clientRank;
       MPI_Comm_rank(commLevel,&clientRank);
    +  computeSendRecvRank(level, clientRank);
    +
    +  int groupRankBegin = this->getGroupBegin()[level];
    +  int nbClient = this->getNbInGroup()[level];
       std::vector<size_t> hashedIndex;
       computeHashIndex(hashedIndex, nbClient);

    -  int* sendBuff = new int[nbClient];
    -  int* sendNbIndexBuff = new int[nbClient];
    -  for (int i = 0; i < nbClient; ++i)
    -  {
    -    sendBuff[i] = 0; sendNbIndexBuff[i] = 0;
    -  }
    -
    -  // Compute size of sending and receving buffer
    -  std::map<int, std::vector<size_t> > client2ClientIndex;
    -  std::map<int, std::vector<InfoType> > client2ClientInfo;
    -
    +  std::vector<int> sendBuff(nbClient,0);
    +  std::vector<int> sendNbIndexBuff(nbClient,0);
       std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash,
                                           iteClientHash = hashedIndex.end();
    -  typename boost::unordered_map<size_t,InfoType>::const_iterator it  = indexInfoMap.begin(),
    +  typename boost::unordered_map<size_t,InfoType>::const_iterator itb = indexInfoMap.begin(),it,
                                                                      ite = indexInfoMap.end();
       HashXIOS<size_t> hashGlobalIndex;
    -  for (; it != ite; ++it)
    +
    +  // Compute size of sending and receving buffer
    +  for (it = itb; it != ite; ++it)
       {
         size_t hashIndex = hashGlobalIndex(it->first);
         itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
    -    if (itClientHash != iteClientHash)
         {
           int indexClient = std::distance(itbClientHash, itClientHash)-1;
           {
    -        sendBuff[indexClient] = 1;
             ++sendNbIndexBuff[indexClient];
    -        client2ClientIndex[indexClient].push_back(it->first);
    -        client2ClientInfo[indexClient].push_back(it->second);
           }
         }
       }

    +  std::map<int, size_t*> client2ClientIndex;
    +  std::map<int, unsigned char*> client2ClientInfo;
    +  for (int idx = 0; idx < nbClient; ++idx)
    +  {
    +    if (0 != sendNbIndexBuff[idx])
    +    {
    +      client2ClientIndex[idx+groupRankBegin] = new unsigned long [sendNbIndexBuff[idx]];
    +      client2ClientInfo[idx+groupRankBegin] = new unsigned char [sendNbIndexBuff[idx]*ProcessDHTElement<InfoType>::typeSize()];
    +      sendNbIndexBuff[idx] = 0;
    +      sendBuff[idx] = 1;
    +    }
    +  }
    +
    +  std::vector<int> sendNbInfo(nbClient,0);
    +  for (it = itb; it != ite; ++it)
    +  {
    +    size_t hashIndex = hashGlobalIndex(it->first);
    +    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
    +    {
    +      int indexClient = std::distance(itbClientHash, itClientHash)-1;
    +      {
    +        client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;;
    +        ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]);
    +        ++sendNbIndexBuff[indexClient];
    +      }
    +    }
    +  }
    +
       // Calculate from how many clients each client receive message.
    -  int* recvBuff = new int[nbClient];
    -  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
    -  int recvNbClient = recvBuff[clientRank];
    -
       // Calculate size of buffer for receiving message
    -  int* recvNbIndexBuff = new int[nbClient];
    -  MPI_Allreduce(sendNbIndexBuff, recvNbIndexBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
    -  int recvNbIndexCount = recvNbIndexBuff[clientRank];
    -  unsigned long* recvIndexBuff = new unsigned long[recvNbIndexCount];
    -  unsigned char* recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()];
    +  int recvNbClient, recvNbIndexCount;
    +  sendRecvRank(level, sendBuff, sendNbIndexBuff,
    +               recvNbClient, recvNbIndexCount);

       // If a client holds information about index and the corresponding which don't belong to it,
    …
       // Contents of the message are index and its corresponding informatioin
       std::list<MPI_Request> sendRequest;
    -  std::map<int, std::vector<size_t> >::iterator itIndex  = client2ClientIndex.begin(),
    -                                                iteIndex = client2ClientIndex.end();
    -  for (; itIndex != iteIndex; ++itIndex)
    -    sendIndexToClients(itIndex->first, itIndex->second, commLevel, sendRequest);
    -  typename std::map<int, std::vector<InfoType> >::iterator itbInfo = client2ClientInfo.begin(), itInfo,
    -                                                           iteInfo = client2ClientInfo.end();
    -
    -  std::vector<int> infoSizeToSend(client2ClientInfo.size(),0);
    -  std::vector<unsigned char*> infoToSend(client2ClientInfo.size());
    -  itInfo = itbInfo;
    -  for (int idx = 0; itInfo != iteInfo; ++itInfo, ++idx)
    -  {
    -    const std::vector<InfoType>& infoVec = itInfo->second;
    -    int infoVecSize = infoVec.size();
    -    std::vector<int> infoIndex(infoVecSize);
    -    for (int i = 0; i < infoVecSize; ++i)
    -    {
    -      infoIndex[i] = infoSizeToSend[idx];
    -      ProcessDHTElement<InfoType>::packElement(infoVec[i], NULL, infoSizeToSend[idx]);
    -    }
    -
    -    infoToSend[idx] = new unsigned char[infoSizeToSend[idx]];
    -    infoSizeToSend[idx] = 0;
    -    for (int i = 0; i < infoVecSize; ++i)
    -    {
    -      ProcessDHTElement<InfoType>::packElement(infoVec[i], infoToSend[idx], infoSizeToSend[idx]);
    -    }
    -
    -    sendInfoToClients(itInfo->first, infoToSend[idx], infoSizeToSend[idx], commLevel, sendRequest);
    -  }
    -
    +  std::map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex,
    +                                    iteIndex = client2ClientIndex.end();
    +  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
    +    sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, sendRequest);
    +  std::map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo,
    +                                          iteInfo = client2ClientInfo.end();
    +  for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo)
    +    sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, sendRequest);
    +
    +
    +  unsigned long* recvIndexBuff = new unsigned long[recvNbIndexCount];
    +  unsigned char* recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()];

       std::map<int, MPI_Request>::iterator itRequestIndex, itRequestInfo;
    …
       }

    -  for (int idx = 0; idx < infoToSend.size(); ++idx) delete [] infoToSend[idx];
    -  delete [] sendBuff;
    -  delete [] sendNbIndexBuff;
    -  delete [] recvBuff;
    -  delete [] recvNbIndexBuff;
    +  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) delete [] itIndex->second;
    +  for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo) delete [] itInfo->second;
       delete [] recvIndexBuff;
       delete [] recvInfoBuff;
    …
       {
         --level;
    -    computeDistributedIndex(indexToInfoMapping, this->commLevel_[level], level);
    +    computeDistributedIndex(indexToInfoMapping, this->internalComm_, level);
       }
       else
    …
     */
     template<typename T, typename H>
    -void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, std::vector<size_t>& indices,
    +void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                                                            const MPI_Comm& clientIntraComm,
                                                            std::list<MPI_Request>& requestSendIndex)
    …
       MPI_Request request;
       requestSendIndex.push_back(request);
    -  MPI_Isend(&(indices)[0], (indices).size(), MPI_UNSIGNED_LONG,
    +  MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
                 clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back()));
     }
    …
     }

    -}
    +/*!
    +  Compute how many processes one process needs to send to and from how many processes it will receive
    +*/
    +template<typename T, typename H>
    +void CClientClientDHTTemplate<T,H>::computeSendRecvRank(int level, int rank)
    +{
    +  int groupBegin = this->getGroupBegin()[level];
    +  int nbInGroup  = this->getNbInGroup()[level];
    +  const std::vector<int>& groupParentBegin = this->getGroupParentsBegin()[level];
    +  const std::vector<int>& nbInGroupParents = this->getNbInGroupParents()[level];
    +
    +  std::vector<size_t> hashedIndexGroup;
    +  computeHashIndex(hashedIndexGroup, nbInGroup);
    +  size_t a = hashedIndexGroup[rank-groupBegin];
    +  size_t b = hashedIndexGroup[rank-groupBegin+1]-1;
    +
    +  int currentGroup, offset;
    +  size_t e,f;
    +
    +  // Do a simple math [a,b) intersect [c,d)
    +  for (int idx = 0; idx < groupParentBegin.size(); ++idx)
    +  {
    +    std::vector<size_t> hashedIndexGroupParent;
    +    int nbInGroupParent = nbInGroupParents[idx];
    +    if (0 != nbInGroupParent)
    +      computeHashIndex(hashedIndexGroupParent, nbInGroupParent);
    +    for (int i = 0; i < nbInGroupParent; ++i)
    +    {
    +      size_t c = hashedIndexGroupParent[i];
    +      size_t d = hashedIndexGroupParent[i+1]-1;
    +
    +      if (!((d < a) || (b < c)))
    +        recvRank_[level].push_back(groupParentBegin[idx]+i);
    +    }
    +
    +    offset = rank - groupParentBegin[idx];
    +    if ((offset<nbInGroupParents[idx]) && (0 <= offset))
    +    {
    +      e = hashedIndexGroupParent[offset];
    +      f = hashedIndexGroupParent[offset+1]-1;
    +    }
    +  }
    +
    +  std::vector<size_t>::const_iterator itbHashGroup = hashedIndexGroup.begin(), itHashGroup,
    +                                      iteHashGroup = hashedIndexGroup.end();
    +  itHashGroup = std::lower_bound(itbHashGroup, iteHashGroup, e+1);
    +  int begin = std::distance(itbHashGroup, itHashGroup)-1;
    +  itHashGroup = std::upper_bound(itbHashGroup, iteHashGroup, f);
    +  int end = std::distance(itbHashGroup, itHashGroup) -1;
    +  sendRank_[level].resize(end-begin+1);
    +  for (int idx = 0; idx < sendRank_[level].size(); ++idx) sendRank_[level][idx] = idx + groupBegin + begin;
    +}
    +
    +/*!
    +  Send and receive number of process each process need to listen to as well as number
    +  of index it will receive
    +*/
    +template<typename T, typename H>
    +void CClientClientDHTTemplate<T,H>::sendRecvRank(int level,
    +                                                 const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
    +                                                 int& recvNbRank, int& recvNbElements)
    +{
    +  int groupBegin = this->getGroupBegin()[level];
    +
    +  int offSet = 0;
    +  std::vector<int>& sendRank = sendRank_[level];
    +  std::vector<int>& recvRank = recvRank_[level];
    +  int sendBuffSize = sendRank.size();
    +  int* sendBuff = new int [sendBuffSize*2];
    +  std::vector<MPI_Request> request(sendBuffSize);
    +  std::vector<MPI_Status> requestStatus(sendBuffSize);
    +  int recvBuffSize = recvRank.size();
    +  int* recvBuff = new int [2];
    +
    +  for (int idx = 0; idx < sendBuffSize; ++idx)
    +  {
    +    offSet = sendRank[idx]-groupBegin;
    +    sendBuff[idx*2] = sendNbRank[offSet];
    +    sendBuff[idx*2+1] = sendNbElements[offSet];
    +  }
    +
    +  for (int idx = 0; idx < sendBuffSize; ++idx)
    +  {
    +    MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,
    +              sendRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[idx]);
    +  }
    +
    +  MPI_Status status;
    +  int nbRecvRank = 0, nbRecvElements = 0;
    +  for (int idx = 0; idx < recvBuffSize; ++idx)
    +  {
    +    MPI_Recv(recvBuff, 2, MPI_INT,
    +             recvRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &status);
    +    nbRecvRank += *(recvBuff);
    +    nbRecvElements += *(recvBuff+1);
    +  }
    +
    +  MPI_Waitall(sendBuffSize, &request[0], &requestStatus[0]);
    +
    +  recvNbRank = nbRecvRank;
    +  recvNbElements = nbRecvElements;
    +
    +  delete [] sendBuff;
    +  delete [] recvBuff;
    +}
    +
    +}
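
    The heart of computeSendRecvRank above is a closed-interval overlap test: a rank that owns the hash range [a,b] at its level must listen to every parent-level rank whose range [c,d] overlaps it. A minimal sketch of just that predicate (the sample values are made up):

    #include <cstdio>

    // Two closed ranges [a,b] and [c,d] intersect unless one ends before the other
    // begins, which is exactly the !((d < a) || (b < c)) test used above.
    bool intersects(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
    {
      return !((d < a) || (b < c));
    }

    int main()
    {
      std::printf("%d\n", intersects(0, 49, 25, 74)); // 1: [0,49] and [25,74] overlap
      std::printf("%d\n", intersects(0, 49, 50, 99)); // 0: disjoint
      return 0;
    }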
  • XIOS/trunk/src/dht_data_types.hpp (r830 → r833)

     namespace xios
     {
    -  typedef std::pair<int,int> PairIntInt;
    -//struct PairIntInt
    -//{
    -////  PairIntInt(int f, int s) : first(f), second(s) {}
    -////  PairIntInt(const PairIntInt& p)
    -////  {
    -////    this->first = p.first;
    -////    this->second=p.second;
    -////  }
    -//
    -//  int first;
    -//  int second;
    -//};
    +typedef std::pair<int,int> PairIntInt;

     template<typename T>
  • XIOS/trunk/src/distribution_client.cpp (r831 → r833)

        , gridMask_(), localDomainIndex_(), localAxisIndex_(), indexMap_(), indexDomainData_(), indexAxisData_()
        , isDataDistributed_(true), axisNum_(0), domainNum_(0), nIndexDomain_(), nIndexAxis_()
    -   , localDataIndex_(), localMaskIndex_()//, globalDataSendToServer_(), localDataIndexSendToServer_()
    +   , localDataIndex_(), localMaskIndex_()
        , globalLocalDataSendToServerMap_()
        , infoIndex_()
    …
        , gridMask_(), localDomainIndex_(), localAxisIndex_(), indexMap_(), indexDomainData_(), indexAxisData_()
        , isDataDistributed_(true), axisNum_(0), domainNum_(0), nIndexDomain_(), nIndexAxis_()
    -   , localDataIndex_(), localMaskIndex_()//, globalDataSendToServer_(), localDataIndexSendToServer_()
    +   , localDataIndex_(), localMaskIndex_()
        , globalLocalDataSendToServerMap_()
        , infoIndex_()
    …

       // Now allocate these arrays
    -//  globalDataSendToServer_.resize(indexSend2ServerCount);
    -//  localDataIndexSendToServer_.resize(indexSend2ServerCount);
       localDataIndex_.resize(indexLocalDataOnClientCount);
    -
       localMaskIndex_.resize(indexSend2ServerCount);

    …
                 }
               }
    -//          globalDataSendToServer_[indexSend2ServerCount] = globalIndex;
    -//          localDataIndexSendToServer_[indexSend2ServerCount] = indexLocalDataOnClientCount;
               globalLocalDataSendToServerMap_[globalIndex] = indexLocalDataOnClientCount;
               localMaskIndex_[indexSend2ServerCount] = gridMaskIndex;
  • XIOS/trunk/src/io/netCdfInterface.cpp (r811 → r833)

         sstr << "Error when calling function nc_inq_varname(ncid, varId, varNameBuff)" << std::endl
              << errormsg << std::endl
    -         << "Unable to get variable name given its id: " << varId << std::endl;
    +         << "Unable to get variable name: "<< varName << " given its id: " << varId << std::endl;
         StdString e = sstr.str();
         throw CNetCdfException(e);
    …
         sstr << "Error when calling function nc_inq_dimname(ncid, dimId, fullNameIn)" << std::endl
              << errormsg << std::endl
    -         << "Unable to get dimension name given its id: " << dimId << std::endl;
    +         << "Unable to get dimension name: " << dimName << " given its id: " << dimId << std::endl;
         StdString e = sstr.str();
         throw CNetCdfException(e);
    …
         sstr << "Error when calling function nc_inq_attname(ncid, varid, attnum, attName)" << std::endl;
         sstr << errormsg << std::endl;
    -    sstr << "Unable to query the name of attribute " << attnum << " given the location id:" << ncid << " and the variable id:" << varid << std::endl;
    +    sstr << "Unable to query the name: " << name << " of attribute " << attnum << " given the location id:" << ncid << " and the variable id:" << varid << std::endl;
         StdString e = sstr.str();
         throw CNetCdfException(e);
  • XIOS/trunk/src/io/netCdfInterface_impl.hpp (r686 → r833)

         {
           StdStringStream sstr;
    +      StdString varName;
           sstr << "Error when calling function ncGetAttType(ncid, varId, attrName.c_str(), data)" << std::endl;
           sstr << nc_strerror(status) << std::endl;
    -      sstr << "Unable to read attribute " << attrName << " given the location id: " << ncid << " and the variable id: " << varId << std::endl;
    +      inqVarName(ncid, varId, varName);
    +      sstr << "Unable to read attribute " << attrName << " given the location id: " << ncid << " and the variable whose id: " << varId << " and name: " << varName << std::endl;
           throw CNetCdfException(sstr.str());
         }
    …
         {
           StdStringStream sstr;
    +      StdString varName;
           sstr << "Error when calling function ncPutAttType(ncid, varId, attrName.c_str(), numVal, data)" << std::endl;
           sstr << nc_strerror(status) << std::endl;
    -      sstr << "Unable to set attribute " << attrName << " given the location id: " << ncid << " and the variable id: " << varId
    +      inqVarName(ncid, varId, varName);
    +      sstr << "Unable to set attribute " << attrName << " given the location id: " << ncid << " and the variable whose id: " << varId << " and name: " << varName << std::endl
                << " with " << numVal << " elements." << std::endl;
           throw CNetCdfException(sstr.str());
    …
         {
           StdStringStream sstr;
    +      StdString varName;
           sstr << "Error when calling function ncGetVaraType(ncid, varId, start, count, data)" << std::endl;
           sstr << nc_strerror(status) << std::endl;
    -      sstr << "Unable to read data given the location id: " << ncid << " and the variable id: " << varId << std::endl;
    +      inqVarName(ncid, varId, varName);
    +      sstr << "Unable to read data given the location id: " << ncid << " and the variable whose id: " << varId << " and name: " << varName << std::endl;
           throw CNetCdfException(sstr.str());
         }
    …
         {
           StdStringStream sstr;
    +      StdString varName;
           sstr << "Error when calling function ncPutVaraType(ncid, varId, start, count, data)" << std::endl;
           sstr << nc_strerror(status) << std::endl;
    -      sstr << "Unable to write data given the location id: " << ncid << " and the variable id: " << varId << std::endl;
    +      inqVarName(ncid, varId, varName);
    +      sstr << "Unable to write data given the location id: " << ncid << " and the variable whose id: " << varId << " and name: " << varName << std::endl;
           throw CNetCdfException(sstr.str());
         }
  • XIOS/trunk/src/mpi_tag.hpp (r829 → r833)

     /* Tag for mpi communication to send and receive info in distributed hashed table version 1*/
     #define MPI_DHT_INFO_0 22
    +
    +/* Tag for mpi communication to send and receive index in distributed hashed table version 1*/
    +#define MPI_DHT_INDEX_1 27
    +
    +/* Tag for mpi communication to send and receive info in distributed hashed table version 1*/
    +#define MPI_DHT_INFO_1 24

     /* Tag for mpi communication to send and receive info of current grid source in grid transformation*/
  • XIOS/trunk/src/policy.cpp (r721 → r833)

     }

    +DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm)
    +  : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false)
    +{
    +
     }

    +void DivideAdaptiveComm::computeMPICommLevel()
    +{
    +  if (computed_) return;
    +  computed_ = true;
    +
    +  int mpiSize, mpiRank;
    +  MPI_Comm_size(internalComm_,&mpiSize);
    +  MPI_Comm_rank(internalComm_,&mpiRank);
    +
    +  int maxChild=1;
    +  int m;
    +  do
    +  {
    +    m=1;
    +    ++maxChild;
    +    for(int i=0;i<maxChild;++i) m *= maxChild;
    +  } while(m<mpiSize);
    +
    +  int maxLevel=0;
    +  for(int size=1; size<=mpiSize; size*=maxChild) ++maxLevel;
    +
    +  int pos, n, idx;
    +  level_=0;
    +  int begin=0;
    +  int end=mpiSize-1;
    +  int nb=end-begin+1;
    +
    +  nbInGroup_ = groupBegin_= std::vector<int>(maxLevel);
    +  nbInGroupParents_ = groupParentsBegin_= std::vector<std::vector<int> >(maxLevel,std::vector<int>(maxChild));
    +
    +  groupBegin_[level_] = begin;
    +  nbInGroup_[level_] = nb;
    +  ++level_;
    +  while (nb>2 && (level_<maxLevel))
    +  {
    +    n = 0; idx = 0;
    +    pos = begin;
    +    for(int i=0;i<maxChild && i<nb;i++)
    +    {
    +      if (i<nb%maxChild) n = nb/maxChild + 1;
    +      else n = nb/maxChild;
    +
    +      if (mpiRank>=pos && mpiRank<pos+n)
    +      {
    +        begin=pos;
    +        end=pos+n-1;
    +      }
    +      groupParentsBegin_[level_-1][idx] = pos;
    +      nbInGroupParents_[level_-1][idx] = n;
    +      ++idx;
    +      pos=pos+n;
    +    }
    +    groupBegin_[level_] = begin;
    +    nbInGroup_[level_] = nb = end-begin+1;
    +
    +    ++level_;
    +  }
    +
    +  for (int i = 0; i < nbInGroup_[level_-1];++i)
    +  {
    +    groupParentsBegin_[level_-1][i] = groupBegin_[level_-1]+i;
    +    nbInGroupParents_[level_-1][i] = 1;
    +  }
    +
    +//  parent=vector<int>(maxLevel+1);
    +//  child=vector<vector<int> >(maxLevel+1,vector<int>(maxChild));
    +//  nbChild=vector<int> (maxLevel+1);
    +
    +//  do
    +//  {
    +//    n=0;
    +//    pos=begin;
    +//    nbChild[level_]=0;
    +//    parent[level_+1]=begin;
    +//    for(int i=0;i<maxChild && i<nb;i++)
    +//    {
    +//      if (i<nb%maxChild) n = nb/maxChild + 1;
    +//      else n = nb/maxChild;
    +//
    +//      if (mpiRank>=pos && mpiRank<pos+n)
    +//      {
    +//        begin=pos;
    +//        end=pos+n-1;
    +//      }
    +//      child[level_][i]=pos;
    +//      pos=pos+n;
    +//      nbChild[level_]++;
    +//    }
    +//    nb=end-begin+1;
    +//    level_=level_+1;
    +//  } while (nb>1);
    +}
    +
    +//void DivideAdaptiveComm::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int color, int level)
    +//{
    +////  int clientRank;
    +////  MPI_Comm_rank(mpiCommLevel,&clientRank);
    +//
    +//   --level;
    +//  if (0 < level)
    +//  {
    +//   int color = clientRank % 2;
    +//   commLevel_.push_back(MPI_Comm());
    +//   MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
    +//   divideMPICommLevel(commLevel_.back(), level);
    +//  }
    +//}
    +
    +}
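
    A hypothetical walk-through of one split performed by computeMPICommLevel above, for 10 processes (so maxChild = 3): a group of nb ranks divides into subgroups of size nb/maxChild, the first nb%maxChild of them one rank larger:

    #include <cstdio>

    int main()
    {
      int nb = 10, maxChild = 3; // made-up sizes for illustration
      int pos = 0;
      for (int i = 0; i < maxChild && i < nb; ++i)
      {
        // Same distribution rule as the loop in computeMPICommLevel.
        int n = nb / maxChild + (i < nb % maxChild ? 1 : 0);
        std::printf("subgroup %d: ranks [%d..%d]\n", i, pos, pos + n - 1);
        pos += n;
      }
      return 0;
    }

    This prints [0..3], [4..6], [7..9]; each process then recurses only into the subgroup that contains its own rank.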
  • XIOS/trunk/src/policy.hpp (r721 → r833)

     };

    +class DivideAdaptiveComm
    +{
    +protected:
    +  DivideAdaptiveComm(const MPI_Comm& mpiComm);
    +
    +  void computeMPICommLevel();
    +  const std::vector<int>& getGroupBegin() { return groupBegin_; }
    +  const std::vector<int>& getNbInGroup() { return nbInGroup_; }
    +  const std::vector<std::vector<int> >& getGroupParentsBegin() { return groupParentsBegin_; }
    +  const std::vector<std::vector<int> >& getNbInGroupParents() { return nbInGroupParents_; }
    +  int getNbLevel() { return level_; }
    +
    +protected:
    +  const MPI_Comm& internalComm_;
    +  std::vector<std::vector<int> > groupParentsBegin_;
    +  std::vector<std::vector<int> > nbInGroupParents_;
    +
    +  int level_;
    +  std::vector<int> groupBegin_;  //! Rank beginning of a group
    +  std::vector<int> nbInGroup_; //! Number of process in each group
    +  bool computed_;
    +//  std::vector<std::vector<int> > child_; /*!< List of child rank for each level */
    +//  std::vector<int> nbChild_;         /*!< Number of child for each level */
    +};
    +
     }

  • XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp (r831 → r833)

     void CAxisAlgorithmInterpolate::computeWeightedValueAndMapping(const std::map<int, std::vector<std::pair<int,double> > >& interpolatingIndexValues, int transPos)
     {
    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[transPos];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[transPos];
    +  TransformationIndexMap& transMap = this->transformationMapping_[transPos];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[transPos];
       std::map<int, std::vector<std::pair<int,double> > >::const_iterator itb = interpolatingIndexValues.begin(), it,
                                                                           ite = interpolatingIndexValues.end();
  • XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp (r829 → r833)

                << "Size of axis destionation " <<axisDestination->getId() << " is " << axisDestination->n_glo.getValue());
       }
    -
    -//  this->computeIndexSourceMapping();
     }

    …
       this->transformationWeight_.resize(1);

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[0];
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[0];

       int globalIndexSize = axisDestGlobalIndex_.size();
    …
       CTransformationMapping transformationMap(axisDest_, axisSrc_);

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[0];
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[0];

       CTransformationMapping::DestinationIndexMap globaIndexMapFromDestToSource;
    -  std::map<int, std::vector<int> >::const_iterator it = transMap.begin(), ite = transMap.end();
    +  TransformationIndexMap::const_iterator it = transMap.begin(), ite = transMap.end();
       int localIndex = 0;
       for (; it != ite; ++it)
  • XIOS/trunk/src/transformation/axis_algorithm_zoom.cpp (r827 → r833)

       this->transformationWeight_.resize(1);

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[0];
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[0];

       for (StdSize idx = 0; idx < ni; ++idx)
    …
       StdSize iBeginMask = axisDest_->begin.getValue();
       StdSize globalIndexMask = 0;
    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<int> >::const_iterator ite = (transMap).end();
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationIndexMap::const_iterator ite = (transMap).end();
       for (StdSize idx = 0; idx < niMask; ++idx)
       {
  • XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp (r829 → r833)

     {
       interpDomain_->checkValid(domainSource);
    -//  computeIndexSourceMapping();
     }

    …
       this->transformationWeight_.resize(1);

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[0];
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[0];

       boost::unordered_map<size_t,int> globalIndexOfDomainDest;
  • XIOS/trunk/src/transformation/domain_algorithm_zoom.cpp (r827 → r833)

       this->transformationWeight_.resize(1);

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<double> >& transWeight = this->transformationWeight_[0];
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationWeightMap& transWeight = this->transformationWeight_[0];

     //  std::map<int, std::vector<int> >& transMap = this->transformationMapping_;
    …
       int globalIndexMask = 0;

    -  std::map<int, std::vector<int> >& transMap = this->transformationMapping_[0];
    -  std::map<int, std::vector<int> >::const_iterator ite = (transMap).end();
    +  TransformationIndexMap& transMap = this->transformationMapping_[0];
    +  TransformationIndexMap::const_iterator ite = (transMap).end();
       for (int j = 0; j < njMask; ++j)
       {
  • XIOS/trunk/src/transformation/generic_algorithm_transformation.cpp (r831 → r833)

       for (size_t idxTrans = 0; idxTrans < transformationMapping_.size(); ++idxTrans)
       {
    -    std::map<int, std::vector<int> >::const_iterator itbTransMap = transformationMapping_[idxTrans].begin(), itTransMap,
    +    TransformationIndexMap::const_iterator itbTransMap = transformationMapping_[idxTrans].begin(), itTransMap,
                                                          iteTransMap = transformationMapping_[idxTrans].end();
    -    std::map<int, std::vector<double> >::const_iterator itTransWeight = transformationWeight_[idxTrans].begin();
    +    TransformationWeightMap::const_iterator itTransWeight = transformationWeight_[idxTrans].begin();

         // If transformation position exists
    -    std::map<int, std::vector<int> >::const_iterator itTransPos, iteTransPos;
    +    TransformationIndexMap::const_iterator itTransPos, iteTransPos;
         if (!isTransPosEmpty)
         {
  • XIOS/trunk/src/transformation/generic_algorithm_transformation.hpp (r831 → r833)


     protected:
    +  typedef boost::unordered_map<int, std::vector<int> > TransformationIndexMap;
    +  typedef boost::unordered_map<int, std::vector<double> > TransformationWeightMap;
    +  typedef boost::unordered_map<int, std::vector<int> > TransformationPositionMap;
    +
       //! Map between global index of destination element and source element
    -  std::vector<std::map<int, std::vector<int> > > transformationMapping_;
    +  std::vector<TransformationIndexMap> transformationMapping_;
       //! Weight corresponding of source to destination
    -  std::vector<std::map<int, std::vector<double> > > transformationWeight_;
    +  std::vector<TransformationWeightMap> transformationWeight_;
       //! Map of global index of destination element and corresponding global index of other elements in the same grid
       //! By default, one index of an element corresponds to all index of remaining element in the grid. So it's empty
    -  std::vector<std::map<int, std::vector<int> > > transformationPosition_;
    +  std::vector<TransformationPositionMap> transformationPosition_;

       //! Id of auxillary inputs which help doing transformation dynamically