Ignore:
Timestamp:
06/09/16 11:33:19 (8 years ago)
Author:
mhnguyen
Message:

Various clean up

+) Remove some redundant code

Test
+) On Curie
+) tests pass

File:
1 edited

Legend:

Unmodified
Added
Removed
  • XIOS/trunk/src/client_client_dht_template_impl.hpp

    r863 r867  
    205205      if (iteIndexToInfoMap != itIndexToInfoMap) 
    206206        sendNbIndexOnReturn[idx] += itIndexToInfoMap->second.size(); 
    207 //        ++sendNbIndexOnReturn[idx]; 
    208207    } 
    209208    currentIndex += recvNbIndexClientCount[idx]; 
     
    294293    ProcessDHTElement<InfoType>::unpackElement(unpackedInfo, recvInfoBuffOnReturn, infoIndex); 
    295294    indexToInfoMapping[recvIndexBuffOnReturn[idx]].push_back(unpackedInfo); 
    296 //    ProcessDHTElement<InfoType>::unpackElement(indexToInfoMapping[recvIndexBuffOnReturn[idx]], recvInfoBuffOnReturn, infoIndex); 
    297   } 
    298  
    299   indexToInfoMappingLevel_.swap(indexToInfoMapping); //indexToInfoMappingLevel_ = (indexToInfoMapping); 
     295  } 
     296 
     297  indexToInfoMappingLevel_.swap(indexToInfoMapping); 
    300298  if (0 != recvNbIndexCount) delete [] recvIndexBuff; 
    301299  for (boost::unordered_map<int,size_t*>::const_iterator it = client2ClientIndex.begin(); 
     
    318316      delete [] it->second; 
    319317} 
    320  
    321 ///*! 
    322 //    Compute mapping between indices and information corresponding to these indices 
    323 //for each level of hierarchical DHT. Recursive function 
    324 //   \param [in] indices indices a proc has 
    325 //   \param [in] commLevel communicator of current level 
    326 //   \param [in] level current level 
    327 //*/ 
    328 //template<typename T, typename H> 
    329 //void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    330 //                                                                 const MPI_Comm& commLevel, 
    331 //                                                                 int level) 
    332 //{ 
    333 //  int clientRank; 
    334 //  MPI_Comm_rank(commLevel,&clientRank); 
    335 //  int groupRankBegin = this->getGroupBegin()[level]; 
    336 //  int nbClient = this->getNbInGroup()[level]; 
    337 //  std::vector<size_t> hashedIndex; 
    338 //  computeHashIndex(hashedIndex, nbClient); 
    339 // 
    340 //  size_t ssize = indices.numElements(), hashedVal; 
    341 // 
    342 //  std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash, 
    343 //                                      iteClientHash = hashedIndex.end(); 
    344 //  std::vector<int> sendBuff(nbClient,0); 
    345 //  std::vector<int> sendNbIndexBuff(nbClient,0); 
    346 // 
    347 //  // Number of global index whose mapping server are on other clients 
    348 //  int nbIndexToSend = 0; 
    349 //  size_t index; 
    350 //  HashXIOS<size_t> hashGlobalIndex; 
    351 //  for (int i = 0; i < ssize; ++i) 
    352 //  { 
    353 //    index = indices(i); 
    354 //    hashedVal  = hashGlobalIndex(index); 
    355 //    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedVal); 
    356 //    int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    357 //    ++sendNbIndexBuff[indexClient]; 
    358 //  } 
    359 // 
    360 //  boost::unordered_map<int, size_t* > client2ClientIndex; 
    361 //  for (int idx = 0; idx < nbClient; ++idx) 
    362 //  { 
    363 //    if (0 != sendNbIndexBuff[idx]) 
    364 //    { 
    365 //      client2ClientIndex[idx+groupRankBegin] = new unsigned long [sendNbIndexBuff[idx]]; 
    366 //      nbIndexToSend += sendNbIndexBuff[idx]; 
    367 //      sendBuff[idx] = 1; 
    368 //      sendNbIndexBuff[idx] = 0; 
    369 //    } 
    370 //  } 
    371 // 
    372 //  for (int i = 0; i < ssize; ++i) 
    373 //  { 
    374 //    index = indices(i); 
    375 //    hashedVal  = hashGlobalIndex(index); 
    376 //    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedVal); 
    377 //    { 
    378 //      int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    379 //      { 
    380 //        client2ClientIndex[indexClient+groupRankBegin][sendNbIndexBuff[indexClient]] = index; 
    381 //        ++sendNbIndexBuff[indexClient]; 
    382 //      } 
    383 //    } 
    384 //  } 
    385 // 
    386 //  std::vector<int> recvRankClient, recvNbIndexClientCount; 
    387 //  sendRecvRank(level, sendBuff, sendNbIndexBuff, 
    388 //               recvRankClient, recvNbIndexClientCount); 
    389 // 
    390 //  int recvNbIndexCount = 0; 
    391 //  for (int idx = 0; idx < recvNbIndexClientCount.size(); ++idx) 
    392 //    recvNbIndexCount += recvNbIndexClientCount[idx]; 
    393 // 
    394 //  unsigned long* recvIndexBuff; 
    395 //  if (0 != recvNbIndexCount) 
    396 //    recvIndexBuff = new unsigned long[recvNbIndexCount]; 
    397 // 
    398 //  std::vector<MPI_Request> request; 
    399 //  std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 
    400 //                             iteRecvIndex = recvRankClient.end(), 
    401 //                           itbRecvNbIndex = recvNbIndexClientCount.begin(), 
    402 //                           itRecvNbIndex; 
    403 //  int currentIndex = 0; 
    404 //  int nbRecvClient = recvRankClient.size(); 
    405 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    406 //  { 
    407 //    if (0 != recvNbIndexClientCount[idx]) 
    408 //      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request); 
    409 //    currentIndex += recvNbIndexClientCount[idx]; 
    410 //  } 
    411 // 
    412 //  boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex, 
    413 //                                                iteIndex = client2ClientIndex.end(); 
    414 //  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 
    415 //    sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
    416 // 
    417 //  std::vector<MPI_Status> status(request.size()); 
    418 //  MPI_Waitall(request.size(), &request[0], &status[0]); 
    419 // 
    420 //  CArray<size_t,1>* tmpGlobalIndex; 
    421 //  if (0 != recvNbIndexCount) 
    422 //    tmpGlobalIndex = new CArray<size_t,1>(recvIndexBuff, shape(recvNbIndexCount), neverDeleteData); 
    423 //  else 
    424 //    tmpGlobalIndex = new CArray<size_t,1>(); 
    425 // 
    426 //  // OK, we go to the next level and do something recursive 
    427 //  if (0 < level) 
    428 //  { 
    429 //    --level; 
    430 //    computeIndexInfoMappingLevel(*tmpGlobalIndex, this->internalComm_, level); 
    431 //  } 
    432 //  else // Now, we are in the last level where necessary mappings are. 
    433 //    indexToInfoMappingLevel_= (index2InfoMapping_); 
    434 // 
    435 //  typename Index2InfoTypeMap::const_iterator iteIndexToInfoMap = indexToInfoMappingLevel_.end(), itIndexToInfoMap; 
    436 //  std::vector<int> sendNbIndexOnReturn(nbRecvClient,0); 
    437 //  currentIndex = 0; 
    438 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    439 //  { 
    440 //    for (int i = 0; i < recvNbIndexClientCount[idx]; ++i) 
    441 //    { 
    442 //      itIndexToInfoMap = indexToInfoMappingLevel_.find(*(recvIndexBuff+currentIndex+i)); 
    443 //      if (iteIndexToInfoMap != itIndexToInfoMap) ++sendNbIndexOnReturn[idx]; 
    444 //    } 
    445 //    currentIndex += recvNbIndexClientCount[idx]; 
    446 //  } 
    447 // 
    448 //  std::vector<int> recvRankOnReturn(client2ClientIndex.size()); 
    449 //  std::vector<int> recvNbIndexOnReturn(client2ClientIndex.size(),0); 
    450 //  int indexIndex = 0; 
    451 //  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex, ++indexIndex) 
    452 //  { 
    453 //    recvRankOnReturn[indexIndex] = itIndex->first; 
    454 //  } 
    455 //  sendRecvOnReturn(recvRankClient, sendNbIndexOnReturn, 
    456 //                   recvRankOnReturn, recvNbIndexOnReturn); 
    457 // 
    458 //  int recvNbIndexCountOnReturn = 0; 
    459 //  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
    460 //    recvNbIndexCountOnReturn += recvNbIndexOnReturn[idx]; 
    461 // 
    462 //  unsigned long* recvIndexBuffOnReturn; 
    463 //  unsigned char* recvInfoBuffOnReturn; 
    464 //  if (0 != recvNbIndexCountOnReturn) 
    465 //  { 
    466 //    recvIndexBuffOnReturn = new unsigned long[recvNbIndexCountOnReturn]; 
    467 //    recvInfoBuffOnReturn = new unsigned char[recvNbIndexCountOnReturn*ProcessDHTElement<InfoType>::typeSize()]; 
    468 //  } 
    469 // 
    470 //  std::vector<MPI_Request> requestOnReturn; 
    471 //  currentIndex = 0; 
    472 //  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
    473 //  { 
    474 //    if (0 != recvNbIndexOnReturn[idx]) 
    475 //    { 
    476 //      recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn); 
    477 //      recvInfoFromClients(recvRankOnReturn[idx], 
    478 //                          recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 
    479 //                          recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), 
    480 //                          commLevel, requestOnReturn); 
    481 //    } 
    482 //    currentIndex += recvNbIndexOnReturn[idx]; 
    483 //  } 
    484 // 
    485 //  boost::unordered_map<int,unsigned char*> client2ClientInfoOnReturn; 
    486 //  boost::unordered_map<int,size_t*> client2ClientIndexOnReturn; 
    487 //  currentIndex = 0; 
    488 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    489 //  { 
    490 //    if (0 != sendNbIndexOnReturn[idx]) 
    491 //    { 
    492 //      int rank = recvRankClient[idx]; 
    493 //      client2ClientIndexOnReturn[rank] = new unsigned long [sendNbIndexOnReturn[idx]]; 
    494 //      client2ClientInfoOnReturn[rank] = new unsigned char [sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize()]; 
    495 //      unsigned char* tmpInfoPtr = client2ClientInfoOnReturn[rank]; 
    496 //      int infoIndex = 0; 
    497 //      int nb = 0; 
    498 //      for (int i = 0; i < recvNbIndexClientCount[idx]; ++i) 
    499 //      { 
    500 //        itIndexToInfoMap = indexToInfoMappingLevel_.find(*(recvIndexBuff+currentIndex+i)); 
    501 //        if (iteIndexToInfoMap != itIndexToInfoMap) 
    502 //        { 
    503 //          client2ClientIndexOnReturn[rank][nb] = itIndexToInfoMap->first; 
    504 //          ProcessDHTElement<InfoType>::packElement(itIndexToInfoMap->second, tmpInfoPtr, infoIndex); 
    505 //          ++nb; 
    506 //        } 
    507 //      } 
    508 // 
    509 //      sendIndexToClients(rank, client2ClientIndexOnReturn[rank], 
    510 //                         sendNbIndexOnReturn[idx], commLevel, requestOnReturn); 
    511 //      sendInfoToClients(rank, client2ClientInfoOnReturn[rank], 
    512 //                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn); 
    513 //    } 
    514 //    currentIndex += recvNbIndexClientCount[idx]; 
    515 //  } 
    516 // 
    517 //  std::vector<MPI_Status> statusOnReturn(requestOnReturn.size()); 
    518 //  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
    519 // 
    520 //  boost::unordered_map<size_t,InfoType> indexToInfoMapping; 
    521 //  indexToInfoMapping.rehash(std::ceil(recvNbIndexCountOnReturn/indexToInfoMapping.max_load_factor())); 
    522 //  int infoIndex = 0; 
    523 //  for (int idx = 0; idx < recvNbIndexCountOnReturn; ++idx) 
    524 //  { 
    525 //    ProcessDHTElement<InfoType>::unpackElement(indexToInfoMapping[recvIndexBuffOnReturn[idx]], recvInfoBuffOnReturn, infoIndex); 
    526 //  } 
    527 // 
    528 //  indexToInfoMappingLevel_.swap(indexToInfoMapping); //indexToInfoMappingLevel_ = (indexToInfoMapping); 
    529 //  if (0 != recvNbIndexCount) delete [] recvIndexBuff; 
    530 //  for (boost::unordered_map<int,size_t*>::const_iterator it = client2ClientIndex.begin(); 
    531 //                                                        it != client2ClientIndex.end(); ++it) 
    532 //      delete [] it->second; 
    533 //  delete tmpGlobalIndex; 
    534 // 
    535 //  if (0 != recvNbIndexCountOnReturn) 
    536 //  { 
    537 //    delete [] recvIndexBuffOnReturn; 
    538 //    delete [] recvInfoBuffOnReturn; 
    539 //  } 
    540 // 
    541 //  for (boost::unordered_map<int,unsigned char*>::const_iterator it = client2ClientInfoOnReturn.begin(); 
    542 //                                                               it != client2ClientInfoOnReturn.end(); ++it) 
    543 //      delete [] it->second; 
    544 // 
    545 //  for (boost::unordered_map<int,size_t*>::const_iterator it = client2ClientIndexOnReturn.begin(); 
    546 //                                            it != client2ClientIndexOnReturn.end(); ++it) 
    547 //      delete [] it->second; 
    548 //} 
    549318 
    550319/*! 
     
    689458  Index2VectorInfoTypeMap indexToInfoMapping; 
    690459  indexToInfoMapping.rehash(std::ceil(currentIndex/indexToInfoMapping.max_load_factor())); 
    691 //  boost::unordered_map<size_t,int> tmpInfoSize; 
    692 //  tmpInfoSize.rehash(std::ceil(currentIndex/tmpInfoSize.max_load_factor())); 
    693 //  currentIndex = 0; 
    694 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    695 //  { 
    696 //    int count = recvNbIndexClientCount[idx]; 
    697 //    for (int i = 0; i < count; ++i) 
    698 //    { 
    699 //      ++tmpInfoSize[*(recvIndexBuff+currentIndex+i)]; 
    700 //    } 
    701 //    currentIndex += count; 
    702 //  } 
    703 // 
    704 //  for (boost::unordered_map<size_t,int>::iterator it=tmpInfoSize.begin(); it != tmpInfoSize.end(); ++it) 
    705 //  { 
    706 //    indexToInfoMapping[it->first].resize(it->second); 
    707 //    it->second = 0; 
    708 //  } 
    709  
    710460  currentIndex = 0; 
    711461  InfoType infoValue; 
     
    714464  for (int idx = 0; idx < nbRecvClient; ++idx) 
    715465  { 
    716 //    size_t index; 
     466    size_t index; 
    717467    int count = recvNbIndexClientCount[idx]; 
    718468    for (int i = 0; i < count; ++i) 
    719469    { 
    720 //      index = *(recvIndexBuff+currentIndex+i); 
    721470      ProcessDHTElement<InfoType>::unpackElement(infoValue, infoBuff, infoIndex); 
    722 //      ProcessDHTElement<InfoType>::unpackElement(indexToInfoMapping[index][tmpInfoSize[index]], infoBuff, infoIndex); 
    723 //      ++tmpInfoSize[index]; 
    724471      indexToInfoMapping[*(recvIndexBuff+currentIndex+i)].push_back(infoValue); 
    725472    } 
     
    749496    index2InfoMapping_.swap(indexToInfoMapping); 
    750497} 
    751  
    752 ///*! 
    753 //  Compute distribution of global index for servers 
    754 //  Each client already holds a piece of information and its associated index. 
    755 //This information will be redistributed among processes by projecting indices into size_t space, 
    756 //the corresponding information will be also distributed on size_t space. 
    757 //After the redistribution, each client holds rearranged index and its corresponding information. 
    758 //  \param [in] indexInfoMap index and its corresponding info (usually server index) 
    759 //  \param [in] commLevel communicator of current level 
    760 //  \param [in] level current level 
    761 //*/ 
    762 //template<typename T, typename H> 
    763 //void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const boost::unordered_map<size_t,T>& indexInfoMap, 
    764 //                                                            const MPI_Comm& commLevel, 
    765 //                                                            int level) 
    766 //{ 
    767 //  int clientRank; 
    768 //  MPI_Comm_rank(commLevel,&clientRank); 
    769 //  computeSendRecvRank(level, clientRank); 
    770 // 
    771 //  int groupRankBegin = this->getGroupBegin()[level]; 
    772 //  int nbClient = this->getNbInGroup()[level]; 
    773 //  std::vector<size_t> hashedIndex; 
    774 //  computeHashIndex(hashedIndex, nbClient); 
    775 // 
    776 //  std::vector<int> sendBuff(nbClient,0); 
    777 //  std::vector<int> sendNbIndexBuff(nbClient,0); 
    778 //  std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash, 
    779 //                                      iteClientHash = hashedIndex.end(); 
    780 //  typename boost::unordered_map<size_t,InfoType>::const_iterator itb = indexInfoMap.begin(),it, 
    781 //                                                                 ite = indexInfoMap.end(); 
    782 //  HashXIOS<size_t> hashGlobalIndex; 
    783 // 
    784 //  // Compute size of sending and receving buffer 
    785 //  for (it = itb; it != ite; ++it) 
    786 //  { 
    787 //    size_t hashIndex = hashGlobalIndex(it->first); 
    788 //    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex); 
    789 //    { 
    790 //      int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    791 //      { 
    792 //        ++sendNbIndexBuff[indexClient]; 
    793 //      } 
    794 //    } 
    795 //  } 
    796 // 
    797 //  boost::unordered_map<int, size_t*> client2ClientIndex; 
    798 //  boost::unordered_map<int, unsigned char*> client2ClientInfo; 
    799 //  for (int idx = 0; idx < nbClient; ++idx) 
    800 //  { 
    801 //    if (0 != sendNbIndexBuff[idx]) 
    802 //    { 
    803 //      client2ClientIndex[idx+groupRankBegin] = new unsigned long [sendNbIndexBuff[idx]]; 
    804 //      client2ClientInfo[idx+groupRankBegin] = new unsigned char [sendNbIndexBuff[idx]*ProcessDHTElement<InfoType>::typeSize()]; 
    805 //      sendNbIndexBuff[idx] = 0; 
    806 //      sendBuff[idx] = 1; 
    807 //    } 
    808 //  } 
    809 // 
    810 //  std::vector<int> sendNbInfo(nbClient,0); 
    811 //  for (it = itb; it != ite; ++it) 
    812 //  { 
    813 //    size_t hashIndex = hashGlobalIndex(it->first); 
    814 //    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex); 
    815 //    { 
    816 //      int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    817 //      { 
    818 //        client2ClientIndex[indexClient + groupRankBegin][sendNbIndexBuff[indexClient]] = it->first;; 
    819 //        ProcessDHTElement<InfoType>::packElement(it->second, client2ClientInfo[indexClient + groupRankBegin], sendNbInfo[indexClient]); 
    820 //        ++sendNbIndexBuff[indexClient]; 
    821 //      } 
    822 //    } 
    823 //  } 
    824 // 
    825 //  // Calculate from how many clients each client receive message. 
    826 //  // Calculate size of buffer for receiving message 
    827 //  std::vector<int> recvRankClient, recvNbIndexClientCount; 
    828 //  sendRecvRank(level, sendBuff, sendNbIndexBuff, 
    829 //               recvRankClient, recvNbIndexClientCount); 
    830 // 
    831 //  int recvNbIndexCount = 0; 
    832 //  for (int idx = 0; idx < recvNbIndexClientCount.size(); ++idx) 
    833 //    recvNbIndexCount += recvNbIndexClientCount[idx]; 
    834 // 
    835 //  unsigned long* recvIndexBuff; 
    836 //  unsigned char* recvInfoBuff; 
    837 //  if (0 != recvNbIndexCount) 
    838 //  { 
    839 //    recvIndexBuff = new unsigned long[recvNbIndexCount]; 
    840 //    recvInfoBuff = new unsigned char[recvNbIndexCount*ProcessDHTElement<InfoType>::typeSize()]; 
    841 //  } 
    842 // 
    843 //  // If a client holds information about index and the corresponding which don't belong to it, 
    844 //  // it will send a message to the correct clients. 
    845 //  // Contents of the message are index and its corresponding informatioin 
    846 //  std::vector<MPI_Request> request; 
    847 //  int currentIndex = 0; 
    848 //  int nbRecvClient = recvRankClient.size(); 
    849 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    850 //  { 
    851 //    if (0 != recvNbIndexClientCount[idx]) 
    852 //    { 
    853 //      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request); 
    854 //      recvInfoFromClients(recvRankClient[idx], 
    855 //                          recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 
    856 //                          recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(), 
    857 //                          commLevel, request); 
    858 //    } 
    859 //    currentIndex += recvNbIndexClientCount[idx]; 
    860 //  } 
    861 // 
    862 //  boost::unordered_map<int, size_t* >::iterator itbIndex = client2ClientIndex.begin(), itIndex, 
    863 //                                                iteIndex = client2ClientIndex.end(); 
    864 //  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 
    865 //    sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
    866 //  boost::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo, 
    867 //                                                      iteInfo = client2ClientInfo.end(); 
    868 //  for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo) 
    869 //    sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request); 
    870 // 
    871 //  std::vector<MPI_Status> status(request.size()); 
    872 //  MPI_Waitall(request.size(), &request[0], &status[0]); 
    873 // 
    874 //  boost::unordered_map<size_t,InfoType> indexToInfoMapping; 
    875 //  indexToInfoMapping.rehash(std::ceil(currentIndex/indexToInfoMapping.max_load_factor())); 
    876 //  currentIndex = 0; 
    877 //  InfoType infoValue; 
    878 //  int infoIndex = 0; 
    879 //  unsigned char* infoBuff = recvInfoBuff; 
    880 //  for (int idx = 0; idx < nbRecvClient; ++idx) 
    881 //  { 
    882 //    int count = recvNbIndexClientCount[idx]; 
    883 //    for (int i = 0; i < count; ++i) 
    884 //    { 
    885 //      ProcessDHTElement<InfoType>::unpackElement(infoValue, infoBuff, infoIndex); 
    886 //      indexToInfoMapping[*(recvIndexBuff+currentIndex+i)] = infoValue; 
    887 //    } 
    888 //    currentIndex += count; 
    889 //  } 
    890 // 
    891 //  if (0 != recvNbIndexCount) 
    892 //  { 
    893 //    delete [] recvIndexBuff; 
    894 //    delete [] recvInfoBuff; 
    895 //  } 
    896 //  for (boost::unordered_map<int,unsigned char*>::const_iterator it = client2ClientInfo.begin(); 
    897 //                                                               it != client2ClientInfo.end(); ++it) 
    898 //      delete [] it->second; 
    899 // 
    900 //  for (boost::unordered_map<int,size_t*>::const_iterator it = client2ClientIndex.begin(); 
    901 //                                                        it != client2ClientIndex.end(); ++it) 
    902 //      delete [] it->second; 
    903 // 
    904 //  // Ok, now do something recursive 
    905 //  if (0 < level) 
    906 //  { 
    907 //    --level; 
    908 //    computeDistributedIndex(indexToInfoMapping, this->internalComm_, level); 
    909 //  } 
    910 //  else 
    911 //    index2InfoMapping_.swap(indexToInfoMapping); //index2InfoMapping_ = (indexToInfoMapping); 
    912 //} 
    913498 
    914499/*! 
     
    1107692  { 
    1108693    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
    1109               recvRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
     694              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    1110695    ++nRequest; 
    1111696  } 
     
    1121706  { 
    1122707    MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
    1123               sendRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
     708              sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    1124709    ++nRequest; 
    1125710  } 
Note: See TracChangeset for help on using the changeset viewer.