Timestamp: 10/06/15 17:17:11 (9 years ago)
Author: mhnguyen
Message:

Templated version of distributed hashed table

+) Implement the DHT in a more generic way so it can work with different types of information (see the usage sketch below)
+) Some old DHT code is kept as a reference (it will be deleted soon)

Test
+) On local machine, attached mode, 8 processes
+) test_remap passes and the result is correct
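
A minimal usage sketch of the templated class (not part of the changeset; the header name, the explicit HashXIOS<size_t> hash parameter and the CArray setup are assumptions based on the diff below):

#include "client_client_dht_template.hpp"
using namespace xios;

void buildMapping(const MPI_Comm& clientIntraComm)
{
  // Each client contributes its local (global index -> info) pairs; here the info
  // type is int, which is what the old non-templated CClientClientDHT handled.
  boost::unordered_map<size_t,int> indexInfoMap;
  indexInfoMap[42] = 3;          // e.g. global index 42 is attached to info value 3

  int hierarLvl = 2;             // depth of the communicator hierarchy
  CClientClientDHTTemplate<int, HashXIOS<size_t> > dht(indexInfoMap, clientIntraComm, hierarLvl);

  // Ask the DHT which info is attached to an arbitrary set of indices.
  CArray<size_t,1> indices(1);
  indices(0) = 42;
  dht.computeIndexInfoMapping(indices);
}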

File: 1 moved

Legend: lines prefixed with '-' were removed in r721, lines prefixed with '+' were added, unmarked lines are unchanged context, and '…' marks collapsed unchanged code.
  • XIOS/trunk/src/client_client_dht_template_impl.hpp

r720 → r721

 /*!
-   \file client_client_dht.cpp
+   \file client_client_dht_template_impl.hpp
    \author Ha NGUYEN
-   \since 15 Sep 2015
-   \date 15 Sep 2015
+   \since 05 Oct 2015
+   \date 05 Oct 2015
 
    \brief Distributed hashed table implementation.
 */
-#include "client_client_dht.hpp"
-#include <limits>
-#include <cmath>
-#include <boost/functional/hash.hpp>
+#include "client_client_dht_template.hpp"
 #include "utils.hpp"
 #include "mpi_tag.hpp"
…
 namespace xios
 {
-
-CClientClientDHT::CClientClientDHT(const boost::unordered_map<size_t,int>& indexInfoMap,
-                                   const MPI_Comm& clientIntraComm, bool isDataDistributed, int hierarLvl)
-  : intraCommRoot_(clientIntraComm), commLevel_(), isDataDistributed_(isDataDistributed),
-    nbLevel_(hierarLvl), globalIndexToServerMapping_(), globalIndexToInfoMappingLevel_()
-{
-  computeMPICommLevel(clientIntraComm);
-  int lvl = commLevel_.size() - 1;
-  computeDistributedIndex(indexInfoMap, commLevel_[lvl], lvl);
-}
-
-CClientClientDHT::~CClientClientDHT()
-{
-}
-
-/*!
-  Calculate MPI communicator for each level of hierarchy.
-  \param[in] mpiCommRoot MPI communicator of the level 0 (usually communicator of all clients)
-*/
-void CClientClientDHT::computeMPICommLevel(const MPI_Comm& mpiCommRoot)
-{
-  int nbProc;
-  MPI_Comm_size(mpiCommRoot,&nbProc);
-  if (nbLevel_ > nbProc) nbLevel_ = std::log(nbProc);
-  else if (1 > nbLevel_) nbLevel_ = 1;
-
-  commLevel_.push_back(mpiCommRoot);
-  divideMPICommLevel(mpiCommRoot, nbLevel_);
-}
-
-/*!
-  Divide each MPI communicator into sub-communicator. Recursive function
-  \param [in] mpiCommLevel MPI communicator of current level
-  \param [in] level current level
-*/
-void CClientClientDHT::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level)
-{
-  int clientRank;
-  MPI_Comm_rank(mpiCommLevel,&clientRank);
-
-   --level;
-  if (0 < level)
-  {
-   int color = clientRank % 2;
-   commLevel_.push_back(MPI_Comm());
-   MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back()));
-   divideMPICommLevel(commLevel_.back(), level);
-  }
+/*!
+  Constructor with initial distribution information and the corresponding index
+  Each client (process) holds a piece of information as well as the attached index, the index
+will be redistributed (projected) into size_t space as long as the associated information.
+  \param [in] indexInfoMap initial index and information mapping
+  \param [in] clientIntraComm communicator of clients
+  \param [in] hierarLvl level of hierarchy
+*/
+template<typename T, typename H>
+CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const boost::unordered_map<size_t,T>& indexInfoMap,
+                                                        const MPI_Comm& clientIntraComm,
+                                                        int hierarLvl)
+  : index2InfoMapping_(), indexToInfoMappingLevel_()
+{
+  this->computeMPICommLevel(clientIntraComm, hierarLvl);
+  int lvl = this->commLevel_.size() - 1;
+  computeDistributedIndex(indexInfoMap, this->commLevel_[lvl], lvl);
+}
+
+template<typename T, typename H>
+CClientClientDHTTemplate<T,H>::~CClientClientDHTTemplate()
+{
 }
 
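The removed computeMPICommLevel/divideMPICommLevel above show how the communicator hierarchy was built; the templated class now reaches the same functionality through this->computeMPICommLevel, presumably supplied by a base class. A standalone sketch of the splitting scheme (plain MPI, illustrative names, not the new base-class code):

#include <mpi.h>
#include <cmath>
#include <vector>

// Build nbLevel communicators: level 0 is the full communicator, each further
// level splits the previous one in two by rank parity (the scheme of the removed code).
std::vector<MPI_Comm> buildCommLevels(MPI_Comm root, int nbLevel)
{
  int nbProc;
  MPI_Comm_size(root, &nbProc);
  if (nbLevel > nbProc) nbLevel = std::log(nbProc);  // same clamp as the removed code
  if (nbLevel < 1) nbLevel = 1;

  std::vector<MPI_Comm> levels;
  levels.push_back(root);
  MPI_Comm current = root;
  for (int level = nbLevel; level > 1; --level)
  {
    int rank;
    MPI_Comm_rank(current, &rank);
    MPI_Comm sub;
    MPI_Comm_split(current, rank % 2, 0, &sub);      // halve the communicator
    levels.push_back(sub);
    current = sub;
  }
  return levels;
}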
     
   \param [in] indices indices a proc has
 */
-void CClientClientDHT::computeServerIndexMapping(const CArray<size_t,1>& indices)
-{
-  int lvl = commLevel_.size() - 1;
-  computeIndexMapping(indices, commLevel_[lvl], lvl);
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::computeIndexInfoMapping(const CArray<size_t,1>& indices)
+{
+  int lvl = this->commLevel_.size() - 1;
+  computeIndexInfoMappingLevel(indices, this->commLevel_[lvl], lvl);
   size_t size = indices.numElements();
   for (size_t idx = 0; idx < size; ++idx)
   {
-    int serverIdx = globalIndexToInfoMappingLevel_[indices(idx)];
+    int serverIdx = indexToInfoMappingLevel_[indices(idx)];
     indexGlobalOnServer_[serverIdx].push_back(indices(idx));
   }
…
   \param [in] level current level
 */
-void CClientClientDHT::computeIndexMapping(const CArray<size_t,1>& indices,
-                                           const MPI_Comm& commLevel,
-                                           int level)
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
+                                                                 const MPI_Comm& commLevel,
+                                                                 int level)
 {
   int nbClient, clientRank;
…
   std::map<int, std::vector<size_t> > client2ClientIndex;
 
-  // Number of global index whose mapping server can be found out thanks to index-server mapping
-  int nbIndexAlreadyOnClient = 0;
-
   // Number of global index whose mapping server are on other clients
-  int nbIndexSendToOthers = 0;
+  int nbIndexToSend = 0;
   HashXIOS<size_t> hashGlobalIndex;
   for (int i = 0; i < ssize; ++i)
…
       {
         client2ClientIndex[indexClient].push_back(index);
-        ++nbIndexSendToOthers;
+        ++nbIndexToSend;
       }
     }
   }
-  info << "level " << level << " nbIndexsendtoOther " << nbIndexSendToOthers << std::endl;
 
   int* sendBuff = new int[nbClient];
…
   MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
 
-  std::list<MPI_Request> sendRequest;
-  if (0 != nbIndexSendToOthers)
+  std::list<MPI_Request> sendIndexRequest;
+  if (0 != nbIndexToSend)
       for (it = itb; it != ite; ++it)
-         sendIndexToClients(it->first, it->second, commLevel, sendRequest);
-
-  int nbDemandingClient = recvBuff[clientRank], nbIndexServerReceived = 0;
+         sendIndexToClients(it->first, it->second, commLevel, sendIndexRequest);
+
+  int nbDemandingClient = recvBuff[clientRank], nbSendBuffInfoReceived = 0;
 
   // Receiving demand as well as the responds from other clients
   // The demand message contains global index; meanwhile the responds have server index information
   // Buffer to receive demand from other clients, it can be allocated or not depending whether it has demand(s)
-    // There are some cases we demand duplicate index so need to determine maxium size of demanding buffer
+  // There are some cases we demand duplicate index so need to determine maxium size of demanding buffer
   for (it = itb; it != ite; ++it) sendBuff[it->first] = (it->second).size();
   MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, commLevel);
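The pair of MPI_Allreduce calls above implements a simple counting pattern: every rank marks the peers it will message (and how many indices it will send them), and the sum reduction tells each rank how many clients will contact it and how large its receive buffer must be. A compact sketch of the first step, with illustrative names:

#include <mpi.h>
#include <map>
#include <vector>

// Return how many ranks will send this rank an index message, given the
// destination map this rank has already built (client2ClientIndex).
int countIncomingSenders(const std::map<int, std::vector<size_t> >& client2ClientIndex,
                         MPI_Comm comm)
{
  int nbClient, rank;
  MPI_Comm_size(comm, &nbClient);
  MPI_Comm_rank(comm, &rank);

  std::vector<int> sendBuff(nbClient, 0), recvBuff(nbClient, 0);
  for (std::map<int, std::vector<size_t> >::const_iterator it = client2ClientIndex.begin();
       it != client2ClientIndex.end(); ++it)
    sendBuff[it->first] = 1;                       // I will message this destination

  MPI_Allreduce(&sendBuff[0], &recvBuff[0], nbClient, MPI_INT, MPI_SUM, comm);
  return recvBuff[rank];                           // number of peers that will contact me
}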
     
   unsigned long* recvBuffIndex = 0;
   int maxNbIndexDemandedFromOthers = recvBuff[clientRank];
-//  if (!isDataDistributed_) maxNbIndexDemandedFromOthers = nbDemandingClient * nbIndexSendToOthers; //globalIndexToServerMapping_.size(); // Not very optimal but it's general
 
   if (0 != maxNbIndexDemandedFromOthers)
…
 
   // Buffer to receive respond from other clients, it can be allocated or not depending whether it demands other clients
-  int* recvBuffInfo = 0;
-  int nbIndexReceivedFromOthers = nbIndexSendToOthers;
+  InfoType* recvBuffInfo = 0;
+  int nbIndexReceivedFromOthers = nbIndexToSend;
   if (0 != nbIndexReceivedFromOthers)
-    recvBuffInfo = new int[nbIndexReceivedFromOthers];
+    recvBuffInfo = new InfoType[nbIndexReceivedFromOthers];
 
   std::map<int, MPI_Request>::iterator itRequest;
   std::vector<int> demandAlreadyReceived, repondAlreadyReceived;
 
-    // Counting of buffer for receiving global index
-  int countIndex = 0;
-
-  // Request returned by MPI_IRecv function about global index
-  std::map<int, MPI_Request> requestRecvIndex;
-
-  // Mapping client rank and the beginning position of receiving buffer for message of global index from this client
+
+  int countIndex = 0;  // Counting of buffer for receiving index
+  std::map<int, MPI_Request> requestRecvIndex; // Request returned by MPI_IRecv function about index
+
+  // Mapping client rank and the beginning position of receiving buffer for message of index from this client
   std::map<int, unsigned long*> indexBuffBegin;
 
…
 
   CArray<size_t,1> tmpGlobalIndexOnClient(maxNbIndexDemandedFromOthers);
+
   int k = 0;
-
-  while ((0 < nbDemandingClient) || (!sendRequest.empty()))
+  while ((0 < nbDemandingClient) || (!sendIndexRequest.empty()))
   {
     // Just check whether a client has any demand from other clients.
…
     }
 
-    testSendRequest(sendRequest);
+    testSendRequest(sendIndexRequest);
   }
 
…
   {
     --level;
-    computeIndexMapping(tmpGlobalIndexOnClient, commLevel_[level], level);
+    computeIndexInfoMappingLevel(tmpGlobalIndexOnClient, this->commLevel_[level], level);
   }
   else
-    globalIndexToInfoMappingLevel_ = globalIndexToServerMapping_;
-
-  std::map<int, std::vector<int> > client2ClientInfo;
+    indexToInfoMappingLevel_ = index2InfoMapping_;
+
+  std::map<int, std::vector<InfoType> > client2ClientInfo;
   std::list<MPI_Request> sendInfoRequest;
   std::map<int, std::vector<size_t> >::iterator itbSrc2Idx = src2Index.begin(), itSrc2Idx,
…
     for (int idx = 0; idx < srcIdx.size(); ++idx)
     {
-//      client2ClientInfo[clientSourceRank].push_back(globalIndexToServerMapping_[srcIdx[idx]]);
-      client2ClientInfo[clientSourceRank].push_back(globalIndexToInfoMappingLevel_[srcIdx[idx]]);
+      client2ClientInfo[clientSourceRank].push_back(indexToInfoMappingLevel_[srcIdx[idx]]);
     }
     sendInfoToClients(clientSourceRank, client2ClientInfo[clientSourceRank], commLevel, sendInfoRequest);
   }
 
-  boost::unordered_map<size_t,int> indexToInfoMapping;
-
-  // Counting of buffer for receiving server index
-  int countInfo = 0;
-
+  boost::unordered_map<size_t,InfoType> indexToInfoMapping;
+  int countInfo = 0; // Counting of buffer for receiving server index
   std::map<int, MPI_Request> requestRecvInfo;
 
   // Mapping client rank and the begining position of receiving buffer for message of server index from this client
-  std::map<int, int*> infoBuffBegin;
-
-  while ((!sendInfoRequest.empty()) || (nbIndexServerReceived < nbIndexReceivedFromOthers))
+  std::map<int, InfoType*> infoBuffBegin;
+
+  while ((!sendInfoRequest.empty()) || (nbSendBuffInfoReceived < nbIndexReceivedFromOthers))
   {
     testSendRequest(sendInfoRequest);
…
         MPI_Get_count(&statusInfo, MPI_INT, &count);
         int clientSourceRank = statusInfo.MPI_SOURCE;
-        int* beginBuff = infoBuffBegin[clientSourceRank];
-        std::vector<size_t>& globalIndexTmp = client2ClientIndex[clientSourceRank];
+        InfoType* beginBuff = infoBuffBegin[clientSourceRank];
+        std::vector<size_t>& indexTmp = client2ClientIndex[clientSourceRank];
         for (int i = 0; i < count; ++i)
         {
-          indexToInfoMapping[globalIndexTmp[i]] = *(beginBuff+i);
-//          globalIndexToServerMapping_[globalIndexTmp[i]] = *(beginBuff+i);
+          indexToInfoMapping[indexTmp[i]] = *(beginBuff+i);
         }
-        nbIndexServerReceived += count;
+        nbSendBuffInfoReceived += count;
         repondAlreadyReceived.push_back(clientSourceRank);
       }
…
   }
 
-  globalIndexToInfoMappingLevel_ = indexToInfoMapping;
-  info << "temp " << tmpGlobalIndexOnClient << std::endl;
+  indexToInfoMappingLevel_ = indexToInfoMapping;
   if (0 != maxNbIndexDemandedFromOthers) delete [] recvBuffIndex;
   if (0 != nbIndexReceivedFromOthers) delete [] recvBuffInfo;
…
   Compute the hash index distribution of whole size_t space then each client will have a range of this distribution
 */
-void CClientClientDHT::computeHashIndex(std::vector<size_t>& hashedIndex, int nbClient)
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::computeHashIndex(std::vector<size_t>& hashedIndex, int nbClient)
 {
   // Compute range of hash index for each client
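The body of computeHashIndex is unchanged and therefore collapsed in the diff; per the comment above, it gives each client one contiguous range of the size_t hash space. A possible sketch of such an even partition (the boundary handling here is an assumption, not the XIOS implementation):

#include <cstddef>
#include <limits>
#include <vector>

// Partition [0, max size_t] into nbClient contiguous ranges; client i owns
// hash values in [hashedIndex[i], hashedIndex[i+1]).
void computeHashRanges(std::vector<std::size_t>& hashedIndex, int nbClient)
{
  std::size_t maxHash = std::numeric_limits<std::size_t>::max();
  std::size_t step = maxHash / nbClient;

  hashedIndex.resize(nbClient + 1);
  hashedIndex[0] = 0;
  for (int i = 1; i < nbClient; ++i) hashedIndex[i] = hashedIndex[i-1] + step;
  hashedIndex[nbClient] = maxHash;   // last client absorbs the rounding remainder
}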
     
   \param [in] level current level
 */
-void CClientClientDHT::computeDistributedIndex(const boost::unordered_map<size_t,int>& indexInfoMap,
-                                               const MPI_Comm& commLevel,
-                                               int level)
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const boost::unordered_map<size_t,T>& indexInfoMap,
+                                                            const MPI_Comm& commLevel,
+                                                            int level)
 {
   int nbClient, clientRank;
…
   // Compute size of sending and receving buffer
   std::map<int, std::vector<size_t> > client2ClientIndex;
-  std::map<int, std::vector<int> > client2ClientInfo;
+  std::map<int, std::vector<InfoType> > client2ClientInfo;
 
   std::vector<size_t>::const_iterator itbClientHash = hashedIndex.begin(), itClientHash,
                                       iteClientHash = hashedIndex.end();
-  boost::unordered_map<size_t,int>::const_iterator it  = indexInfoMap.begin(),
-                                                   ite = indexInfoMap.end();
+  typename boost::unordered_map<size_t,InfoType>::const_iterator it  = indexInfoMap.begin(),
+                                                                 ite = indexInfoMap.end();
   HashXIOS<size_t> hashGlobalIndex;
   for (; it != ite; ++it)
…
   int recvNbIndexCount = recvNbIndexBuff[clientRank];
   unsigned long* recvIndexBuff = new unsigned long[recvNbIndexCount];
-  int* recvInfoBuff = new int[recvNbIndexCount];
+  InfoType* recvInfoBuff = new InfoType[recvNbIndexCount];
 
   // If a client holds information about index and the corresponding which don't belong to it,
…
   for (; itIndex != iteIndex; ++itIndex)
     sendIndexToClients(itIndex->first, itIndex->second, commLevel, sendRequest);
-  std::map<int, std::vector<int> >::iterator itInfo  = client2ClientInfo.begin(),
-                                             iteInfo = client2ClientInfo.end();
+  typename std::map<int, std::vector<InfoType> >::iterator itInfo  = client2ClientInfo.begin(),
+                                                           iteInfo = client2ClientInfo.end();
   for (; itInfo != iteInfo; ++itInfo)
     sendInfoToClients(itInfo->first, itInfo->second, commLevel, sendRequest);
…
   std::map<int, int*> infoBuffBegin;
 
-  boost::unordered_map<size_t,int> indexToInfoMapping;
+  boost::unordered_map<size_t,InfoType> indexToInfoMapping;
 
   // Now each client trys to listen to demand from others.
…
     testSendRequest(sendRequest);
     probeIndexMessageFromClients(recvIndexBuff, recvNbIndexCount,
-                                       countIndex, indexBuffBegin,
-                                       requestRecvIndex, commLevel);
+                                 countIndex, indexBuffBegin,
+                                 requestRecvIndex, commLevel);
     // Processing complete request
     for (itRequestIndex = requestRecvIndex.begin();
…
     {
       int rank = itRequestIndex->first;
-      int count = computeBuffCountIndexGlobal(itRequestIndex->second);
+      int count = computeBuffCountIndex(itRequestIndex->second);
       if (0 != count)
         countBuffIndex[rank] = count;
…
 
     probeInfoMessageFromClients(recvInfoBuff, recvNbIndexCount,
-                                       countInfo, infoBuffBegin,
-                                       requestRecvInfo, commLevel);
+                                countInfo, infoBuffBegin,
+                                requestRecvInfo, commLevel);
     for (itRequestInfo = requestRecvInfo.begin();
          itRequestInfo != requestRecvInfo.end();
…
     {
       int rank = itRequestInfo->first;
-      int count = computeBuffCountIndexServer(itRequestInfo->second);
+      int count = computeBuffCountInfo(itRequestInfo->second);
       if (0 != count)
         countBuffInfo[rank] = count;
…
         int count = it->second;
         for (int i = 0; i < count; ++i)
-           indexToInfoMapping.insert(std::make_pair<size_t,int>(*(indexBuffBegin[rank]+i),*(infoBuffBegin[rank]+i)));
+           indexToInfoMapping.insert(std::make_pair<size_t,InfoType>(*(indexBuffBegin[rank]+i),*(infoBuffBegin[rank]+i)));
         processedList.push_back(rank);
         --recvNbClient;
…
   {
     --level;
-    computeDistributedIndex(indexToInfoMapping, commLevel_[level], level);
+    computeDistributedIndex(indexToInfoMapping, this->commLevel_[level], level);
   }
   else
-    globalIndexToServerMapping_ = indexToInfoMapping;
+    index2InfoMapping_ = indexToInfoMapping;
 }
 
…
   \param [in] intraComm communicator
 */
-void CClientClientDHT::probeIndexMessageFromClients(unsigned long* recvIndexBuff,
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::probeIndexMessageFromClients(unsigned long* recvIndexBuff,
                                                     const int recvNbIndexCount,
                                                     int& countIndex,
…
   \param [in] intraComm communicator
 */
-void CClientClientDHT::probeInfoMessageFromClients(int* recvInfoBuff,
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::probeInfoMessageFromClients(T* recvInfoBuff,
                                                    const int recvNbIndexCount,
                                                    int& countInfo,
-                                                   std::map<int, int*>& infoBuffBegin,
+                                                   std::map<int, T*>& infoBuffBegin,
                                                    std::map<int, MPI_Request>& requestRecvInfo,
                                                    const MPI_Comm& intraComm)
…
   if ((true == flagInfo) && (countInfo < recvNbIndexCount))
   {
-    MPI_Get_count(&statusInfo, MPI_INT, &count);
-    infoBuffBegin.insert(std::make_pair<int, int*>(statusInfo.MPI_SOURCE, recvInfoBuff+countInfo));
-    MPI_Irecv(recvInfoBuff+countInfo, count, MPI_INT,
+    MPI_Get_count(&statusInfo, MPI_CHAR, &count);
+    infoBuffBegin.insert(std::make_pair<int, T*>(statusInfo.MPI_SOURCE, recvInfoBuff+countInfo));
+    MPI_Irecv(recvInfoBuff+countInfo, count, MPI_CHAR,
               statusInfo.MPI_SOURCE, MPI_DHT_INFO, intraComm,
               &requestRecvInfo[statusInfo.MPI_SOURCE]);
 
-    countInfo += count;
+    countInfo += count/infoTypeSize;
   }
 }
…
   \param [in] requestSendIndex list of sending request
 */
-void CClientClientDHT::sendIndexToClients(int clientDestRank, std::vector<size_t>& indices,
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, std::vector<size_t>& indices,
                                           const MPI_Comm& clientIntraComm,
                                           std::list<MPI_Request>& requestSendIndex)
…
   \param [in] requestSendInfo list of sending request
 */
-void CClientClientDHT::sendInfoToClients(int clientDestRank, std::vector<int>& info,
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, std::vector<T>& info,
                                          const MPI_Comm& clientIntraComm,
                                          std::list<MPI_Request>& requestSendInfo)
…
   MPI_Request request;
   requestSendInfo.push_back(request);
-  MPI_Isend(&(info)[0], (info).size(), MPI_INT,
+
+  MPI_Isend(&(info)[0], info.size() * infoTypeSize, MPI_CHAR,
             clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back()));
 }
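This send-side change, together with the MPI_CHAR probe/receive above, is the core of the templating: a value of an arbitrary info type is shipped as raw bytes, and element counts are converted through infoTypeSize. A sketch of the same pattern for a trivially copyable InfoType, assuming infoTypeSize == sizeof(InfoType) (illustrative helpers, not the class methods):

#include <mpi.h>
#include <vector>

// Send a vector of InfoType as raw bytes; the matching receive counts bytes
// and divides by infoTypeSize to recover the number of elements.
template<typename InfoType>
void isendInfoAsBytes(std::vector<InfoType>& info, int dest, int tag,
                      MPI_Comm comm, MPI_Request& request)
{
  const int infoTypeSize = sizeof(InfoType);
  MPI_Isend(&info[0], info.size() * infoTypeSize, MPI_CHAR, dest, tag, comm, &request);
}

template<typename InfoType>
int recvInfoElementCount(MPI_Status& status)
{
  const int infoTypeSize = sizeof(InfoType);
  int byteCount = 0;
  MPI_Get_count(&status, MPI_CHAR, &byteCount);
  return byteCount / infoTypeSize;   // elements of InfoType actually received
}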
     
   \param [in] sendRequest sending request to verify
 */
-void CClientClientDHT::testSendRequest(std::list<MPI_Request>& sendRequest)
+template<typename T, typename H>
+void CClientClientDHTTemplate<T,H>::testSendRequest(std::list<MPI_Request>& sendRequest)
 {
   int flag = 0;
…
 
 /*!
-  Process the received request. Pushing global index and server index into map
-  \param[in] buffIndexGlobal pointer to the begining of buffer containing global index
-  \param[in] buffIndexServer pointer to the begining of buffer containing server index
-  \param[in] count size of received message
-*/
-//void CClientClientDHT::processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count)
-//{
-//  for (int i = 0; i < count; ++i)
-//    globalIndexToServerMapping_.insert(std::make_pair<size_t,int>(*(buffIndexGlobal+i),*(buffIndexServer+i)));
-//}
-
-/*!
   Compute size of message containing global index
   \param[in] requestRecv request of message
 */
-int CClientClientDHT::computeBuffCountIndexGlobal(MPI_Request& requestRecv)
+template<typename T, typename H>
+int CClientClientDHTTemplate<T,H>::computeBuffCountIndex(MPI_Request& requestRecv)
 {
   int flag, count = 0;
…
   \param[in] requestRecv request of message
 */
-int CClientClientDHT::computeBuffCountIndexServer(MPI_Request& requestRecv)
+template<typename T, typename H>
+int CClientClientDHTTemplate<T,H>::computeBuffCountInfo(MPI_Request& requestRecv)
 {
   int flag, count = 0;
…
   if (true == flag)
   {
-    MPI_Get_count(&status, MPI_INT, &count);
-  }
-
-  return count;
-}
-
-}
+    MPI_Get_count(&status, MPI_CHAR, &count);
+  }
+
+  return (count/infoTypeSize);
+}
+
+}