source: XIOS/trunk/src/client_server_mapping_distributed.cpp @ 584

Last change on this file since 584 was 584, checked in by mhnguyen, 9 years ago

Implementing new hash algorithm and fixing bug related to zoom

+) Replace the boost hash with Jenkins' hash algorithm (see the illustrative sketch below)
+) Domain: if an attribute is non-empty for one client, it should also be non-empty for the others, in spite of zoom
+) Replace the way of finding the number of clients connecting to a server, to make sure every server receives a message

Test
+) On Curie
+) test_client: passed; results are the same as before
+) test_complete: passed; results are partially the same, the differing part comes from the added working operation

File size: 21.6 KB
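For context, the "hash algorithm of Jenkins" mentioned in the changeset message generally refers to the Jenkins one-at-a-time hash family. The sketch below is only an illustration of that family under this assumption; the functor name OneAtATimeHash is hypothetical, and the actual HashXIOS implementation used in this file lives in utils.hpp and may differ.

#include <cstddef>

// Illustrative Jenkins one-at-a-time hash over the raw bytes of a key.
// OneAtATimeHash is a hypothetical name; the real functor in XIOS is HashXIOS (utils.hpp).
template <typename T>
struct OneAtATimeHash
{
  std::size_t operator()(const T& key) const
  {
    const unsigned char* bytes = reinterpret_cast<const unsigned char*>(&key);
    std::size_t hash = 0;
    for (std::size_t i = 0; i < sizeof(T); ++i)
    {
      hash += bytes[i];       // mix in one byte at a time
      hash += (hash << 10);
      hash ^= (hash >> 6);
    }
    hash += (hash << 3);      // final avalanche
    hash ^= (hash >> 11);
    hash += (hash << 15);
    return hash;
  }
};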
/*!
   \file client_server_mapping_distributed.cpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between client index and server index.
   Clients pre-calculate all information of the server distribution.
*/
#include "client_server_mapping_distributed.hpp"
#include <limits>
#include <boost/functional/hash.hpp>
#include "utils.hpp"

namespace xios
{

CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                                                 const MPI_Comm& clientIntraComm)
  : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0),
    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_()
{
  clientIntraComm_ = clientIntraComm;
  MPI_Comm_size(clientIntraComm,&(nbClient_));
  MPI_Comm_rank(clientIntraComm,&clientRank_);
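  // Two-step initialisation: first split the size_t hash space into one range per client,
  // then redistribute the (global index, server) pairs so that each client stores the pairs
  // whose hashed global index falls inside its own range.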
  computeHashIndex();
  computeDistributedServerIndex(globalIndexOfServer, clientIntraComm);
}

CClientServerMappingDistributed::~CClientServerMappingDistributed()
{
}

/*!
   Compute the mapping between the global indices held by the client and the servers they are sent to.
   \param [in] globalIndexOnClient global index the client holds
*/
//void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
//{
//  int ssize = globalIndexOnClient.numElements();
//  CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize);
//  for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i;
//
//  this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient);
//  delete localIndexOnClient;
//}

/*!
   Compute the mapping between the global indices held by the client and the servers they are sent to.
   \param [in] globalIndexOnClient global index the client holds
   \param [in] localIndexOnClient local index on the client
*/
//void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,
//                                                                const CArray<int,1>& localIndexOnClient)
void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
{
  size_t ssize = globalIndexOnClient.numElements(), hashedIndex;

  std::vector<size_t>::const_iterator itbClientHash = indexClientHash_.begin(), itClientHash,
                                      iteClientHash = indexClientHash_.end();
  std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
  std::map<int, std::vector<int> > client2ClientIndexServer;
//  std::map<int, std::vector<int> > clientLocalIndex;

  // Number of global indices whose server mapping can be resolved locally thanks to the index-server mapping
  int nbIndexAlreadyOnClient = 0;

  // Number of global indices whose server mapping is held by other clients
  int nbIndexSendToOthers = 0;
  HashXIOS<size_t> hashGlobalIndex;
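  // For each global index, hash it and locate (with upper_bound on indexClientHash_) the client
  // whose hash range contains it. If that client is this one, the server can be read directly
  // from globalIndexToServerMapping_; otherwise the index is queued in client2ClientIndexGlobal
  // to be sent as a demand to the owning client.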
  for (int i = 0; i < ssize; ++i)
  {
    size_t globalIndexClient = globalIndexOnClient(i);
    hashedIndex  = hashGlobalIndex(globalIndexClient);
    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedIndex);
    if (iteClientHash != itClientHash)
    {
      int indexClient = std::distance(itbClientHash, itClientHash)-1;

      if (clientRank_ == indexClient)
      {
        (indexGlobalOnServer_[globalIndexToServerMapping_[globalIndexClient]]).push_back(globalIndexClient);
//        (localIndexSend2Server_[globalIndexToServerMapping_[globalIndexClient]]).push_back(localIndexOnClient(i));
        ++nbIndexAlreadyOnClient;
      }
      else
      {
        client2ClientIndexGlobal[indexClient].push_back(globalIndexClient);
//        clientLocalIndex[indexClient].push_back(i);
        ++nbIndexSendToOthers;
      }
    }
  }

  int* sendBuff = new int[nbClient_];
  for (int i = 0; i < nbClient_; ++i) sendBuff[i] = 0;
  std::map<int, std::vector<size_t> >::iterator it  = client2ClientIndexGlobal.begin(),
                                                ite = client2ClientIndexGlobal.end();
  for (; it != ite; ++it) sendBuff[it->first] = 1;
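  // Each entry of sendBuff is 1 if this client will send a demand to that client. Summing these
  // flags over all clients with MPI_Allreduce gives, in recvBuff[clientRank_], the number of
  // clients that will send a demand to this client.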
  int* recvBuff = new int[nbClient_];
  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm_);

  std::list<MPI_Request> sendRequest;
  if (0 != nbIndexSendToOthers)
      for (it = client2ClientIndexGlobal.begin(); it != ite; ++it)
         sendIndexGlobalToClients(it->first, it->second, clientIntraComm_, sendRequest);

  // Receive the demands as well as the responses from other clients.
  // A demand message contains global indices; a response carries the corresponding server indices.
  // Buffer to receive demands from other clients; it is only allocated if demands can actually arrive
  unsigned long* recvBuffIndexGlobal = 0;
  int maxNbIndexDemandedFromOthers = (nbIndexAlreadyOnClient >= globalIndexToServerMapping_.size())
                                   ? 0 : (globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient);
  if (0 != maxNbIndexDemandedFromOthers)
    recvBuffIndexGlobal = new unsigned long[maxNbIndexDemandedFromOthers];

  // Buffer to receive responses from other clients; it is only allocated if this client sent demands
  int* recvBuffIndexServer = 0;
  int nbIndexReceivedFromOthers = nbIndexSendToOthers;
  if (0 != nbIndexReceivedFromOthers)
    recvBuffIndexServer = new int[nbIndexReceivedFromOthers];

  resetReceivingRequestAndCount();
  std::map<int, MPI_Request>::iterator itRequest;
  std::vector<int> demandAlreadyReceived, repondAlreadyReceived;
  int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;

  resetReceivingRequestAndCount();
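  // Event loop: keep going until every demanding client has been answered, every send has
  // completed and every expected response has been received. Demands (global indices, tag 15)
  // and responses (server indices, tag 12) are probed and processed as they arrive.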
  while ((0 < nbDemandingClient) || (!sendRequest.empty()) ||
         (nbIndexServerReceived < nbIndexReceivedFromOthers))
  {
    // Check whether this client has received any demand from other clients.
    // If it has, it must send responses back to these client(s)
    probeIndexGlobalMessageFromClients(recvBuffIndexGlobal, maxNbIndexDemandedFromOthers);
    if (0 < nbDemandingClient)
    {
      for (itRequest = requestRecvIndexGlobal_.begin();
           itRequest != requestRecvIndexGlobal_.end(); ++itRequest)
      {
        int flagIndexGlobal, count;
        MPI_Status statusIndexGlobal;

        MPI_Test(&(itRequest->second), &flagIndexGlobal, &statusIndexGlobal);
        if (true == flagIndexGlobal)
        {
          MPI_Get_count(&statusIndexGlobal, MPI_UNSIGNED_LONG, &count);
          int clientSourceRank = statusIndexGlobal.MPI_SOURCE;
          unsigned long* beginBuff = indexGlobalBuffBegin_[clientSourceRank];
          for (int i = 0; i < count; ++i)
          {
            client2ClientIndexServer[clientSourceRank].push_back(globalIndexToServerMapping_[*(beginBuff+i)]);
          }
          sendIndexServerToClients(clientSourceRank, client2ClientIndexServer[clientSourceRank], clientIntraComm_, sendRequest);
          --nbDemandingClient;

          demandAlreadyReceived.push_back(clientSourceRank);
        }
      }
      for (int i = 0; i< demandAlreadyReceived.size(); ++i)
        requestRecvIndexGlobal_.erase(demandAlreadyReceived[i]);
    }

    testSendRequest(sendRequest);

    // A client may also need to listen for responses from other clients about server information.
    // With this information, it can fill in its server-global index map.
    probeIndexServerMessageFromClients(recvBuffIndexServer, nbIndexReceivedFromOthers);
    for (itRequest = requestRecvIndexServer_.begin();
         itRequest != requestRecvIndexServer_.end();
         ++itRequest)
    {
      int flagIndexServer, count;
      MPI_Status statusIndexServer;

      MPI_Test(&(itRequest->second), &flagIndexServer, &statusIndexServer);
      if (true == flagIndexServer)
      {
        MPI_Get_count(&statusIndexServer, MPI_INT, &count);
        int clientSourceRank = statusIndexServer.MPI_SOURCE;
        int* beginBuff = indexServerBuffBegin_[clientSourceRank];
        std::vector<size_t>& globalIndexTmp = client2ClientIndexGlobal[clientSourceRank];
//        std::vector<int>& localIndexTmp = clientLocalIndex[clientSourceRank];
        for (int i = 0; i < count; ++i)
        {
          (indexGlobalOnServer_[*(beginBuff+i)]).push_back(globalIndexTmp[i]);
//          (localIndexSend2Server_[*(beginBuff+i)]).push_back(localIndexOnClient(localIndexTmp[i]));
        }
        nbIndexServerReceived += count;
        repondAlreadyReceived.push_back(clientSourceRank);
      }
    }

    for (int i = 0; i< repondAlreadyReceived.size(); ++i)
      requestRecvIndexServer_.erase(repondAlreadyReceived[i]);
    repondAlreadyReceived.resize(0);
  }

  if (0 != recvBuffIndexGlobal) delete [] recvBuffIndexGlobal;
  if (0 != recvBuffIndexServer) delete [] recvBuffIndexServer;
  delete [] sendBuff;
  delete [] recvBuff;
}

/*!
  Compute the hash index distribution of the whole size_t space; each client is assigned one range of this distribution
*/
void CClientServerMappingDistributed::computeHashIndex()
{
  // Compute the range of hash indices for each client
  indexClientHash_.resize(nbClient_+1);
  size_t nbHashIndexMax = std::numeric_limits<size_t>::max();
  size_t nbHashIndex;
  indexClientHash_[0] = 0;
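  // indexClientHash_ holds nbClient_+1 boundaries: client i owns the hash values in
  // [indexClientHash_[i], indexClientHash_[i+1]). Each range holds roughly
  // nbHashIndexMax / nbClient_ values, with the remainder spread over the lower ranges.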
  for (int i = 1; i < nbClient_; ++i)
  {
    nbHashIndex = nbHashIndexMax / nbClient_;
    if (i < (nbHashIndexMax%nbClient_)) ++nbHashIndex;
    indexClientHash_[i] = indexClientHash_[i-1] + nbHashIndex;
  }
  indexClientHash_[nbClient_] = nbHashIndexMax;
}

/*!
  Compute the distribution of global indices over the servers.
  Each client already holds a piece of information about a global index and the corresponding server.
This information is redistributed over the size_t space so that each client owns a specific range of indices.
After the redistribution, each client holds, within its range of indices, all necessary information about the servers.
  \param [in] globalIndexOfServer global index and the corresponding server
  \param [in] clientIntraComm clients joining the distribution process.
*/
void CClientServerMappingDistributed::computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                                                    const MPI_Comm& clientIntraComm)
{
  int* sendBuff = new int[nbClient_];
  int* sendNbIndexBuff = new int[nbClient_];
  for (int i = 0; i < nbClient_; ++i)
  {
    sendBuff[i] = 0; sendNbIndexBuff[i] = 0;
  }

  // Compute the size of the sending and receiving buffers
  std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
  std::map<int, std::vector<int> > client2ClientIndexServer;

  std::vector<size_t>::const_iterator itbClientHash = indexClientHash_.begin(), itClientHash,
                                      iteClientHash = indexClientHash_.end();
  boost::unordered_map<size_t,int>::const_iterator it  = globalIndexOfServer.begin(),
                                                   ite = globalIndexOfServer.end();
  HashXIOS<size_t> hashGlobalIndex;
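  // Each (global index, server) pair is hashed; pairs whose hash falls inside this client's
  // range go straight into globalIndexToServerMapping_, the others are queued per destination
  // client, together with a 0/1 flag and a count used to size the exchange below.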
  for (; it != ite; ++it)
  {
    size_t hashIndex = hashGlobalIndex(it->first);
    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
    if (itClientHash != iteClientHash)
    {
      int indexClient = std::distance(itbClientHash, itClientHash)-1;
      if (clientRank_ == indexClient)
      {
        globalIndexToServerMapping_.insert(std::make_pair<size_t,int>(it->first, it->second));
      }
      else
      {
        sendBuff[indexClient] = 1;
        ++sendNbIndexBuff[indexClient];
        client2ClientIndexGlobal[indexClient].push_back(it->first);
        client2ClientIndexServer[indexClient].push_back(it->second);
      }
    }
  }

  // Calculate from how many clients each client receives messages.
  int* recvBuff = new int[nbClient_];
  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm);
  int recvNbClient = recvBuff[clientRank_];

  // Calculate the size of the buffers for receiving messages
  int* recvNbIndexBuff = new int[nbClient_];
  MPI_Allreduce(sendNbIndexBuff, recvNbIndexBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm);
  int recvNbIndexCount = recvNbIndexBuff[clientRank_];
  unsigned long* recvIndexGlobalBuff = new unsigned long[recvNbIndexCount];
  int* recvIndexServerBuff = new int[recvNbIndexCount];

  // If a client holds information about global indices and servers which do not belong to it,
  // it sends this information to the correct clients.
  // The contents of the messages are the global indices and their corresponding server indices
  std::list<MPI_Request> sendRequest;
  std::map<int, std::vector<size_t> >::iterator itGlobal  = client2ClientIndexGlobal.begin(),
                                                iteGlobal = client2ClientIndexGlobal.end();
  for ( ;itGlobal != iteGlobal; ++itGlobal)
    sendIndexGlobalToClients(itGlobal->first, itGlobal->second, clientIntraComm, sendRequest);
  std::map<int, std::vector<int> >::iterator itServer  = client2ClientIndexServer.begin(),
                                             iteServer = client2ClientIndexServer.end();
  for (; itServer != iteServer; ++itServer)
    sendIndexServerToClients(itServer->first, itServer->second, clientIntraComm, sendRequest);

  std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer;
  std::map<int, int> countBuffIndexServer, countBuffIndexGlobal;
  std::vector<int> processedList;

  bool isFinished = (0 == recvNbClient) ? true : false;

  // Just to make sure that, before listening for messages, all index counters and receiving requests have been reset
  resetReceivingRequestAndCount();

  // Now each client tries to listen for demands from the others.
  // When a message arrives, it is processed: the global indices and corresponding servers are pushed into its map
  while (!isFinished || (!sendRequest.empty()))
  {
    testSendRequest(sendRequest);
    probeIndexGlobalMessageFromClients(recvIndexGlobalBuff, recvNbIndexCount);

    // Process completed requests
    for (itRequestIndexGlobal = requestRecvIndexGlobal_.begin();
         itRequestIndexGlobal != requestRecvIndexGlobal_.end();
         ++itRequestIndexGlobal)
    {
      int rank = itRequestIndexGlobal->first;
      int countIndexGlobal = computeBuffCountIndexGlobal(itRequestIndexGlobal->second);
      if (0 != countIndexGlobal)
        countBuffIndexGlobal[rank] = countIndexGlobal;
    }

    probeIndexServerMessageFromClients(recvIndexServerBuff, recvNbIndexCount);
    for (itRequestIndexServer = requestRecvIndexServer_.begin();
         itRequestIndexServer != requestRecvIndexServer_.end();
         ++itRequestIndexServer)
    {
      int rank = itRequestIndexServer->first;
      int countIndexServer = computeBuffCountIndexServer(itRequestIndexServer->second);
      if (0 != countIndexServer)
        countBuffIndexServer[rank] = countIndexServer;
    }

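    // A pair of messages from the same source rank is processed only once both the global-index
    // part and the server-index part have arrived; countBuffIndexGlobal/countBuffIndexServer
    // record the completed receives per rank so the two can be matched below.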
    for (std::map<int, int>::iterator it = countBuffIndexGlobal.begin();
                                      it != countBuffIndexGlobal.end(); ++it)
    {
      int rank = it->first;
      if (countBuffIndexServer.end() != countBuffIndexServer.find(rank))
      {
        processReceivedRequest(indexGlobalBuffBegin_[rank], indexServerBuffBegin_[rank], it->second);
        processedList.push_back(rank);
        --recvNbClient;
      }
    }

    for (int i = 0; i < processedList.size(); ++i)
    {
      requestRecvIndexServer_.erase(processedList[i]);
      requestRecvIndexGlobal_.erase(processedList[i]);
      countBuffIndexGlobal.erase(processedList[i]);
      countBuffIndexServer.erase(processedList[i]);
    }

    if (0 == recvNbClient) isFinished = true;
  }

  delete [] sendBuff;
  delete [] sendNbIndexBuff;
  delete [] recvBuff;
  delete [] recvNbIndexBuff;
  delete [] recvIndexGlobalBuff;
  delete [] recvIndexServerBuff;
}

/*!
  Probe and receive messages containing global indices from other clients.
  Each client can send a message of global indices to other clients so they can fill their maps.
Each client probes the messages in its queue; when a message is ready, it is received into the receiving buffer
  \param [in] recvIndexGlobalBuff buffer dedicated to receiving global indices
  \param [in] recvNbIndexCount size of the buffer
*/
void CClientServerMappingDistributed::probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount)
{
  MPI_Status statusIndexGlobal;
  int flagIndexGlobal, count;

  // Probing for global index
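  // A non-blocking MPI_Iprobe looks for a pending demand (tag 15) from any source. If one is found
  // and the shared buffer still has room, an MPI_Irecv is posted into the next free slice of
  // recvIndexGlobalBuff; indexGlobalBuffBegin_ remembers where each source's data starts so it can
  // be read back once the corresponding request completes.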
  MPI_Iprobe(MPI_ANY_SOURCE, 15, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal);
  if ((true == flagIndexGlobal) && (countIndexGlobal_ < recvNbIndexCount))
  {
    MPI_Get_count(&statusIndexGlobal, MPI_UNSIGNED_LONG, &count);
    indexGlobalBuffBegin_.insert(std::make_pair<int, unsigned long*>(statusIndexGlobal.MPI_SOURCE, recvIndexGlobalBuff+countIndexGlobal_));
    MPI_Irecv(recvIndexGlobalBuff+countIndexGlobal_, count, MPI_UNSIGNED_LONG,
              statusIndexGlobal.MPI_SOURCE, 15, clientIntraComm_,
              &requestRecvIndexGlobal_[statusIndexGlobal.MPI_SOURCE]);
    countIndexGlobal_ += count;
  }
}

/*!
  Probe and receive messages containing server indices from other clients.
  Each client can send a message of server indices to other clients so they can fill their maps.
Each client probes the messages in its queue; when a message is ready, it is received into the receiving buffer
  \param [in] recvIndexServerBuff buffer dedicated to receiving server indices
  \param [in] recvNbIndexCount size of the buffer
*/
void CClientServerMappingDistributed::probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount)
{
  MPI_Status statusIndexServer;
  int flagIndexServer, count;

  // Probing for server index
  MPI_Iprobe(MPI_ANY_SOURCE, 12, clientIntraComm_, &flagIndexServer, &statusIndexServer);
  if ((true == flagIndexServer) && (countIndexServer_ < recvNbIndexCount))
  {
    MPI_Get_count(&statusIndexServer, MPI_INT, &count);
    indexServerBuffBegin_.insert(std::make_pair<int, int*>(statusIndexServer.MPI_SOURCE, recvIndexServerBuff+countIndexServer_));
    MPI_Irecv(recvIndexServerBuff+countIndexServer_, count, MPI_INT,
              statusIndexServer.MPI_SOURCE, 12, clientIntraComm_,
              &requestRecvIndexServer_[statusIndexServer.MPI_SOURCE]);

    countIndexServer_ += count;
  }
}

/*!
  Send a message containing global indices to a client
  \param [in] clientDestRank rank of the destination client
  \param [in] indexGlobal global indices to send
  \param [in] clientIntraComm intra-communicator of the clients
  \param [in] requestSendIndexGlobal list of sending requests
*/
void CClientServerMappingDistributed::sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                                                               const MPI_Comm& clientIntraComm,
                                                               std::list<MPI_Request>& requestSendIndexGlobal)
{
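  // The message is tagged 15 so that probeIndexGlobalMessageFromClients can match it on the
  // receiving side (server indices use tag 12). The MPI_Request is stored in the caller's list
  // and completed later by testSendRequest.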
  MPI_Request request;
  requestSendIndexGlobal.push_back(request);
  MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG,
            clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back()));
}

/*!
  Send a message containing server indices to a client
  \param [in] clientDestRank rank of the destination client
  \param [in] indexServer server indices to send
  \param [in] clientIntraComm intra-communicator of the clients
  \param [in] requestSendIndexServer list of sending requests
*/
void CClientServerMappingDistributed::sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
                                                               const MPI_Comm& clientIntraComm,
                                                               std::list<MPI_Request>& requestSendIndexServer)
{
  MPI_Request request;
  requestSendIndexServer.push_back(request);
  MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT,
            clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back()));
}

/*!
  Verify the status of the pending send requests
  \param [in] sendRequest list of send requests to verify
*/
void CClientServerMappingDistributed::testSendRequest(std::list<MPI_Request>& sendRequest)
{
  int flag = 0;
  MPI_Status status;
  std::list<MPI_Request>::iterator itRequest;
  int sizeListRequest = sendRequest.size();
  int idx = 0;
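  // Make one pass per request that was initially in the list; each pass scans the list and
  // erases at most one completed request, so the iterator used for erasing is never invalidated
  // while the scan is still running.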
  while (idx < sizeListRequest)
  {
    bool isErased = false;
    for (itRequest = sendRequest.begin(); itRequest != sendRequest.end(); ++itRequest)
    {
      MPI_Test(&(*itRequest), &flag, &status);
      if (true == flag)
      {
        isErased = true;
        break;
      }
    }
    if (true == isErased) sendRequest.erase(itRequest);
    ++idx;
  }
}

/*!
  Process a received request: push the global indices and server indices into the map
  \param[in] buffIndexGlobal pointer to the beginning of the buffer containing global indices
  \param[in] buffIndexServer pointer to the beginning of the buffer containing server indices
  \param[in] count size of the received message
*/
void CClientServerMappingDistributed::processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count)
{
  for (int i = 0; i < count; ++i)
    globalIndexToServerMapping_.insert(std::make_pair<size_t,int>(*(buffIndexGlobal+i),*(buffIndexServer+i)));
}

/*!
  Compute the size of a received message containing global indices
  \param[in] requestRecv receiving request of the message
*/
int CClientServerMappingDistributed::computeBuffCountIndexGlobal(MPI_Request& requestRecv)
{
  int flag, count = 0;
  MPI_Status status;

  MPI_Test(&requestRecv, &flag, &status);
  if (true == flag)
  {
    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &count);
  }

  return count;
}

/*!
  Compute the size of a received message containing server indices
  \param[in] requestRecv receiving request of the message
*/
int CClientServerMappingDistributed::computeBuffCountIndexServer(MPI_Request& requestRecv)
{
  int flag, count = 0;
  MPI_Status status;

  MPI_Test(&requestRecv, &flag, &status);
  if (true == flag)
  {
    MPI_Get_count(&status, MPI_INT, &count);
  }

  return count;
}

/*!
  Reset all receiving request maps and counters
*/
void CClientServerMappingDistributed::resetReceivingRequestAndCount()
{
  countIndexGlobal_ = countIndexServer_ = 0;
  requestRecvIndexGlobal_.clear();
  requestRecvIndexServer_.clear();
  indexGlobalBuffBegin_.clear();
  indexServerBuffBegin_.clear();
}

}