source: XIOS/trunk/src/client_server_mapping_distributed.cpp @ 576

Last change on this file since 576 was 569, checked in by mhnguyen, 9 years ago

Correct some bugs in discovering server index and do some code cleanup

+) Add some checking functions to make sure mpi_isend and mpi_irecv work correctly
+) Add comments to code
+) Remove some redundant code and comments

Test
+) On Curie
+) The new functions are tested in test_new_features.f90. test_client and test_complete work as before
+) Test cases:

  • 3-dimensional grid with: 1 domain, 1 axis
  • 3-dimensional grid with: 3 axes
  • Attached and connected

+) All pass and results are correct

TODO:
+) Fix zoom bug with grid composed of only one axis

File size: 21.4 KB
/*!
   \file client_server_mapping_distributed.cpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between client index and server index.
   Clients pre-calculate all information of the server distribution.
*/
#include "client_server_mapping_distributed.hpp"
#include <limits>
#include <boost/functional/hash.hpp>

namespace xios
{

CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                                                 const MPI_Comm& clientIntraComm)
  : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0),
    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_()
{
  clientIntraComm_ = clientIntraComm;
  MPI_Comm_size(clientIntraComm,&(nbClient_));
  MPI_Comm_rank(clientIntraComm,&clientRank_);
  computeHashIndex();
  computeDistributedServerIndex(globalIndexOfServer, clientIntraComm);
}

CClientServerMappingDistributed::~CClientServerMappingDistributed()
{
}
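
// Usage sketch (illustrative; only the two calls below come from this file, the
// surrounding variables are assumed):
//   CClientServerMappingDistributed mapping(globalIndexOfServer, clientIntraComm);
//   mapping.computeServerIndexMapping(globalIndexOnClient);
// Afterwards the member maps indexGlobalOnServer_ and localIndexSend2Server_ hold,
// for each server rank, the global and local indices this client must send to it.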

/*!
   Compute the mapping between the global indices a client holds and the server(s) they must be sent to.
   \param [in] globalIndexOnClient global indices the client holds
*/
void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
{
  int ssize = globalIndexOnClient.numElements();
  CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize);
  for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i;

  this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient);
  delete localIndexOnClient;
}

/*!
   Compute the mapping between the global indices a client holds and the server(s) they must be sent to.
   \param [in] globalIndexOnClient global indices the client holds
   \param [in] localIndexOnClient local indices on the client
*/
void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,
                                                                const CArray<int,1>& localIndexOnClient)
{
  size_t ssize = globalIndexOnClient.numElements(), hashedIndex;

  std::vector<size_t>::const_iterator itbClientHash = indexClientHash_.begin(), itClientHash,
                                      iteClientHash = indexClientHash_.end();
  std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
  std::map<int, std::vector<int> > client2ClientIndexServer;
  std::map<int, std::vector<int> > clientLocalIndex;

  // Number of global indices whose server can be resolved locally with the index-server map
  int nbIndexAlreadyOnClient = 0;

  // Number of global indices whose server mapping is held by other clients
  int nbIndexSendToOthers = 0;
  boost::hash<size_t> hashGlobalIndex;
  for (int i = 0; i < ssize; ++i)
  {
    size_t globalIndexClient = globalIndexOnClient(i);
    hashedIndex  = hashGlobalIndex(globalIndexClient);
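    // indexClientHash_ holds the lower boundary of each client's hash range, so the
    // owner of hashedIndex is found with upper_bound (first boundary strictly greater)
    // minus one, i.e. the "distance - 1" computed below.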
    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashedIndex);
    if (iteClientHash != itClientHash)
    {
      int indexClient = std::distance(itbClientHash, itClientHash)-1;

      if (clientRank_ == indexClient)
      {
        (indexGlobalOnServer_[globalIndexToServerMapping_[globalIndexClient]]).push_back(globalIndexClient);
        (localIndexSend2Server_[globalIndexToServerMapping_[globalIndexClient]]).push_back(localIndexOnClient(i));
        ++nbIndexAlreadyOnClient;
      }
      else
      {
        client2ClientIndexGlobal[indexClient].push_back(globalIndexClient);
        clientLocalIndex[indexClient].push_back(i);
        ++nbIndexSendToOthers;
      }
    }
  }

  int* sendBuff = new int[nbClient_];
  for (int i = 0; i < nbClient_; ++i) sendBuff[i] = 0;
  std::map<int, std::vector<size_t> >::iterator it  = client2ClientIndexGlobal.begin(),
                                                ite = client2ClientIndexGlobal.end();
  for (; it != ite; ++it) sendBuff[it->first] = 1;
  int* recvBuff = new int[nbClient_];
  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm_);

  std::list<MPI_Request> sendRequest;
  if (0 != nbIndexSendToOthers)
      for (it = client2ClientIndexGlobal.begin(); it != ite; ++it)
         sendIndexGlobalToClients(it->first, it->second, clientIntraComm_, sendRequest);

  // Receive demands as well as responses from other clients.
  // A demand message contains global indices; a response carries the corresponding server indices.
  // Buffer to receive demands from other clients; only allocated if demands can arrive
  unsigned long* recvBuffIndexGlobal = 0;
  int maxNbIndexDemandedFromOthers = (nbIndexAlreadyOnClient >= globalIndexToServerMapping_.size())
                                   ? 0 : (globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient);
  if (0 != maxNbIndexDemandedFromOthers)
    recvBuffIndexGlobal = new unsigned long[maxNbIndexDemandedFromOthers];

  // Buffer to receive responses from other clients; only allocated if this client demands something
  int* recvBuffIndexServer = 0;
  int nbIndexReceivedFromOthers = nbIndexSendToOthers;
  if (0 != nbIndexReceivedFromOthers)
    recvBuffIndexServer = new int[nbIndexReceivedFromOthers];

  resetReceivingRequestAndCount();
  std::map<int, MPI_Request>::iterator itRequest;
  std::vector<int> demandAlreadyReceived, repondAlreadyReceived;
  int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;
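
  // Keep looping until: every client demanding part of our mapping has been answered
  // (nbDemandingClient reaches 0), all of our non-blocking sends have completed, and
  // every expected server-index response has been received.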
  while ((0 < nbDemandingClient) || (!sendRequest.empty()) ||
         (nbIndexServerReceived < nbIndexReceivedFromOthers))
  {
    // Just check whether this client has any demand from other clients.
    // If it has, it should send responses to these clients
    probeIndexGlobalMessageFromClients(recvBuffIndexGlobal, maxNbIndexDemandedFromOthers);
    if (0 < nbDemandingClient)
    {
      for (itRequest = requestRecvIndexGlobal_.begin();
           itRequest != requestRecvIndexGlobal_.end(); ++itRequest)
      {
        int flagIndexGlobal, count;
        MPI_Status statusIndexGlobal;

        MPI_Test(&(itRequest->second), &flagIndexGlobal, &statusIndexGlobal);
        if (true == flagIndexGlobal)
        {
          MPI_Get_count(&statusIndexGlobal, MPI_UNSIGNED_LONG, &count);
          int clientSourceRank = statusIndexGlobal.MPI_SOURCE;
          unsigned long* beginBuff = indexGlobalBuffBegin_[clientSourceRank];
          for (int i = 0; i < count; ++i)
          {
            client2ClientIndexServer[clientSourceRank].push_back(globalIndexToServerMapping_[*(beginBuff+i)]);
          }
          sendIndexServerToClients(clientSourceRank, client2ClientIndexServer[clientSourceRank], clientIntraComm_, sendRequest);
          --nbDemandingClient;

          demandAlreadyReceived.push_back(clientSourceRank);
        }
      }
      for (int i = 0; i< demandAlreadyReceived.size(); ++i)
        requestRecvIndexGlobal_.erase(demandAlreadyReceived[i]);
    }

    testSendRequest(sendRequest);

    // A client may also need to listen for responses from other clients with server information.
    // With this information, it can fill in its server-global index map.
    probeIndexServerMessageFromClients(recvBuffIndexServer, nbIndexReceivedFromOthers);
    for (itRequest = requestRecvIndexServer_.begin();
         itRequest != requestRecvIndexServer_.end();
         ++itRequest)
    {
      int flagIndexServer, count;
      MPI_Status statusIndexServer;

      MPI_Test(&(itRequest->second), &flagIndexServer, &statusIndexServer);
      if (true == flagIndexServer)
      {
        MPI_Get_count(&statusIndexServer, MPI_INT, &count);
        int clientSourceRank = statusIndexServer.MPI_SOURCE;
        int* beginBuff = indexServerBuffBegin_[clientSourceRank];
        std::vector<size_t>& globalIndexTmp = client2ClientIndexGlobal[clientSourceRank];
        std::vector<int>& localIndexTmp = clientLocalIndex[clientSourceRank];
        for (int i = 0; i < count; ++i)
        {
          (indexGlobalOnServer_[*(beginBuff+i)]).push_back(globalIndexTmp[i]);
          (localIndexSend2Server_[*(beginBuff+i)]).push_back(localIndexOnClient(localIndexTmp[i]));
        }
        nbIndexServerReceived += count;
        repondAlreadyReceived.push_back(clientSourceRank);
      }
    }

    for (int i = 0; i< repondAlreadyReceived.size(); ++i)
      requestRecvIndexServer_.erase(repondAlreadyReceived[i]);
    repondAlreadyReceived.resize(0);
  }

  if (0 != recvBuffIndexGlobal) delete [] recvBuffIndexGlobal;
  if (0 != recvBuffIndexServer) delete [] recvBuffIndexServer;
  delete [] sendBuff;
  delete [] recvBuff;
}

/*!
  Compute the distribution of hash values over the whole size_t space; each client is assigned one contiguous range of this distribution
*/
void CClientServerMappingDistributed::computeHashIndex()
{
  // Compute range of hash index for each client
  indexClientHash_.resize(nbClient_+1);
  size_t nbHashIndexMax = std::numeric_limits<size_t>::max();
  size_t nbHashIndex;
  indexClientHash_[0] = 0;
  for (int i = 1; i < nbClient_; ++i)
  {
    nbHashIndex = nbHashIndexMax / nbClient_;
    if (i < (nbHashIndexMax%nbClient_)) ++nbHashIndex;
    indexClientHash_[i] = indexClientHash_[i-1] + nbHashIndex;
  }
  indexClientHash_[nbClient_] = nbHashIndexMax;
}
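
// Example (illustrative): with nbClient_ = 4, indexClientHash_ holds five boundaries
// that split [0, numeric_limits<size_t>::max()] into four nearly equal ranges;
// client i is responsible for hash values in [indexClientHash_[i], indexClientHash_[i+1]).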

/*!
  Compute the distribution of the global-index-to-server mapping over the clients
  Each client already holds a piece of information about global indices and their corresponding servers.
This information is redistributed over the size_t space so that each client possesses a specific range of indices.
After the redistribution, each client's range of indices contains all the necessary server information.
  \param [in] globalIndexOfServer global index and the corresponding server
  \param [in] clientIntraComm client communicator taking part in the distribution process.
*/
void CClientServerMappingDistributed::computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                                                    const MPI_Comm& clientIntraComm)
{
  int* sendBuff = new int[nbClient_];
  int* sendNbIndexBuff = new int[nbClient_];
  for (int i = 0; i < nbClient_; ++i)
  {
    sendBuff[i] = 0; sendNbIndexBuff[i] = 0;
  }

  // Compute size of sending and receiving buffer
  std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
  std::map<int, std::vector<int> > client2ClientIndexServer;

  std::vector<size_t>::const_iterator itbClientHash = indexClientHash_.begin(), itClientHash,
                                      iteClientHash = indexClientHash_.end();
  boost::unordered_map<size_t,int>::const_iterator it  = globalIndexOfServer.begin(),
                                                   ite = globalIndexOfServer.end();
  boost::hash<size_t> hashGlobalIndex;
  for (; it != ite; ++it)
  {
    size_t hashIndex = hashGlobalIndex(it->first);
    itClientHash = std::upper_bound(itbClientHash, iteClientHash, hashIndex);
    if (itClientHash != iteClientHash)
    {
      int indexClient = std::distance(itbClientHash, itClientHash)-1;
      if (clientRank_ == indexClient)
      {
        globalIndexToServerMapping_.insert(std::make_pair(it->first, it->second));
      }
      else
      {
        sendBuff[indexClient] = 1;
        ++sendNbIndexBuff[indexClient];
        client2ClientIndexGlobal[indexClient].push_back(it->first);
        client2ClientIndexServer[indexClient].push_back(it->second);
      }
    }
  }

  // Calculate from how many clients each client receives a message.
  int* recvBuff = new int[nbClient_];
  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm);
  int recvNbClient = recvBuff[clientRank_];

  // Calculate size of buffer for receiving message
  int* recvNbIndexBuff = new int[nbClient_];
  MPI_Allreduce(sendNbIndexBuff, recvNbIndexBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm);
  int recvNbIndexCount = recvNbIndexBuff[clientRank_];
  unsigned long* recvIndexGlobalBuff = new unsigned long[recvNbIndexCount];
  int* recvIndexServerBuff = new int[recvNbIndexCount];

  // If a client holds information about global indices and servers which don't belong to it,
  // it will send a message to the correct clients.
  // The message contains the global indices and their corresponding server indices
  std::list<MPI_Request> sendRequest;
  std::map<int, std::vector<size_t> >::iterator itGlobal  = client2ClientIndexGlobal.begin(),
                                                iteGlobal = client2ClientIndexGlobal.end();
  for ( ;itGlobal != iteGlobal; ++itGlobal)
    sendIndexGlobalToClients(itGlobal->first, itGlobal->second, clientIntraComm, sendRequest);
  std::map<int, std::vector<int> >::iterator itServer  = client2ClientIndexServer.begin(),
                                             iteServer = client2ClientIndexServer.end();
  for (; itServer != iteServer; ++itServer)
    sendIndexServerToClients(itServer->first, itServer->second, clientIntraComm, sendRequest);
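
  // At this point each client has posted non-blocking sends of the (global index,
  // server index) pairs that fall into other clients' hash ranges; the loop below
  // receives the symmetric messages addressed to this client.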

  std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer;
  std::map<int, int> countBuffIndexServer, countBuffIndexGlobal;
  std::vector<int> processedList;

  bool isFinished = (0 == recvNbClient) ? true : false;

  // Just to make sure that, before listening for messages, all counting indices and receiving requests have already been reset
  resetReceivingRequestAndCount();

  // Now each client tries to listen to demands from the others.
  // If a message has arrived, it is processed: the global indices and corresponding servers are pushed into the client's map
  while (!isFinished || (!sendRequest.empty()))
  {
    testSendRequest(sendRequest);
    probeIndexGlobalMessageFromClients(recvIndexGlobalBuff, recvNbIndexCount);

    // Processing completed requests
    for (itRequestIndexGlobal = requestRecvIndexGlobal_.begin();
         itRequestIndexGlobal != requestRecvIndexGlobal_.end();
         ++itRequestIndexGlobal)
    {
      int rank = itRequestIndexGlobal->first;
      int countIndexGlobal = computeBuffCountIndexGlobal(itRequestIndexGlobal->second);
      if (0 != countIndexGlobal)
        countBuffIndexGlobal[rank] = countIndexGlobal;
    }

    probeIndexServerMessageFromClients(recvIndexServerBuff, recvNbIndexCount);
    for (itRequestIndexServer = requestRecvIndexServer_.begin();
         itRequestIndexServer != requestRecvIndexServer_.end();
         ++itRequestIndexServer)
    {
      int rank = itRequestIndexServer->first;
      int countIndexServer = computeBuffCountIndexServer(itRequestIndexServer->second);
      if (0 != countIndexServer)
        countBuffIndexServer[rank] = countIndexServer;
    }

    for (std::map<int, int>::iterator it = countBuffIndexGlobal.begin();
                                      it != countBuffIndexGlobal.end(); ++it)
    {
      int rank = it->first;
      if (countBuffIndexServer.end() != countBuffIndexServer.find(rank))
      {
        processReceivedRequest(indexGlobalBuffBegin_[rank], indexServerBuffBegin_[rank], it->second);
        processedList.push_back(rank);
        --recvNbClient;
      }
    }

    for (int i = 0; i < processedList.size(); ++i)
    {
      requestRecvIndexServer_.erase(processedList[i]);
      requestRecvIndexGlobal_.erase(processedList[i]);
      countBuffIndexGlobal.erase(processedList[i]);
      countBuffIndexServer.erase(processedList[i]);
    }

    if (0 == recvNbClient) isFinished = true;
  }

  delete [] sendBuff;
  delete [] sendNbIndexBuff;
  delete [] recvBuff;
  delete [] recvNbIndexBuff;
  delete [] recvIndexGlobalBuff;
  delete [] recvIndexServerBuff;
}

/*!
  Probe and receive messages containing global indices from other clients.
  Each client can send a message of global indices to other clients to fulfill their maps.
Each client probes its queue; when a message is ready it is received into the receiving buffer
  \param [in] recvIndexGlobalBuff buffer dedicated to receiving global indices
  \param [in] recvNbIndexCount size of the buffer
*/
void CClientServerMappingDistributed::probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount)
{
  MPI_Status statusIndexGlobal;
  int flagIndexGlobal, count;

  // Probing for global index
  MPI_Iprobe(MPI_ANY_SOURCE, 15, clientIntraComm_, &flagIndexGlobal, &statusIndexGlobal);
  if ((true == flagIndexGlobal) && (countIndexGlobal_ < recvNbIndexCount))
  {
    MPI_Get_count(&statusIndexGlobal, MPI_UNSIGNED_LONG, &count);
    indexGlobalBuffBegin_.insert(std::make_pair(statusIndexGlobal.MPI_SOURCE, recvIndexGlobalBuff+countIndexGlobal_));
    MPI_Irecv(recvIndexGlobalBuff+countIndexGlobal_, count, MPI_UNSIGNED_LONG,
              statusIndexGlobal.MPI_SOURCE, 15, clientIntraComm_,
              &requestRecvIndexGlobal_[statusIndexGlobal.MPI_SOURCE]);
    countIndexGlobal_ += count;
  }
}
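
// Note: tag 15 is used for global-index messages and tag 12 for server-index
// messages (see sendIndexGlobalToClients / sendIndexServerToClients), so the two
// probe routines never pick up each other's traffic.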

/*!
  Probe and receive messages containing server indices from other clients.
  Each client can send a message of server indices to other clients to fulfill their maps.
Each client probes its queue; when a message is ready it is received into the receiving buffer
  \param [in] recvIndexServerBuff buffer dedicated to receiving server indices
  \param [in] recvNbIndexCount size of the buffer
*/
void CClientServerMappingDistributed::probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount)
{
  MPI_Status statusIndexServer;
  int flagIndexServer, count;

  // Probing for server index
  MPI_Iprobe(MPI_ANY_SOURCE, 12, clientIntraComm_, &flagIndexServer, &statusIndexServer);
  if ((true == flagIndexServer) && (countIndexServer_ < recvNbIndexCount))
  {
    MPI_Get_count(&statusIndexServer, MPI_INT, &count);
    indexServerBuffBegin_.insert(std::make_pair(statusIndexServer.MPI_SOURCE, recvIndexServerBuff+countIndexServer_));
    MPI_Irecv(recvIndexServerBuff+countIndexServer_, count, MPI_INT,
              statusIndexServer.MPI_SOURCE, 12, clientIntraComm_,
              &requestRecvIndexServer_[statusIndexServer.MPI_SOURCE]);

    countIndexServer_ += count;
  }
}

/*!
  Send a message containing global indices to a client
  \param [in] clientDestRank rank of destination client
  \param [in] indexGlobal global indices to send
  \param [in] clientIntraComm client intra-communicator
  \param [in] requestSendIndexGlobal list of send requests
*/
void CClientServerMappingDistributed::sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                                                               const MPI_Comm& clientIntraComm,
                                                               std::list<MPI_Request>& requestSendIndexGlobal)
{
  MPI_Request request;
  requestSendIndexGlobal.push_back(request);
  MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG,
            clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back()));
}

/*!
  Send a message containing server indices to a client
  \param [in] clientDestRank rank of destination client
  \param [in] indexServer server indices to send
  \param [in] clientIntraComm client intra-communicator
  \param [in] requestSendIndexServer list of send requests
*/
void CClientServerMappingDistributed::sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
                                                               const MPI_Comm& clientIntraComm,
                                                               std::list<MPI_Request>& requestSendIndexServer)
{
  MPI_Request request;
  requestSendIndexServer.push_back(request);
  MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT,
            clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back()));
}

/*!
  Test the status of pending send requests and remove those that have completed
  \param [in] sendRequest list of send requests to test
*/
void CClientServerMappingDistributed::testSendRequest(std::list<MPI_Request>& sendRequest)
{
  int flag = 0;
  MPI_Status status;
  std::list<MPI_Request>::iterator itRequest;
  int sizeListRequest = sendRequest.size();
  int idx = 0;
  while (idx < sizeListRequest)
  {
    bool isErased = false;
    for (itRequest = sendRequest.begin(); itRequest != sendRequest.end(); ++itRequest)
    {
      MPI_Test(&(*itRequest), &flag, &status);
      if (true == flag)
      {
        isErased = true;
        break;
      }
    }
    if (true == isErased) sendRequest.erase(itRequest);
    ++idx;
  }
}
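
// Note: each pass of the outer loop removes at most one completed request, so the
// list is scanned up to its initial size per call; requests still pending are
// tested again on subsequent calls.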

/*!
  Process a received request: push pairs of global index and server index into the map
  \param[in] buffIndexGlobal pointer to the beginning of the buffer containing global indices
  \param[in] buffIndexServer pointer to the beginning of the buffer containing server indices
  \param[in] count size of the received message
*/
void CClientServerMappingDistributed::processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count)
{
  for (int i = 0; i < count; ++i)
    globalIndexToServerMapping_.insert(std::make_pair(*(buffIndexGlobal+i),*(buffIndexServer+i)));
}

/*!
  Compute the size of a received message containing global indices
  \param[in] requestRecv receive request of the message
  \return number of elements received (0 if the request has not completed)
*/
int CClientServerMappingDistributed::computeBuffCountIndexGlobal(MPI_Request& requestRecv)
{
  int flag, count = 0;
  MPI_Status status;

  MPI_Test(&requestRecv, &flag, &status);
  if (true == flag)
  {
    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &count);
  }

  return count;
}

/*!
  Compute the size of a received message containing server indices
  \param[in] requestRecv receive request of the message
  \return number of elements received (0 if the request has not completed)
*/
int CClientServerMappingDistributed::computeBuffCountIndexServer(MPI_Request& requestRecv)
{
  int flag, count = 0;
  MPI_Status status;

  MPI_Test(&requestRecv, &flag, &status);
  if (true == flag)
  {
    MPI_Get_count(&status, MPI_INT, &count);
  }

  return count;
}

/*!
  Reset all receiving request maps and counters
*/
void CClientServerMappingDistributed::resetReceivingRequestAndCount()
{
  countIndexGlobal_ = countIndexServer_ = 0;
  requestRecvIndexGlobal_.clear();
  requestRecvIndexServer_.clear();
  indexGlobalBuffBegin_.clear();
  indexServerBuffBegin_.clear();
}

}