Ignore:
Timestamp:
07/07/15 10:46:25 (9 years ago)
Author:
mhnguyen
Message:

Implement interpolation (polynomial) and correct some bugs

+) Implement interpolation (polynomial)
+) Correct some minor bugs relating to memory allocation
+) Remove some redundant code

Test
+) On Curie
+) test_client and test_complete pass

File:
1 edited

Legend:

Unmodified
Added
Removed
  • XIOS/trunk/src/client_server_mapping_distributed.cpp

    r624 r630  
    5959    { 
    6060      int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    61  
    6261      { 
    6362        client2ClientIndexGlobal[indexClient].push_back(globalIndexClient); 
     
    6968  int* sendBuff = new int[nbClient_]; 
    7069  for (int i = 0; i < nbClient_; ++i) sendBuff[i] = 0; 
    71   std::map<int, std::vector<size_t> >::iterator it  = client2ClientIndexGlobal.begin(), 
    72                                                 ite = client2ClientIndexGlobal.end(); 
    73   for (; it != ite; ++it) sendBuff[it->first] = 1; 
     70  std::map<int, std::vector<size_t> >::iterator itb  = client2ClientIndexGlobal.begin(), it, 
     71                                                ite  = client2ClientIndexGlobal.end(); 
     72  for (it = itb; it != ite; ++it) sendBuff[it->first] = 1; 
    7473  int* recvBuff = new int[nbClient_]; 
    7574  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm_); 
     
    7776  std::list<MPI_Request> sendRequest; 
    7877  if (0 != nbIndexSendToOthers) 
    79       for (it = client2ClientIndexGlobal.begin(); it != ite; ++it) 
     78      for (it = itb; it != ite; ++it) 
    8079         sendIndexGlobalToClients(it->first, it->second, clientIntraComm_, sendRequest); 
    8180 
     
    8584  // The demand message contains global index; meanwhile the responds have server index information 
    8685  // Buffer to receive demand from other clients, it can be allocated or not depending whether it has demand(s) 
     86    // There are some cases where duplicate indexes are demanded, so we need to determine the maximum size of the demand buffer 
     87  for (it = itb; it != ite; ++it) sendBuff[it->first] = (it->second).size(); 
     88  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm_); 
     89 
    8790  unsigned long* recvBuffIndexGlobal = 0; 
    88 //  int maxNbIndexDemandedFromOthers = (nbIndexAlreadyOnClient >= globalIndexToServerMapping_.size()) 
    89 //                                   ? 0 : (globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient); 
    90   int maxNbIndexDemandedFromOthers = (globalIndexToServerMapping_.size() > nbIndexSendToOthers) 
    91                                       ? globalIndexToServerMapping_.size() : nbIndexSendToOthers; 
    92  
     91  int maxNbIndexDemandedFromOthers = recvBuff[clientRank_]; 
    9392  if (!isDataDistributed_) maxNbIndexDemandedFromOthers = nbDemandingClient * nbIndexSendToOthers; //globalIndexToServerMapping_.size(); // Not very optimal but it's general 
    9493 
Note: See TracChangeset for help on using the changeset viewer.