Changeset 585


Timestamp: 04/01/15 17:52:58 (9 years ago)
Author: mhnguyen
Message:

Modifying some functions to make sure zoom works even when the grid is not distributed

+) Change some code in sendIndex to make sure a non-distributed grid works with zoom

Test
+) On Curie
+) test_client: passed, and results are the same as on the branch
+) test_complete: there is a difference in output because of the zoom index offset

Location: XIOS/trunk/src
Files: 4 edited

  • XIOS/trunk/src/client_server_mapping_distributed.cpp

    r584 r585

    @@ -17,7 +17,7 @@
     
     CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
    -                                                                 const MPI_Comm& clientIntraComm)
    +                                                                 const MPI_Comm& clientIntraComm, bool isDataDistributed)
       : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0),
    -    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_()
    +    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_(), isDataDistributed_(isDataDistributed)
     {
       clientIntraComm_ = clientIntraComm;

    @@ -35,18 +35,4 @@
        Compute mapping global index of server which client sends to.
        \param [in] globalIndexOnClient global index client has
    -*/
    -//void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient)
    -//{
    -//  int ssize = globalIndexOnClient.numElements();
    -//  CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize);
    -//  for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i;
    -//
    -//  this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient);
    -//  delete localIndexOnClient;
    -//}
    -
    -/*!
    -   Compute mapping global index of server which client sends to.
    -   \param [in] globalIndexOnClient global index client has
        \param [in] localIndexOnClient local index on client
     */

    @@ -61,5 +47,4 @@
       std::map<int, std::vector<size_t> > client2ClientIndexGlobal;
       std::map<int, std::vector<int> > client2ClientIndexServer;
    -//  std::map<int, std::vector<int> > clientLocalIndex;
     
       // Number of global index whose mapping server can be found out thanks to index-server mapping

    @@ -81,5 +66,4 @@
           {
             (indexGlobalOnServer_[globalIndexToServerMapping_[globalIndexClient]]).push_back(globalIndexClient);
    -//        (localIndexSend2Server_[globalIndexToServerMapping_[globalIndexClient]]).push_back(localIndexOnClient(i));
             ++nbIndexAlreadyOnClient;
           }

    @@ -87,5 +71,4 @@
           {
             client2ClientIndexGlobal[indexClient].push_back(globalIndexClient);
    -//        clientLocalIndex[indexClient].push_back(i);
             ++nbIndexSendToOthers;
           }

    @@ -106,4 +89,5 @@
             sendIndexGlobalToClients(it->first, it->second, clientIntraComm_, sendRequest);
     
    +  int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;
       // Receiving demand as well as the responds from other clients
       // The demand message contains global index; meanwhile the responds have server index information

    @@ -112,4 +96,6 @@
       int maxNbIndexDemandedFromOthers = (nbIndexAlreadyOnClient >= globalIndexToServerMapping_.size())
                                        ? 0 : (globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient);
    +  if (!isDataDistributed_) maxNbIndexDemandedFromOthers = nbDemandingClient * globalIndexToServerMapping_.size(); // Not very optimal but it's general
    +
       if (0 != maxNbIndexDemandedFromOthers)
         recvBuffIndexGlobal = new unsigned long[maxNbIndexDemandedFromOthers];

    @@ -118,4 +104,5 @@
       int* recvBuffIndexServer = 0;
       int nbIndexReceivedFromOthers = nbIndexSendToOthers;
    +//  int nbIndexReceivedFromOthers = globalIndexToServerMapping_.size() - nbIndexAlreadyOnClient;
       if (0 != nbIndexReceivedFromOthers)
         recvBuffIndexServer = new int[nbIndexReceivedFromOthers];

    @@ -122,7 +109,6 @@
     
    -  resetReceivingRequestAndCount();
       std::map<int, MPI_Request>::iterator itRequest;
       std::vector<int> demandAlreadyReceived, repondAlreadyReceived;
    -  int nbDemandingClient = recvBuff[clientRank_], nbIndexServerReceived = 0;
    +
     
       resetReceivingRequestAndCount();

    @@ -180,9 +166,7 @@
            int* beginBuff = indexServerBuffBegin_[clientSourceRank];
            std::vector<size_t>& globalIndexTmp = client2ClientIndexGlobal[clientSourceRank];
    -//        std::vector<int>& localIndexTmp = clientLocalIndex[clientSourceRank];
            for (int i = 0; i < count; ++i)
            {
              (indexGlobalOnServer_[*(beginBuff+i)]).push_back(globalIndexTmp[i]);
    -//          (localIndexSend2Server_[*(beginBuff+i)]).push_back(localIndexOnClient(localIndexTmp[i]));
            }
            nbIndexServerReceived += count;

    @@ -224,6 +208,6 @@
       Compute distribution of global index for servers
       Each client already holds a piece of information about global index and the corresponding server.
    -This information is redistributed into size_t sipace in which each client possesses a specific range of index.
    -Afterh the redistribution, each client as long as its range of index contains all necessary information about server.
    +This information is redistributed into size_t space in which each client possesses a specific range of index.
    +After the redistribution, each client as well as its range of index contains all necessary information about server.
       \param [in] globalIndexOfServer global index and the corresponding server
       \param [in] clientIntraComm client joining distribution process.
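A note on the main behavioural change in this file: when data is not distributed, the original estimate for the demand-receive buffer (total number of indexes minus those already resolved on the client) no longer bounds what other clients may ask for, so the new isDataDistributed_ branch sizes the buffer for the worst case instead. A minimal standalone sketch of that sizing rule follows; computeMaxNbIndexDemanded and totalNbIndex are illustrative names that do not exist in XIOS:

    #include <cstddef>

    // Sketch of the receive-buffer sizing used in the diff above; not the
    // XIOS code itself. totalNbIndex plays the role of
    // globalIndexToServerMapping_.size().
    std::size_t computeMaxNbIndexDemanded(std::size_t totalNbIndex,
                                          std::size_t nbIndexAlreadyOnClient,
                                          int nbDemandingClient,
                                          bool isDataDistributed)
    {
      // Distributed case: at most the indexes this client could not resolve locally.
      std::size_t maxNb = (nbIndexAlreadyOnClient >= totalNbIndex)
                        ? 0 : (totalNbIndex - nbIndexAlreadyOnClient);

      // Non-distributed case: every demanding client may ask about the whole
      // index range, so size for the worst case.
      if (!isDataDistributed)
        maxNb = static_cast<std::size_t>(nbDemandingClient) * totalNbIndex;

      return maxNb;
    }

As the commit's own inline comment concedes ("Not very optimal but it's general"), the worst-case bound trades memory for correctness when every client replicates the full index set.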
  • XIOS/trunk/src/client_server_mapping_distributed.hpp

    r584 r585

    @@ -34,5 +34,5 @@
         /** Default constructor */
         CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
    -                                    const MPI_Comm& clientIntraComm);
    +                                    const MPI_Comm& clientIntraComm, bool isDataDistributed = true);
     
         virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);

    @@ -118,4 +118,7 @@
        //! Mapping client rank and the begining position of receiving buffer for message of server index from this client
        std::map<int, int*> indexServerBuffBegin_;
    +
    +    //! Flag to specify whether data is distributed or not
    +    bool isDataDistributed_;
     };
     
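Because the new parameter defaults to true, existing call sites compile and behave as before; only callers that know the grid data is replicated need to pass false. A hedged illustration, where the header path and argument objects are placeholders:

    #include <boost/unordered_map.hpp>
    #include <mpi.h>
    #include "client_server_mapping_distributed.hpp" // XIOS header; path illustrative

    void buildMappings(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                       const MPI_Comm& comm)
    {
      using xios::CClientServerMappingDistributed;

      // Old behaviour preserved: isDataDistributed defaults to true.
      CClientServerMappingDistributed distributedCase(globalIndexOfServer, comm);

      // Replicated (non-distributed) data: pass the flag explicitly.
      CClientServerMappingDistributed replicatedCase(globalIndexOfServer, comm, false);
    }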
  • XIOS/trunk/src/node/domain.cpp

    r584 r585

    @@ -744,5 +744,5 @@
           for (int i = 0; i < nb; ++i)
           {
    -        if (std::binary_search(itbVec, iteVec, globalIndexDomainZoom(i)))
    +        if (iteVec != std::find(itbVec, iteVec, globalIndexDomainZoom(i)))
            {
              indSrv_[rank].push_back(globalIndexDomainZoom(i));
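The one-line change above replaces std::binary_search with std::find. std::binary_search requires the range [itbVec, iteVec) to be sorted, and with zoom the global index sequence is not guaranteed sorted, so the binary search could miss indexes that are actually present. std::find scans linearly and needs no ordering, at the cost of O(n) per lookup. A small self-contained demonstration of the pitfall, with illustrative values only:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main()
    {
      // Zoomed global indexes are not guaranteed to arrive sorted.
      std::vector<std::size_t> indexes = {42, 7, 19, 3};

      // std::binary_search assumes a sorted range; on this unsorted input its
      // result is unspecified and it can report 19 as absent.
      // std::find performs a linear scan and needs no ordering.
      assert(std::find(indexes.begin(), indexes.end(), 19) != indexes.end());
      return 0;
    }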
  • XIOS/trunk/src/node/grid.cpp

    r584 r585

    @@ -336,4 +336,5 @@
         // Finally, compute index mapping between client(s) and server(s)
         clientServerMap_ = new CClientServerMappingDistributed(serverDistributionDescription_->getGlobalIndexRange(),
    -                                                            client->intraComm);
    +                                                            client->intraComm,
    +                                                            clientDistribution_->isDataDistributed());
     
         clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex());

    @@ -501,28 +502,32 @@
        if (!doGridHaveDataDistributed())
        {
    -//      if (0 == client->clientRank)
    -//      {
    -//       for (int ns = 0; itGlobal != iteMap; ++itGlobal, ++itLocal, ++ns)
    -//        {
    -//          rank = itGlobal->first;
    -//          int nb = (itGlobal->second).size();
    -//
    -//          CArray<size_t, 1> outGlobalIndexOnServer(nb);
    -//          CArray<int, 1> outLocalIndexToServer(nb);
    -//          for (int k = 0; k < nb; ++k)
    -//          {
    -//            outGlobalIndexOnServer(k) = itGlobal->second.at(k);
    -//            outLocalIndexToServer(k)  = itLocal->second.at(k);
    -//          }
    -//
    -//          storeIndex_toSrv.insert( pair<int,CArray<int,1>* >(rank,new CArray<int,1>(outLocalIndexToServer) ));
    -//          listOutIndex.push_back(new CArray<size_t,1>(outGlobalIndexOnServer));
    -//
    -//          list_msg.push_back(shared_ptr<CMessage>(new CMessage));
    -//          *list_msg.back()<<getId()<<*listOutIndex.back();
    -//          event.push(rank, 1, *list_msg.back());
    -//        }
    -//        client->sendEvent(event);
    -//      } else client->sendEvent(event);
    +      if (0 == client->clientRank)
    +      {
    +        for (rank = 0; rank < client->serverSize; ++rank)
    +        {
    +          int nb = 0;
    +          if (globalIndexTmp.end() != globalIndexTmp.find(rank))
    +            nb = globalIndexTmp[rank].size();
    +
    +          CArray<size_t, 1> outGlobalIndexOnServer(nb);
    +          CArray<int, 1> outLocalIndexToServer(nb);
    +          for (int k = 0; k < nb; ++k)
    +          {
    +            outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k);
    +            outLocalIndexToServer(k)  = localIndexTmp[rank].at(k);
    +          }
    +
    +          storeIndex_toSrv.insert( pair<int,CArray<int,1>* >(rank,new CArray<int,1>(outLocalIndexToServer) ));
    +          listOutIndex.push_back(new CArray<size_t,1>(outGlobalIndexOnServer));
    +
    +          list_msg.push_back(shared_ptr<CMessage>(new CMessage));
    +          *list_msg.back()<<getId()<<*listOutIndex.back();
    +
    +          event.push(rank, 1, *list_msg.back());
    +        }
    +        client->sendEvent(event);
    +      }
    +      else
    +        client->sendEvent(event);
        }
        else
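In the non-distributed branch above, only client rank 0 sends, but it now loops over every server rank, including servers for which no zoomed index falls in range (nb = 0), so each server still receives a (possibly empty) message for the event. A reduced sketch of this fan-out pattern, where sendToServer is a placeholder for the real CMessage/CEventClient machinery:

    #include <cstddef>
    #include <map>
    #include <vector>

    // Sketch of the fan-out in the non-distributed branch; not the XIOS code.
    void fanOutIndexes(int clientRank, int serverSize,
                       const std::map<int, std::vector<std::size_t> >& globalIndexTmp,
                       void (*sendToServer)(int rank, const std::vector<std::size_t>& idx))
    {
      // Only one client (rank 0) sends, because every client holds the same data.
      if (clientRank != 0) return;

      for (int rank = 0; rank < serverSize; ++rank)
      {
        // Servers with no matching indexes still get an (empty) message so the
        // event stays consistent across all servers.
        std::map<int, std::vector<std::size_t> >::const_iterator it = globalIndexTmp.find(rank);
        std::vector<std::size_t> out = (it != globalIndexTmp.end())
                                     ? it->second : std::vector<std::size_t>();
        sendToServer(rank, out);
      }
    }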