Changeset 569


Timestamp:
03/10/15 10:49:13
Author:
mhnguyen
Message:

Correct some bugs in server index discovery and do some code cleanup

+) Add checking functions to make sure MPI_Isend and MPI_Irecv work correctly (see the sketch after this list)
+) Add comments to code
+) Remove some redundant code and comments
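
A minimal sketch of what such a check can look like, assuming standard MPI and a std::list of pending requests; the helper name is illustrative, in the spirit of the testSendRequest function visible in the client_server_mapping_distributed.cpp diff below:

    #include <list>
    #include <mpi.h>

    // Illustrative helper: poll each pending non-blocking send with MPI_Test
    // and drop the requests that have completed.
    void testSendRequests(std::list<MPI_Request>& sendRequests)
    {
      std::list<MPI_Request>::iterator it = sendRequests.begin();
      while (it != sendRequests.end())
      {
        int flag = 0;
        MPI_Status status;
        MPI_Test(&(*it), &flag, &status);       // non-blocking completion check
        if (flag) it = sendRequests.erase(it);  // completed: remove from the list
        else ++it;
      }
    }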

Test
+) On Curie
+) The new functions are tested in test_new_features.f90. Test_client and test_complete work as before
+) Test cases:

  • 3-dimensional grid with 1 domain and 1 axis
  • 3-dimensional grid with 3 axes
  • Attached and connected modes

+) All cases pass and the results are correct

TODO:
+) Fix zoom bug with grid composed of only one axis

Location:
XIOS/trunk
Files:
15 edited

  • XIOS/trunk/inputs/Version2/iodef.xml

    r567 r569  
    77 
    88   <field_definition level="1" enabled=".TRUE."> 
    9 <!--     <field id="field_A"  operation="average" freq_op="3600s" domain_ref="domain_A"  axis_ref="axis_A" />--> 
     9     <field id="field_AA"  operation="average" freq_op="3600s" domain_ref="domain_A"  axis_ref="axis_A" /> 
    1010     <field id="field_A"  operation="average" freq_op="3600s" grid_ref="grid_A" /> 
    1111     <field id="field_Axis"  operation="average" freq_op="3600s" grid_ref="grid_Axis" /> 
     
    1919     </file> 
    2020     <file id="output_Axis" name="output_Axis"> 
    21         <field field_ref="field_Axis" /> 
     21<!--        <field field_ref="field_Axis" />--> 
    2222        <field field_ref="field_All_Axis" /> 
    2323     </file> 
     
    2828     <axis id="axis_A" /> 
    2929     <axis id="axis_B" /> 
    30      <axis id="axis_C" zoom_size="2" zoom_end="2" /> 
     30     <axis id="axis_C" zoom_size="2" zoom_end="2"/> 
    3131     <axis id="axis_D" zoom_size="2" zoom_end="3"/> 
    3232   </axis_definition> 
  • XIOS/trunk/src/client_server_mapping.cpp

    r568 r569  
    33   \author Ha NGUYEN 
    44   \since 04 Feb 2015 
    5    \date 06 Feb 2015 
     5   \date 09 March 2015 
    66 
    77   \brief Mapping between index client and server. 
     
    2020} 
    2121 
    22 void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    23                                                      const CArray<int,1>& localIndexOnClient) 
    24 { 
    25 //  defaultComputeServerIndexMapping(globalIndexOnClient, globalIndexServer); 
    26 } 
    27  
    28  
     22/*! 
     23  Compute the mapping of the client's global indices to the servers they are sent to. 
     24  \param [in] globalIndexOnClient global index on client 
     25  \param [in] globalIndexServer global index of servers 
     26*/ 
    2927void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    3028                                                     const std::vector<CArray<size_t,1>* >& globalIndexServer) 
     
    3331} 
    3432 
    35 //void CClientServerMapping::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    36 //                                                     const CArray<int,1>& localIndexOnClient, 
    37 //                                                     const std::vector<CArray<size_t,1>* >& globalIndexOfServer) 
    38 //{ 
    39 //  defaultComputeServerIndexMapping(globalIndexOnClient, globalIndexOfServer, &localIndexOnClient); 
    40 //} 
    41  
    4233/*! 
    4334   Compute the indices of data sent to the server and the corresponding global indices on the server side 
    4435   \param [in] globalIndexOnClient global index of data on client 
    4536   \param [in] globalIndexServer global index of server(s) 
     37   \param [in] localIndexOnClient local index of data on client which are sent to server 
    4638*/ 
    4739void CClientServerMapping::defaultComputeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
     
    129121  for(int n=0;n<recvSize;n++) clientRes[recvBuff[n]]++ ; 
    130122 
    131 //  std::map<int,int> nbSenders; 
    132123  for(int n=0;n<nbConnectedServer;n++) 
    133124  { 
  • XIOS/trunk/src/client_server_mapping.hpp

    r568 r569  
    33   \author Ha NGUYEN 
    44   \since 04 Feb 2015 
    5    \date 06 Feb 2015 
     5   \date 09 March 2015 
    66 
    77   \brief Mapping between index client and server. 
     
    1313#include "array_new.hpp" 
    1414#include "mpi.hpp" 
    15 #include <boost/unordered_map.hpp> 
    1615 
    1716namespace xios { 
     
    3130    virtual ~CClientServerMapping(); 
    3231 
     32    // Only need the global index on the client to calculate the mapping (assuming the client has info about the distribution) 
     33    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) = 0; 
     34 
     35    // Variant that also computes the local indices on the client which are sent to the server 
     36    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
     37                                           const CArray<int,1>& localIndexOnClient) = 0; 
     38 
     39    // Simple case, global index on client and index on servers 
    3340    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    3441                                           const std::vector<CArray<size_t,1>* >& globalIndexOnServer); 
    35  
    36     virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    37                                            const CArray<int,1>& localIndexOnClient); 
    38  
    39 //    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    40 //                                           const CArray<int,1>& localIndexOnClient, 
    41 //                                           const std::vector<CArray<size_t,1>* >& globalIndexOnServer); 
    4242 
    4343    std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 
  • XIOS/trunk/src/client_server_mapping_distributed.cpp

    r568 r569  
     1/*! 
     2   \file client_server_mapping_distributed.cpp 
     3   \author Ha NGUYEN 
     4   \since 27 Feb 2015 
     5   \date 09 March 2015 
     6 
     7   \brief Mapping between index client and server. 
     8   Clients pre-calculate all information of server distribution. 
     9 */ 
    110#include "client_server_mapping_distributed.hpp" 
    211#include <limits> 
     
    716 
    817CClientServerMappingDistributed::CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    9                                                                  const MPI_Comm& clientIntraComm) : CClientServerMapping(), indexClientHash_() 
     18                                                                 const MPI_Comm& clientIntraComm) 
     19  : CClientServerMapping(), indexClientHash_(), countIndexGlobal_(0), countIndexServer_(0), 
     20    indexGlobalBuffBegin_(), indexServerBuffBegin_(), requestRecvIndexServer_() 
    1021{ 
    1122  clientIntraComm_ = clientIntraComm; 
    1223  MPI_Comm_size(clientIntraComm,&(nbClient_)); 
    13   MPI_Comm_rank(clientIntraComm,&clientRank_) ; 
     24  MPI_Comm_rank(clientIntraComm,&clientRank_); 
     25  computeHashIndex(); 
    1426  computeDistributedServerIndex(globalIndexOfServer, clientIntraComm); 
    1527} 
     
    1729CClientServerMappingDistributed::~CClientServerMappingDistributed() 
    1830{ 
    19  
    20 } 
    21  
     31} 
     32 
     33/*! 
     34   Compute the mapping of the client's global indices to the servers they are sent to. 
     35   \param [in] globalIndexOnClient global index client has 
     36*/ 
     37void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient) 
     38{ 
     39  int ssize = globalIndexOnClient.numElements(); 
     40  CArray<int,1>* localIndexOnClient = new CArray<int,1>(ssize); 
     41  for (int i = 0; i < ssize; ++i) (*localIndexOnClient)(i) = i; 
     42 
     43  this->computeServerIndexMapping(globalIndexOnClient, *localIndexOnClient); 
     44  delete localIndexOnClient; 
     45} 
     46 
     47/*! 
     48   Compute the mapping of the client's global indices to the servers they are sent to. 
     49   \param [in] globalIndexOnClient global index client has 
     50   \param [in] localIndexOnClient local index on client 
     51*/ 
    2252void CClientServerMappingDistributed::computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
    2353                                                                const CArray<int,1>& localIndexOnClient) 
     
    89119    recvBuffIndexServer = new int[nbIndexReceivedFromOthers]; 
    90120 
    91   resetRequestAndCount(); 
     121  resetReceivingRequestAndCount(); 
    92122  std::map<int, MPI_Request>::iterator itRequest; 
    93123  std::vector<int> demandAlreadyReceived, repondAlreadyReceived; 
     
    168198} 
    169199 
     200/*! 
     201  Compute the hash index distribution over the whole size_t space; each client then owns one range of this distribution 
     202*/ 
    170203void CClientServerMappingDistributed::computeHashIndex() 
    171204{ 
     
    184217} 
    185218 
     219/*! 
     220  Compute the distribution of global indices for the servers 
     221  Each client already holds a piece of information about the global indices and their corresponding servers. 
     222This information is redistributed over the size_t space, in which each client possesses a specific range of indices. 
     223After the redistribution, each client's range of indices contains all the necessary information about the servers. 
     224  \param [in] globalIndexOfServer global index and the corresponding server 
     225  \param [in] clientIntraComm client joining distribution process. 
     226*/ 
    186227void CClientServerMappingDistributed::computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    187228                                                                    const MPI_Comm& clientIntraComm) 
    188229{ 
    189   computeHashIndex(); 
    190   int clientRank; 
    191   MPI_Comm_rank(clientIntraComm,&clientRank); 
    192  
    193230  int* sendBuff = new int[nbClient_]; 
    194231  int* sendNbIndexBuff = new int[nbClient_]; 
     
    214251    { 
    215252      int indexClient = std::distance(itbClientHash, itClientHash)-1; 
    216       if (clientRank == indexClient) 
     253      if (clientRank_ == indexClient) 
    217254      { 
    218255        globalIndexToServerMapping_.insert(std::make_pair<size_t,int>(it->first, it->second)); 
     
    228265  } 
    229266 
    230  
    231 //    for (boost::unordered_map<size_t,int>::const_iterator it = globalIndexToServerMapping_.begin(); 
    232 //       it != globalIndexToServerMapping_.end(); ++it) 
    233 //       std::cout << " " << it->first << ":" << it->second; 
    234 //       std::cout << "First Number = " << globalIndexToServerMapping_.size() << std::endl; 
    235  
    236  
     267  // Calculate from how many clients each client receives messages. 
    237268  int* recvBuff = new int[nbClient_]; 
    238269  MPI_Allreduce(sendBuff, recvBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm); 
    239  
     270  int recvNbClient = recvBuff[clientRank_]; 
     271 
     272  // Calculate size of buffer for receiving message 
    240273  int* recvNbIndexBuff = new int[nbClient_]; 
    241274  MPI_Allreduce(sendNbIndexBuff, recvNbIndexBuff, nbClient_, MPI_INT, MPI_SUM, clientIntraComm); 
    242  
    243   MPI_Status statusIndexGlobal, statusIndexServer; 
    244   int flag, countIndexGlobal_ = 0, countIndexServer_ = 0; 
    245  
    246  
    247   std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer; 
    248   std::map<int, int> countBuffIndexServer, countBuffIndexGlobal; 
    249   std::vector<int> processedList; 
    250  
    251  
    252   bool isFinished=false; 
    253   int recvNbIndexCount = recvNbIndexBuff[clientRank]; 
    254   int recvNbClient = recvBuff[clientRank]; 
     275  int recvNbIndexCount = recvNbIndexBuff[clientRank_]; 
    255276  unsigned long* recvIndexGlobalBuff = new unsigned long[recvNbIndexCount]; 
    256277  int* recvIndexServerBuff = new int[recvNbIndexCount]; 
    257278 
     279  // If a client holds information about global indices and servers which do not belong to it, 
     280  // it will send a message to the correct clients. 
     281  // The contents of the message are the global indices and their corresponding server indices 
    258282  std::list<MPI_Request> sendRequest; 
    259283  std::map<int, std::vector<size_t> >::iterator itGlobal  = client2ClientIndexGlobal.begin(), 
    260284                                                iteGlobal = client2ClientIndexGlobal.end(); 
    261   for ( ; itGlobal != iteGlobal; ++itGlobal) 
     285  for ( ;itGlobal != iteGlobal; ++itGlobal) 
    262286    sendIndexGlobalToClients(itGlobal->first, itGlobal->second, clientIntraComm, sendRequest); 
    263287  std::map<int, std::vector<int> >::iterator itServer  = client2ClientIndexServer.begin(), 
     
    266290    sendIndexServerToClients(itServer->first, itServer->second, clientIntraComm, sendRequest); 
    267291 
    268   resetRequestAndCount(); 
     292  std::map<int, MPI_Request>::iterator itRequestIndexGlobal, itRequestIndexServer; 
     293  std::map<int, int> countBuffIndexServer, countBuffIndexGlobal; 
     294  std::vector<int> processedList; 
     295 
     296  bool isFinished = (0 == recvNbClient) ? true : false; 
     297 
     298  // Just to make sure that, before listening for messages, all index counters and receiving requests have been reset 
     299  resetReceivingRequestAndCount(); 
     300 
     301  // Now each client tries to listen for demands from the others. 
     302  // If a message arrives, it is processed: the global index and corresponding server are pushed into its map 
    269303  while (!isFinished || (!sendRequest.empty())) 
    270304  { 
     
    304338        --recvNbClient; 
    305339      } 
    306  
    307340    } 
    308341 
     
    324357  delete [] recvIndexGlobalBuff; 
    325358  delete [] recvIndexServerBuff; 
    326  
    327 //    for (boost::unordered_map<size_t,int>::const_iterator it = globalIndexToServerMapping_.begin(); 
    328 //       it != globalIndexToServerMapping_.end(); ++it) 
    329 //       std::cout << " " << it->first << ":" << it->second; 
    330 //       std::cout << "Number = " << globalIndexToServerMapping_.size() << std::endl; 
    331  
    332 } 
    333  
     359} 
     360 
     361/*! 
     362  Probe and receive messages containing global indices from other clients. 
     363  Each client can send a message of global indices to other clients to fill their maps. 
     364Each client probes messages from its queue; if a message is ready, it is put into the receiving buffer 
     365  \param [in] recvIndexGlobalBuff buffer dedicated for receiving global index 
     366  \param [in] recvNbIndexCount size of the buffer 
     367*/ 
    334368void CClientServerMappingDistributed::probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount) 
    335369{ 
     
    350384} 
    351385 
     386/*! 
     387  Probe and receive messages containing server indices from other clients. 
     388  Each client can send a message of server indices to other clients to fill their maps. 
     389Each client probes messages from its queue; if a message is ready, it is put into the receiving buffer 
     390  \param [in] recvIndexServerBuff buffer dedicated for receiving server index 
     391  \param [in] recvNbIndexCount size of the buffer 
     392*/ 
    352393void CClientServerMappingDistributed::probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount) 
    353394{ 
     
    369410} 
    370411 
    371  
     412/*! 
     413  Send message containing global index to clients 
     414  \param [in] clientDestRank rank of destination client 
     415  \param [in] indexGlobal global index to send 
     416  \param [in] clientIntraComm communication group of client 
     417  \param [in] requestSendIndexGlobal list of sending request 
     418*/ 
    372419void CClientServerMappingDistributed::sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal, 
    373420                                                               const MPI_Comm& clientIntraComm, 
     
    378425  MPI_Isend(&(indexGlobal)[0], (indexGlobal).size(), MPI_UNSIGNED_LONG, 
    379426            clientDestRank, 15, clientIntraComm, &(requestSendIndexGlobal.back())); 
    380  
    381 //  int nbSendClient = indexGlobal.size(); 
    382 //  std::map<int, std::vector<size_t> >::iterator 
    383 //                        itClient2ClientIndexGlobal  = indexGlobal.begin(), 
    384 //                        iteClient2ClientIndexGlobal = indexGlobal.end(); 
    385 // 
    386 //  for (; itClient2ClientIndexGlobal != iteClient2ClientIndexGlobal; 
    387 //         ++itClient2ClientIndexGlobal) 
    388 //  { 
    389 //    MPI_Request request; 
    390 //    requestSendIndexGlobal.push_back(request); 
    391 //    MPI_Isend(&(itClient2ClientIndexGlobal->second)[0], 
    392 //              (itClient2ClientIndexGlobal->second).size(), 
    393 //              MPI_UNSIGNED_LONG, 
    394 //              itClient2ClientIndexGlobal->first, 
    395 //              15, clientIntraComm, &(requestSendIndexGlobal.back())); 
    396 //  } 
    397  
    398 } 
    399  
     427} 
     428 
     429/*! 
     430  Send message containing server index to clients 
     431  \param [in] clientDestRank rank of destination client 
     432  \param [in] indexServer server index to send 
     433  \param [in] clientIntraComm communication group of client 
     434  \param [in] requestSendIndexServer list of sending request 
     435*/ 
    400436void CClientServerMappingDistributed::sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer, 
    401437                                                               const MPI_Comm& clientIntraComm, 
     
    406442  MPI_Isend(&(indexServer)[0], (indexServer).size(), MPI_INT, 
    407443            clientDestRank, 12, clientIntraComm, &(requestSendIndexServer.back())); 
    408  
    409 //  int nbSendClient = indexServer.size(); 
    410 //  std::map<int, std::vector<int> >::iterator 
    411 //                        itClient2ClientIndexServer  = indexServer.begin(), 
    412 //                        iteClient2ClientIndexServer = indexServer.end(); 
    413  
    414 //  for (; itClient2ClientIndexServer != iteClient2ClientIndexServer; 
    415 //         ++itClient2ClientIndexServer) 
    416 //  { 
    417 //    MPI_Request request; 
    418 //    requestSendIndexServer.push_back(request); 
    419 //    MPI_Isend(&(itClient2ClientIndexServer->second)[0], 
    420 //              (itClient2ClientIndexServer->second).size(), 
    421 //              MPI_INT, 
    422 //              itClient2ClientIndexServer->first, 
    423 //              12, clientIntraComm, &(requestSendIndexServer.back())); 
    424 // 
    425 //  } 
    426 } 
    427  
     444} 
     445 
     446/*! 
     447  Verify status of sending request 
     448  \param [in] sendRequest sending request to verify 
     449*/ 
    428450void CClientServerMappingDistributed::testSendRequest(std::list<MPI_Request>& sendRequest) 
    429451{ 
     
    441463      if (true == flag) 
    442464      { 
    443         --sizeListRequest; 
    444465        isErased = true; 
    445466        break; 
     
    451472} 
    452473 
     474/*! 
     475  Process the received request, pushing the global indices and server indices into the map 
     476  \param[in] buffIndexGlobal pointer to the beginning of the buffer containing global indices 
     477  \param[in] buffIndexServer pointer to the beginning of the buffer containing server indices 
     478  \param[in] count size of received message 
     479*/ 
    453480void CClientServerMappingDistributed::processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count) 
    454481{ 
     
    457484} 
    458485 
     486/*! 
     487  Compute size of message containing global index 
     488  \param[in] requestRecv request of message 
     489*/ 
    459490int CClientServerMappingDistributed::computeBuffCountIndexGlobal(MPI_Request& requestRecv) 
    460491{ 
     
    471502} 
    472503 
     504/*! 
     505  Compute size of message containing server index 
     506  \param[in] requestRecv request of message 
     507*/ 
    473508int CClientServerMappingDistributed::computeBuffCountIndexServer(MPI_Request& requestRecv) 
    474509{ 
     
    485520} 
    486521 
    487 void CClientServerMappingDistributed::resetRequestAndCount() 
     522/*! 
     523  Reset all receiving request map and counter 
     524*/ 
     525void CClientServerMappingDistributed::resetReceivingRequestAndCount() 
    488526{ 
    489527  countIndexGlobal_ = countIndexServer_ = 0; 
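
The two probe functions documented above follow the usual MPI pattern: detect a pending message with MPI_Iprobe, size the receive with MPI_Get_count, then post a matching MPI_Irecv. A hedged sketch of that pattern for the global index messages, reusing tag 15 as in the diff; the function name and the offset bookkeeping are illustrative simplifications, not the XIOS code:

    #include <map>
    #include <mpi.h>

    // Sketch: probe for an incoming global-index message and post a
    // non-blocking receive into the next free slot of recvBuff.
    void probeGlobalIndexMessage(unsigned long* recvBuff, int& offset,
                                 const MPI_Comm& comm,
                                 std::map<int, MPI_Request>& pendingRecv)
    {
      int flag = 0;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, 15, comm, &flag, &status);  // tag 15: global index
      if (flag)
      {
        int count = 0;
        MPI_Get_count(&status, MPI_UNSIGNED_LONG, &count);   // size the receive
        MPI_Request request;
        MPI_Irecv(recvBuff + offset, count, MPI_UNSIGNED_LONG,
                  status.MPI_SOURCE, 15, comm, &request);
        pendingRecv[status.MPI_SOURCE] = request;
        offset += count;
      }
    }
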
  • XIOS/trunk/src/client_server_mapping_distributed.hpp

    r568 r569  
     1/*! 
     2   \file client_server_mapping_distributed.hpp 
     3   \author Ha NGUYEN 
     4   \since 27 Feb 2015 
     5   \date 09 March 2015 
     6 
     7   \brief Mapping between index client and server. 
     8   Clients pre-calculate all information of server distribution. 
     9 */ 
     10 
    111#ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__ 
    212#define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__ 
     
    1020namespace xios 
    1121{ 
    12  
     22/*! 
     23  \class CClientServerMappingDistributed 
     24  This class computes the indices of data sent to the servers, as well as the indices of data 
     25on the server side, with a distributed algorithm. Each client has a piece of information about the distribution 
     26of servers. To discover all this information, all clients first join a discovery process in which each client 
     27announces to the others the information it has and demands from the others the information it lacks. After this process, 
     28each client has enough information to decide which client it needs to ask for the server corresponding to a global index. 
     29The algorithm relies on hashed indices. 
     30*/ 
    1331class CClientServerMappingDistributed : public CClientServerMapping 
    1432{ 
     
    1735    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    1836                                    const MPI_Comm& clientIntraComm); 
     37 
     38    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient); 
    1939 
    2040    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient, 
     
    2545 
    2646  protected: 
     47    // Redistribute global index and server index among clients 
    2748    void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    2849                                       const MPI_Comm& clientIntraComm); 
    2950 
    30     void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count); 
    31  
    32     void testSendRequest(std::list<MPI_Request>& sendRequest); 
    33  
    34     int computeBuffCount(MPI_Request& requestRecvIndexGlobal, MPI_Request& requestRecvIndexServer); 
    35  
    36     void computeHashIndex(); 
    37  
    38 //    void sendIndexServerToClients(std::map<int, std::vector<int> >& indexServer, 
    39 //                                  const MPI_Comm& clientIntraComm, 
    40 //                                  std::list<MPI_Request>& requestSendIndexServer); 
    41  
    42 //    void sendIndexGlobalToClients(std::map<int, std::vector<size_t> >& indexGlobal, 
    43 //                                  const MPI_Comm& clientIntraComm, 
    44 //                                  std::list<MPI_Request>& requestSendIndexGlobal); 
    45  
     51    // Send server index to clients 
    4652    void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer, 
    4753                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer); 
    4854 
     55    // Send global index to clients 
    4956    void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal, 
    5057                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal); 
    5158 
    52     void resetRequestAndCount(); 
     59    // Verify sending request 
     60    void testSendRequest(std::list<MPI_Request>& sendRequest); 
    5361 
     62    // Process request 
     63    void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count); 
     64 
     65    // Probe and receive message of global index 
    5466    void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount); 
    5567 
     68    // Probe and receive message of server index 
    5669    void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount); 
    5770 
     71    // Compute range of hashing 
     72    void computeHashIndex(); 
     73 
     74    // Compute size of receiving buffer for global index 
    5875    int computeBuffCountIndexGlobal(MPI_Request& requestRecv); 
    5976 
     77    // Compute size of receiving buffer for server index 
    6078    int computeBuffCountIndexServer(MPI_Request& requestRecv); 
     79 
     80    // Reset request map 
     81    void resetReceivingRequestAndCount(); 
     82 
    6183  private: 
     84    //! Mapping of global index to the corresponding server 
    6285    boost::unordered_map<size_t,int> globalIndexToServerMapping_; 
    6386 
     87    //! Bounds of hash index 
     88    std::vector<size_t> indexClientHash_; 
     89 
     90    //! Number of clients 
     91    int nbClient_; 
     92 
     93    //! Rank of client 
     94    int clientRank_; 
     95 
     96    //! Counter of the receiving buffer for global indices 
     97    int countIndexGlobal_; 
     98 
     99    //! Counter of the receiving buffer for server indices 
     100    int countIndexServer_; 
     101 
     102    //! Intracommunicator of the clients 
     103    MPI_Comm clientIntraComm_; 
     104 
     105    //! Requests returned by MPI_Irecv for global index messages 
    64106    std::map<int, MPI_Request> requestRecvIndexGlobal_; 
    65107 
     108    //! Requests returned by MPI_Irecv for server index messages 
    66109    std::map<int, MPI_Request> requestRecvIndexServer_; 
    67110 
    68     std::vector<size_t> indexClientHash_; 
    69  
    70     int nbClient_; 
    71  
    72     int clientRank_; 
    73  
    74     int countIndexGlobal_; 
    75  
    76     int countIndexServer_; 
    77  
    78     MPI_Comm clientIntraComm_; 
    79  
     111    //! Mapping from client rank to the beginning position of the receiving buffer for global index messages from this client 
    80112    std::map<int, unsigned long*> indexGlobalBuffBegin_; 
    81113 
     114    //! Mapping from client rank to the beginning position of the receiving buffer for server index messages from this client 
    82115    std::map<int, int*> indexServerBuffBegin_; 
    83  
    84116}; 
    85117 
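
The hashed-index scheme described in the class comment boils down to cutting the size_t space into nbClient contiguous ranges whose bounds are precomputed in indexClientHash_; the owner of a global index is then found by binary search, matching the std::distance(itbClientHash, itClientHash)-1 idiom in the .cpp diff. A minimal sketch, assuming ascending bounds with nbClient+1 entries where client k owns [bounds[k], bounds[k+1]):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Sketch: find which client owns a global index, given the hash bounds.
    int clientOwningIndex(const std::vector<std::size_t>& indexClientHash,
                          std::size_t globalIndex)
    {
      std::vector<std::size_t>::const_iterator it =
        std::upper_bound(indexClientHash.begin(), indexClientHash.end(), globalIndex);
      return static_cast<int>(std::distance(indexClientHash.begin(), it)) - 1;
    }
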
  • XIOS/trunk/src/node/axis.cpp

    r568 r569  
    126126      this->zoom_end.setValue(zoom_end) ; 
    127127      this->zoom_size.setValue(zoom_size) ; 
    128  
    129       // compute client zoom indices 
    130 //      zoom_begin_client = ibegin_client > zoom_begin ? begin_client : zoom_begin ; 
    131 //      zoom_end_client   = iend_client < zoom_end ? iend_client : zoom_end ; 
    132 //      zoom_size_client  = zoom_end_client-zoom_begin_client+1 ; 
    133 //      if (zoom_ni_client<0) zoom_ni_client=0 ; 
    134128   } 
    135129 
  • XIOS/trunk/src/node/context.cpp

    r567 r569  
    330330 
    331331      // At last, we have all info of domain and axis, then send them 
    332 //       sendRefDomainsAxis(); 
     332       sendRefDomainsAxis(); 
    333333 
    334334      // After that, send all grid (if any) 
     
    799799 
    800800   //! Client side: Send information of reference domain and axis of active fields 
    801 //   void CContext::sendRefDomainsAxis() 
    802 //   { 
    803 //     std::set<StdString> domainIds; 
    804 //     std::set<StdString> axisIds; 
    805 // 
    806 //     // Find all reference domain and axis of all active fields 
    807 //     int numEnabledFiles = this->enabledFiles.size(); 
    808 //     for (int i = 0; i < numEnabledFiles; ++i) 
    809 //     { 
    810 //       std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields(); 
    811 //       int numEnabledFields = enabledFields.size(); 
    812 //       for (int j = 0; j < numEnabledFields; ++j) 
    813 //       { 
    814 //         const std::pair<StdString, StdString>& prDomAxisId = enabledFields[j]->getDomainAxisIds(); 
    815 //         domainIds.insert(prDomAxisId.first); 
    816 //         axisIds.insert(prDomAxisId.second); 
    817 //       } 
    818 //     } 
    819 // 
    820 //     // Create all reference axis on server side 
    821 //     std::set<StdString>::iterator itDom, itAxis; 
    822 //     std::set<StdString>::const_iterator itE; 
    823 // 
    824 //     StdString axiDefRoot("axis_definition"); 
    825 //     CAxisGroup* axisPtr = CAxisGroup::get(axiDefRoot); 
    826 //     itE = axisIds.end(); 
    827 //     for (itAxis = axisIds.begin(); itAxis != itE; ++itAxis) 
    828 //     { 
    829 //       if (!itAxis->empty()) 
    830 //       { 
    831 //         axisPtr->sendCreateChild(*itAxis); 
    832 //         CAxis::get(*itAxis)->sendAllAttributesToServer(); 
    833 //       } 
    834 //     } 
    835 // 
    836 //     // Create all reference domains on server side 
    837 //     StdString domDefRoot("domain_definition"); 
    838 //     CDomainGroup* domPtr = CDomainGroup::get(domDefRoot); 
    839 //     itE = domainIds.end(); 
    840 //     for (itDom = domainIds.begin(); itDom != itE; ++itDom) 
    841 //     { 
    842 //       if (!itDom->empty()) { 
    843 //          domPtr->sendCreateChild(*itDom); 
    844 //          CDomain::get(*itDom)->sendAllAttributesToServer(); 
    845 //       } 
    846 //     } 
    847 //   } 
     801   void CContext::sendRefDomainsAxis() 
     802   { 
     803     std::set<StdString> domainIds; 
     804     std::set<StdString> axisIds; 
     805 
     806     // Find all reference domain and axis of all active fields 
     807     int numEnabledFiles = this->enabledFiles.size(); 
     808     for (int i = 0; i < numEnabledFiles; ++i) 
     809     { 
     810       std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields(); 
     811       int numEnabledFields = enabledFields.size(); 
     812       for (int j = 0; j < numEnabledFields; ++j) 
     813       { 
     814         const std::pair<StdString, StdString>& prDomAxisId = enabledFields[j]->getRefDomainAxisIds(); 
     815         domainIds.insert(prDomAxisId.first); 
     816         axisIds.insert(prDomAxisId.second); 
     817       } 
     818     } 
     819 
     820     // Create all reference axis on server side 
     821     std::set<StdString>::iterator itDom, itAxis; 
     822     std::set<StdString>::const_iterator itE; 
     823 
     824     StdString axiDefRoot("axis_definition"); 
     825     CAxisGroup* axisPtr = CAxisGroup::get(axiDefRoot); 
     826     itE = axisIds.end(); 
     827     for (itAxis = axisIds.begin(); itAxis != itE; ++itAxis) 
     828     { 
     829       if (!itAxis->empty()) 
     830       { 
     831         axisPtr->sendCreateChild(*itAxis); 
     832         CAxis::get(*itAxis)->sendAllAttributesToServer(); 
     833       } 
     834     } 
     835 
     836     // Create all reference domains on server side 
     837     StdString domDefRoot("domain_definition"); 
     838     CDomainGroup* domPtr = CDomainGroup::get(domDefRoot); 
     839     itE = domainIds.end(); 
     840     for (itDom = domainIds.begin(); itDom != itE; ++itDom) 
     841     { 
     842       if (!itDom->empty()) { 
     843          domPtr->sendCreateChild(*itDom); 
     844          CDomain::get(*itDom)->sendAllAttributesToServer(); 
     845       } 
     846     } 
     847   } 
    848848 
    849849   //! Update calendar in each time step 
  • XIOS/trunk/src/node/distribution_client.cpp

    r567 r569  
    33   \author Ha NGUYEN 
    44   \since 13 Jan 2015 
    5    \date 09 Feb 2015 
     5   \date 09 March 2015 
    66 
    77   \brief Index distribution on client side. 
     
    5656  itbDom  = itDom  = domList.begin();  iteDom  = domList.end(); 
    5757  itbAxis = itAxis = axisList.begin(); iteAxis = axisList.end(); 
    58  
    59   // First of all, every attribute of domain and axis should be checked 
    60 //  for (;itDom != iteDom; ++itDom) (*itDom)->checkAttributesOnClient(); 
    61 //  for (;itAxis != iteAxis; ++itAxis) (*itAxis)->checkAttributesOnClient(); 
    6258 
    6359  readDistributionInfo(domList, axisList, axisDomainOrder); 
     
    574570} 
    575571 
     572/*! 
     573  Return local data index on client which are sent to servers 
     574*/ 
    576575const CArray<int,1>& CDistributionClient::getLocalDataIndexSendToServerOnClient() const 
    577576{ 
  • XIOS/trunk/src/node/distribution_client.hpp

    r567 r569  
    33   \author Ha NGUYEN 
    44   \since 13 Jan 2015 
    5    \date 09 Feb 2015 
     5   \date 09 March 2015 
    66 
    77   \brief Index distribution on client side. 
     
    112112}; 
    113113 
     114/*! 
     115  A grid can have multiple dimensions, and so can its mask, in the form of a multi-dimensional array. 
     116It is not a good idea to store a multi-dimensional array for each mask. 
     117One way is to convert this array into a one-dimensional one, on which all further processing takes place. 
     118  \param [in] gridMask multi-dimensional grid mask array 
     119*/ 
    114120template<int N> 
    115121void CDistributionClient::readGridMaskInfo(const CArray<bool,N>& gridMask) 
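
The comment above motivates flattening the multi-dimensional grid mask to one dimension so that all later index computations work on a single array. A minimal illustration of that idea for a 2-D mask in row-major order, independent of the XIOS CArray types:

    #include <cstddef>
    #include <vector>

    // Sketch: flatten a 2-D mask into a 1-D array so downstream processing
    // can treat the grid as a flat list of points.
    std::vector<bool> flattenMask(const std::vector<std::vector<bool> >& mask2d)
    {
      std::vector<bool> mask1d;
      for (std::size_t j = 0; j < mask2d.size(); ++j)
        for (std::size_t i = 0; i < mask2d[j].size(); ++i)
          mask1d.push_back(mask2d[j][i]);
      return mask1d;
    }
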
  • XIOS/trunk/src/node/domain.cpp

    r553 r569  
    1515#include "array_new.hpp" 
    1616#include "server_distribution_description.hpp" 
     17#include "client_server_mapping_distributed.hpp" 
    1718 
    1819namespace xios { 
     
    667668    int zoom_jend=zoom_jbegin+zoom_nj-1 ; 
    668669 
    669     std::vector<int> nGlobDomain(2); 
    670     nGlobDomain[0] = ni_glo.getValue(); 
    671     nGlobDomain[1] = nj_glo.getValue(); 
    672     CServerDistributionDescription serverDescription(nGlobDomain); 
    673     serverDescription.computeServerDistribution(nbServer, doComputeGlobalIndexServer); 
    674670 
    675671    // Precompute number of index 
     
    706702      } 
    707703 
    708     CClientServerMapping clientServerMap; 
    709     clientServerMap.computeServerIndexMapping(globalIndexDomain, serverDescription.getGlobalIndex()); 
    710     const std::map<int, std::vector<size_t> >& globalIndexDomainOnServer = clientServerMap.getGlobalIndexOnServer(); 
     704     std::vector<int> nGlobDomain(2); 
     705     nGlobDomain[0] = ni_glo.getValue(); 
     706     nGlobDomain[1] = nj_glo.getValue(); 
     707     size_t globalSizeIndex = 1, indexBegin, indexEnd; 
     708     int range, clientSize = client->clientSize; 
     709     for (int i = 0; i < nGlobDomain.size(); ++i) globalSizeIndex *= nGlobDomain[i]; 
     710     indexBegin = 0; 
     711     for (int i = 0; i < clientSize; ++i) 
     712     { 
     713       range = globalSizeIndex / clientSize; 
     714       if (i < (globalSizeIndex%clientSize)) ++range; 
     715       if (i == client->clientRank) break; 
     716       indexBegin += range; 
     717     } 
     718     indexEnd = indexBegin + range - 1; 
     719 
     720    CServerDistributionDescription serverDescription(nGlobDomain); 
     721    serverDescription.computeServerGlobalIndexInRange(nbServer, std::make_pair<size_t,size_t>(indexBegin, indexEnd)); 
     722    CClientServerMapping* clientServerMap = new CClientServerMappingDistributed(serverDescription.getGlobalIndexRange(), 
     723                                                                                client->intraComm); 
     724    clientServerMap->computeServerIndexMapping(globalIndexDomain); 
     725    const std::map<int, std::vector<size_t> >& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 
    711726    std::vector<int> connectedServerRank; 
    712727    for (std::map<int, std::vector<size_t> >::const_iterator it = globalIndexDomainOnServer.begin(); it != globalIndexDomainOnServer.end(); ++it) { 
    713728      connectedServerRank.push_back(it->first); 
    714729    } 
    715     nbConnectedClients_ = clientServerMap.computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank); 
     730    nbConnectedClients_ = clientServerMap->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank); 
    716731    indSrv_ = globalIndexDomainOnServer; 
     732 
     733    delete clientServerMap; 
    717734  } 
    718735 
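
The new block in domain.cpp splits the global index space as evenly as possible across clients before building the distributed mapping: every client gets globalSizeIndex/clientSize indices, and the first globalSizeIndex%clientSize clients take one extra. The same arithmetic as a standalone sketch (function name illustrative):

    #include <cstddef>
    #include <utility>

    // Sketch: [begin, end] range of global indices owned by client `rank`
    // when `globalSize` indices are split as evenly as possible over `nbClient`.
    std::pair<std::size_t, std::size_t>
    clientIndexRange(std::size_t globalSize, int nbClient, int rank)
    {
      std::size_t begin = 0, range = 0;
      for (int i = 0; i <= rank; ++i)
      {
        range = globalSize / nbClient;
        if (static_cast<std::size_t>(i) < globalSize % nbClient) ++range;
        if (i < rank) begin += range;   // ranges of lower ranks come first
      }
      return std::make_pair(begin, begin + range - 1);
    }
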
  • XIOS/trunk/src/node/field.cpp

    r567 r569  
    154154    if (!grid->doGridHaveDataDistributed()) 
    155155    { 
    156        if (0 == client->getClientRank()) 
     156       if (0 == client->clientRank) 
    157157       { 
    158158          for(it=grid->storeIndex_toSrv.begin();it!=grid->storeIndex_toSrv.end();it++) 
     
    799799   \return pair of Domain and Axis id 
    800800   */ 
    801 //   const std::pair<StdString,StdString>& CField::getDomainAxisIds() 
    802 //   { 
    803 //     CGrid* cgPtr = getRelGrid(); 
    804 //     if (NULL != cgPtr) 
    805 //     { 
    806 //       if (NULL != cgPtr->getRelDomain()) domAxisIds_.first = cgPtr->getRelDomain()->getId(); 
    807 //       if (NULL != cgPtr->getRelAxis()) domAxisIds_.second = cgPtr->getRelAxis()->getId(); 
    808 //     } 
    809 // 
    810 //     return (domAxisIds_); 
    811 //   } 
     801   const std::pair<StdString,StdString>& CField::getRefDomainAxisIds() 
     802   { 
     803     CGrid* cgPtr = getRelGrid(); 
     804     if (NULL != cgPtr) 
     805     { 
     806       std::vector<StdString>::iterator it; 
     807       if (!domain_ref.isEmpty()) 
     808       { 
     809         std::vector<StdString> domainList = cgPtr->getDomainList(); 
     810         it = std::find(domainList.begin(), domainList.end(), domain_ref.getValue()); 
     811         if (domainList.end() != it) domAxisIds_.first = *it; 
     812       } 
     813 
     814       if (!axis_ref.isEmpty()) 
     815       { 
     816         std::vector<StdString> axisList = cgPtr->getAxisList(); 
     817         it = std::find(axisList.begin(), axisList.end(), axis_ref.getValue()); 
     818         if (axisList.end() != it) domAxisIds_.second = *it; 
     819       } 
     820     } 
     821     return (domAxisIds_); 
     822   } 
    812823 
    813824   CVariable* CField::addVariable(const string& id) 
  • XIOS/trunk/src/node/field.hpp

    r567 r569  
    157157 
    158158 
    159 //        const std::pair<StdString, StdString>& getDomainAxisIds(); 
     159        const std::pair<StdString, StdString>& getRefDomainAxisIds(); 
    160160      public : 
    161161         /// Propriétés privées /// 
  • XIOS/trunk/src/node/grid.cpp

    r568 r569  
    340340                                                 clientDistribution_->getLocalDataIndexSendToServerOnClient()); 
    341341 
    342 //     clientServerMap_->computeServerIndexMapping(clientDistribution_->getGlobalIndex(), 
    343 //                                                 clientDistribution_->getLocalDataIndexSendToServerOnClient(), 
    344 //                                                 serverDistributionDescription_->getGlobalIndex()); 
    345342     const std::map<int, std::vector<size_t> >& globalIndexOnServer = clientServerMap_->getGlobalIndexOnServer(); 
    346343     std::vector<int> connectedServerRank; 
     
    353350     storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().numElements()); 
    354351     storeIndex_client = (clientDistribution_->getLocalDataIndexOnClient()); 
    355  
    356352   } 
    357353 
     
    457453    if (!doGridHaveDataDistributed()) 
    458454    { 
    459       if (0 == client->getClientRank()) 
     455      if (0 == client->clientRank) 
    460456      { 
    461457       for (int ns = 0; itGlobal != iteMap; ++itGlobal, ++itLocal, ++ns) 
  • XIOS/trunk/src/server_distribution_description.cpp

    r568 r569  
    33   \author Ha NGUYEN 
    44   \since 04 Jan 2015 
    5    \date 09 Feb 2015 
     5   \date 09 March 2015 
    66 
    77   \brief Description of index distribution on server(s). 
     
    1515  : nGlobal_(globalDimensionSize), indexBegin_(), dimensionSizes_(), globalIndex_(), vecGlobalIndex_() 
    1616{ 
    17  
    1817} 
    1918 
    2019CServerDistributionDescription::~CServerDistributionDescription() 
    2120{ 
    22 //  if (0 != globalIndex_) delete globalIndex_; 
    2321  if (!vecGlobalIndex_.empty()) 
    2422    for (int i = 0; i < vecGlobalIndex_.size(); ++i) delete vecGlobalIndex_[i]; 
     
    9290} 
    9391 
     92/*! 
     93  Compute the global indices assigned to a server within a range. E.g.: if a grid has 100 points and 
     94  there are 2 servers, the first one takes indices from 0 to 49, the second from 50 to 99 
     95  \param [in] nServer number of servers 
     96  \param [in] indexBeginEnd beginning and ending index of the range 
     97  \param [in] serType type of server distribution. For now, servers can be distributed by band or by plane 
     98*/ 
    9499void CServerDistributionDescription::computeServerGlobalIndexInRange(int nServer, 
    95100                                        const std::pair<size_t, size_t>& indexBeginEnd, 
     
    117122    size_t ssize = 1, idx = 0; 
    118123    for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j]; 
    119     vecGlobalIndex_[idxServer] = new CArray<size_t,1>(ssize); 
    120124 
    121125    std::vector<int> idxLoop(dim,0); 
    122  
    123126    int innerLoopSize = dimensionSizes_[idxServer][0]; 
    124127 
     
    243246} 
    244247 
     248/*! 
     249  Get global index calculated by computeServerGlobalIndexInRange 
     250*/ 
    245251const boost::unordered_map<size_t,int>& CServerDistributionDescription::getGlobalIndexRange() const 
    246252{ 
  • XIOS/trunk/src/test/test_new_features.f90

    r567 r569  
    1717  INTEGER,PARAMETER :: ni_glo=100 
    1818  INTEGER,PARAMETER :: nj_glo=100 
    19   INTEGER,PARAMETER :: llm=5 
     19  INTEGER,PARAMETER :: llm=10 
    2020  DOUBLE PRECISION  :: lval(llm)=1 
    2121  TYPE(xios_field) :: field_hdl 
     
    135135  CALL xios_get_domain_attr("domain_A",ni=ni,lonvalue=lonvalue) 
    136136 
    137   print *,"ni",ni 
    138   print *,"lonvalue",lonvalue ; 
     137!  print *,"ni",ni 
     138!  print *,"lonvalue",lonvalue ; 
    139139 
    140140  CALL xios_is_defined_field_attr("field_A",enabled=ok) 
     
    146146    CALL xios_update_calendar(ts) 
    147147    CALL xios_send_field("field_A",field_A) 
    148     CALL xios_send_field("field_Axis",field_Axis) 
     148!    CALL xios_send_field("field_Axis",field_Axis) 
    149149    CALL xios_send_field("field_All_Axis",field_All_Axis) 
    150150    CALL wait_us(5000) ; 