source: XIOS/trunk/src/client_server_mapping_distributed.hpp @ 569

Last change on this file since 569 was 569, checked in by mhnguyen, 9 years ago

Correct some bugs in server index discovery and do some code cleanup

+) Add some checking functions to make sure MPI_Isend and MPI_Irecv work correctly (see the sketch below)
+) Add comments to code
+) Remove some redundant code and comments

Test
+) On Curie
+) The new functions are tested in test_new_features.f90; test_client and test_complete work as before
+) Test cases:

  • 3-dimensional grid with 1 domain and 1 axis
  • 3-dimensional grid with 3 axes
  • Attached and connected modes

+) All pass and results are correct

TODO:
+) Fix zoom bug with grid composed of only one axis
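
The request-checking change mentioned above can be illustrated with a short sketch. This is not the XIOS code: it only assumes, as the testSendRequest declaration in the header below suggests, that pending MPI_Isend requests are kept in a std::list<MPI_Request>, and shows how such requests can be polled with MPI_Test and dropped once completed. The function name checkPendingSendRequests is hypothetical.

// Minimal sketch, not taken from XIOS: poll a list of pending non-blocking
// send requests and remove the ones that have completed.
#include <list>
#include <mpi.h>

void checkPendingSendRequests(std::list<MPI_Request>& sendRequests) // hypothetical helper
{
  std::list<MPI_Request>::iterator it = sendRequests.begin();
  while (it != sendRequests.end())
  {
    int flag = 0;
    MPI_Status status;
    MPI_Test(&(*it), &flag, &status);       // non-blocking completion check
    if (flag) it = sendRequests.erase(it);  // the send has completed; forget the request
    else ++it;
  }
}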

File size: 4.5 KB
/*!
   \file client_server_mapping_distributed.hpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between client index and server index.
   Clients pre-calculate all information of the server distribution.
 */

#ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
#define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__

#include <client_server_mapping.hpp>
#include "xmlioserver_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include <boost/unordered_map.hpp>

namespace xios
{
/*!
  \class CClientServerMappingDistributed
  This class computes the index of data sent to the server as well as the index of data
on the server side with a distributed algorithm. Each client holds a piece of information about the
distribution of the servers. To gather all this information, the clients first take part in a discovery
process in which each client announces to the others the information it holds and requests the information
it lacks. After this process, each client knows which client it needs to ask for the server corresponding
to a given global index. The algorithm relies on a hashed index.
*/
class CClientServerMappingDistributed : public CClientServerMapping
{
  public:
    /** Default constructor */
    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                    const MPI_Comm& clientIntraComm);

    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient);

    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClient,
                                           const CArray<int,1>& localIndexOnClient);

    /** Default destructor */
    virtual ~CClientServerMappingDistributed();

  protected:
    // Redistribute global index and server index among clients
    void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                       const MPI_Comm& clientIntraComm);

    // Send server index to clients
    void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    // Send global index to clients
    void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    // Verify sending request
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    // Process request
    void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count);

    // Probe and receive message of global index
    void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount);

    // Probe and receive message of server index
    void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount);

    // Compute range of hashing
    void computeHashIndex();

    // Compute size of receiving buffer for global index
    int computeBuffCountIndexGlobal(MPI_Request& requestRecv);

    // Compute size of receiving buffer for server index
    int computeBuffCountIndexServer(MPI_Request& requestRecv);

    // Reset request map
    void resetReceivingRequestAndCount();

  private:
    //! Mapping of a global index to the corresponding server
    boost::unordered_map<size_t,int> globalIndexToServerMapping_;

    //! Bounds of the hash index
    std::vector<size_t> indexClientHash_;

    //! Number of clients
    int nbClient_;

    //! Rank of the client
    int clientRank_;

    //! Count of the receiving buffer for global index
    int countIndexGlobal_;

    //! Count of the receiving buffer for server index
    int countIndexServer_;

    //! Intracommunicator of the clients
    MPI_Comm clientIntraComm_;

    //! Requests returned by MPI_Irecv for global index
    std::map<int, MPI_Request> requestRecvIndexGlobal_;

    //! Requests returned by MPI_Irecv for server index
    std::map<int, MPI_Request> requestRecvIndexServer_;

    //! Mapping of client rank to the beginning position of the receiving buffer for global index messages from this client
    std::map<int, unsigned long*> indexGlobalBuffBegin_;

    //! Mapping of client rank to the beginning position of the receiving buffer for server index messages from this client
    std::map<int, int*> indexServerBuffBegin_;
};

} // namespace xios
#endif // __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
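
The class documentation above describes a discovery process driven by a hashed index: each client is responsible for a contiguous range of (hashed) global indices and answers queries about indices falling in its range. The sketch below only illustrates that partitioning idea; it is not the body of computeHashIndex(). The helper names, the even splitting of the index space, and the use of the raw global index in place of a real hash function are all assumptions.

// Illustrative sketch (assumed, not from XIOS): split an index space of size
// nbGlobalIndex into nbClient contiguous ranges, so that the client owning a
// range can be asked which server holds a given global index.
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical helper: bounds[i] .. bounds[i+1]-1 is the range handled by client i.
std::vector<std::size_t> computeHashBounds(std::size_t nbGlobalIndex, int nbClient)
{
  std::vector<std::size_t> bounds(nbClient + 1);
  for (int i = 0; i <= nbClient; ++i)
    bounds[i] = (nbGlobalIndex / nbClient) * i + std::min<std::size_t>(i, nbGlobalIndex % nbClient);
  return bounds;
}

// Hypothetical helper: rank of the client to query about a given global index.
int clientInChargeOf(std::size_t globalIndex, const std::vector<std::size_t>& bounds)
{
  // First bound strictly greater than globalIndex; the owner is the range just before it.
  std::vector<std::size_t>::const_iterator it =
      std::upper_bound(bounds.begin(), bounds.end(), globalIndex);
  return static_cast<int>(it - bounds.begin()) - 1;
}

This mirrors, in simplified form, the role of indexClientHash_ and the discovery exchanges described in the class documentation: a client that lacks the server of a global index first asks the client in charge of the corresponding hash range.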