source: XIOS/trunk/src/client_server_mapping_distributed.hpp @ 720

Last change on this file since 720 was 720, checked in by mhnguyen, 9 years ago

First implementation of hierarchical distributed hashed table

+) Implement dht for int with index of type size_t

Test
+) Local
+) Work correctly

File size: 4.6 KB
Line 
/*!
   \file client_server_mapping_distributed.hpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between client indices and server indices.
   Clients pre-calculate all information about the server distribution.
*/
10
11#ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
12#define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
13
14#include <client_server_mapping.hpp>
15#include "xios_spl.hpp"
16#include "array_new.hpp"
17#include "mpi.hpp"
18#include <boost/unordered_map.hpp>
19#include "client_client_dht.hpp"
20
namespace xios
{
/*!
  \class CClientServerMappingDistributed
  This class computes, with a distributed algorithm, the index of data sent to the servers
as well as the corresponding index on the server side. Each client holds only a piece of
information about the distribution of the servers. To gather the complete picture, all
clients first join a discovery phase in which each client announces the information it owns
and requests the information it lacks. After this phase, each client knows which client to
query for the server rank associated with any global index.
The algorithm relies on hashed indices.
*/
class CClientServerMappingDistributed : public CClientServerMapping
{
  public:
    /** Constructor.
        \param globalIndexOfServer mapping of global index to the server rank holding it
        \param clientIntraComm intra-communicator of the clients
        \param isDataDistributed whether the data is distributed among clients */
    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                    const MPI_Comm& clientIntraComm, bool isDataDistributed = true);

    //! Compute, for each global index this client sends, the rank of the destination server
    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);

    //! Ranks of the servers this client is connected to, deduced from its global indices
    std::vector<int> computeConnectedServerRank(const CArray<size_t,1> globalIndexClient);

    /** Default destructor */
    virtual ~CClientServerMappingDistributed();



  protected:
    //! Redistribute the (global index, server rank) pairs among clients via the hashed distribution
    void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                       const MPI_Comm& clientIntraComm);

    //! Send server ranks to a client (non-blocking; request appended to requestSendIndexServer)
    void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    //! Send global indices to a client (non-blocking; request appended to requestSendIndexGlobal)
    void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    //! Test pending send requests and drop the completed ones
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    //! Process a received request: 'count' (global index, server rank) pairs read from the two buffers
    void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count);

    //! Probe and receive a message of global indices into recvIndexGlobalBuff (capacity recvNbIndexCount)
    void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount);

    //! Probe and receive a message of server ranks into recvIndexServerBuff (capacity recvNbIndexCount)
    void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount);

    //! Compute the hash ranges (bounds stored in indexClientHash_) that assign indices to clients
    void computeHashIndex();

    //! Compute the receiving-buffer size for a global-index message (probed via requestRecv)
    int computeBuffCountIndexGlobal(MPI_Request& requestRecv);

    //! Compute the receiving-buffer size for a server-rank message (probed via requestRecv)
    int computeBuffCountIndexServer(MPI_Request& requestRecv);

    //! Reset the receive-request maps and the receive counters for a new exchange
    void resetReceivingRequestAndCount();

  protected:
    //! Mapping of global index to the rank of the corresponding server
    boost::unordered_map<size_t,int> globalIndexToServerMapping_;

    //! Bounds of the hash ranges: indices hashing into [indexClientHash_[i], indexClientHash_[i+1]) belong to client i
    std::vector<size_t> indexClientHash_;

    //! Number of clients
    int nbClient_;

    //! Rank of this client in clientIntraComm_
    int clientRank_;

    //! Element count of the receiving buffer for global indices
    int countIndexGlobal_;

    //! Element count of the receiving buffer for server ranks
    int countIndexServer_;

    //! Intra-communicator of the clients
    MPI_Comm clientIntraComm_;

    //! Requests returned by MPI_Irecv for global-index messages, keyed by source client rank
    std::map<int, MPI_Request> requestRecvIndexGlobal_;

    //! Requests returned by MPI_Irecv for server-rank messages, keyed by source client rank
    std::map<int, MPI_Request> requestRecvIndexServer_;

    //! Beginning position, per source client rank, of the receiving buffer for global-index messages
    std::map<int, unsigned long*> indexGlobalBuffBegin_;

    //! Beginning position, per source client rank, of the receiving buffer for server-rank messages
    std::map<int, int*> indexServerBuffBegin_;

    //! Flag specifying whether the data is distributed among clients
    bool isDataDistributed_;

    //! Hierarchical distributed hash table used for the index exchange
    CClientClientDHT* ccDHT_;
};

} // namespace xios
127#endif // __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
Note: See TracBrowser for help on using the repository browser.