source: XIOS/trunk/src/client_client_dht.hpp @ 720

Last change on this file since 720 was 720, checked in by mhnguyen, 9 years ago

First implementation of hierarchical distributed hashed table

+) Implement dht for int with index of type size_t

Test
+) Local
+) Work correctly

File size: 5.3 KB
/*!
   \file client_client_dht.hpp
   \author Ha NGUYEN
   \since 15 Sep 2015
   \date 29 Sep 2015

   \brief Distributed hashed table implementation.
 */

#ifndef __XIOS_CLIENT_CLIENT_DHT_HPP__
#define __XIOS_CLIENT_CLIENT_DHT_HPP__

#include "xios_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include <boost/unordered_map.hpp>

namespace xios
{
/*!
  \class CClientClientDHT
  This class provides features similar to \class CClientServerMappingDistributed, which implements a simple distributed hashed table.
  Moreover, by extending it with a hierarchical structure, it greatly reduces the number of communications among processes.
*/
class CClientClientDHT
{
  public:
    /** Constructor */
    CClientClientDHT(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                     const MPI_Comm& clientIntraComm, bool isDataDistributed = true,
                     int hierarLvl = 2);
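    // The parameters are presumably: globalIndexOfServer - this client's piece of the mapping
    // from global index to the rank of the server holding it; clientIntraComm - the
    // intra-communicator of the participating clients; isDataDistributed - whether the index
    // data is distributed among the clients; hierarLvl - the number of levels of the
    // communication hierarchy. (Inferred from the names and the member documentation below.)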

    void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);

    const std::map<int, std::vector<size_t> >& getGlobalIndexOnServer() const { return indexGlobalOnServer_; }
    const boost::unordered_map<size_t,int>& getGlobalIndexServerMapping() const { return globalIndexToServerMapping_; }

    /** Destructor */
    virtual ~CClientClientDHT();

  protected:
    // Redistribute global index and server index among clients
    void computeDistributedIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    void computeMPICommLevel(const MPI_Comm& mpiCommRoot);

    void divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level);

    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);

    virtual void computeIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer,
                                     const MPI_Comm& intraCommLevel,
                                     int level);

  protected:
    void probeIndexMessageFromClients(unsigned long* recvIndexGlobalBuff,
                                      const int recvNbIndexCount,
                                      int& countIndexGlobal,
                                      std::map<int, unsigned long*>& indexGlobalBuffBegin,
                                      std::map<int, MPI_Request>& requestRecvIndexGlobal,
                                      const MPI_Comm& intraComm);

    void probeInfoMessageFromClients(int* recvIndexServerBuff,
                                     const int recvNbIndexCount,
                                     int& countIndexServer,
                                     std::map<int, int*>& indexServerBuffBegin,
                                     std::map<int, MPI_Request>& requestRecvIndexServer,
                                     const MPI_Comm& intraComm);

    // Send server index to clients
    void sendInfoToClients(int clientDestRank, std::vector<int>& indexServer,
                           const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    // Send global index to clients
    void sendIndexToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                            const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    // Verify sending request
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    // Compute size of receiving buffer for global index
    int computeBuffCountIndexGlobal(MPI_Request& requestRecv);

    // Compute size of receiving buffer for server index
    int computeBuffCountIndexServer(MPI_Request& requestRecv);

  protected:
    //! Mapping of global index to the corresponding server
    boost::unordered_map<size_t,int> globalIndexToServerMapping_;

    //! A temporary mapping of index to the corresponding information at each level of the hierarchy
    boost::unordered_map<size_t,int> globalIndexToInfoMappingLevel_;
    std::vector<MPI_Comm> commLevel_;

    int nbLevel_;

    //! Global indexes of data on the SERVER, which are calculated by the client(s)
    std::map<int, std::vector<size_t> > indexGlobalOnServer_;

//    //! Number of clients
//    int nbClient_;
//
//    //! Rank of client
//    int clientRank_;

//    //! Counting of buffer for receiving global index
//    int countIndexGlobal_;
//
//    //! Counting of buffer for receiving server index
//    int countIndexServer_;

    //! Intra-communicator of the clients
    MPI_Comm intraCommRoot_;

//    //! Request returned by MPI_IRecv function about global index
//    std::map<int, MPI_Request> requestRecvIndexGlobal_;
//
//    //! Request returned by MPI_IRecv function about index of server
//    std::map<int, MPI_Request> requestRecvIndexServer_;
//
//    //! Mapping of client rank to the beginning position of the receiving buffer for the global index message from this client
//    std::map<int, unsigned long*> indexGlobalBuffBegin_;
//
//    //! Mapping of client rank to the beginning position of the receiving buffer for the server index message from this client
//    std::map<int, int*> indexServerBuffBegin_;

    //! Flag to specify whether data is distributed or not
    bool isDataDistributed_;
};

} // namespace xios
#endif // __XIOS_CLIENT_CLIENT_DHT_HPP__
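
For orientation, a minimal usage sketch is given below. It only exercises the public interface declared above; the MPI setup, the way the CArray<size_t,1> is filled, and all concrete index and rank values are illustrative assumptions, not code taken from XIOS.

  // Each client contributes the piece of the global-index-to-server mapping it already knows.
  boost::unordered_map<size_t,int> globalIndexOfServer;
  globalIndexOfServer[1024] = 3;   // global index 1024 is assumed to be held by server rank 3
  globalIndexOfServer[2048] = 7;

  // Build the DHT over the clients' intra-communicator with the default two-level hierarchy.
  // clientIntraComm is assumed to be an MPI_Comm provided by the surrounding client code.
  CClientClientDHT dht(globalIndexOfServer, clientIntraComm, true, 2);

  // Ask the DHT which servers hold the global indexes this client wants to send to;
  // some of them may be known only through another client's piece of the table.
  CArray<size_t,1> globalIndexQuery(2);
  globalIndexQuery(0) = 1024;
  globalIndexQuery(1) = 4096;
  dht.computeServerIndexMapping(globalIndexQuery);

  // For each server rank, the list of queried global indexes located on that server.
  const std::map<int, std::vector<size_t> >& indexOnServer = dht.getGlobalIndexOnServer();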