/*!
   \file client_client_dht_template.hpp
   \author Ha NGUYEN
   \since 01 Oct 2015
   \date 06 Oct 2015

   \brief Distributed hashed table implementation.
 */
#ifndef __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
#define __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__

#include "xios_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include "policy.hpp"
#include <boost/unordered_map.hpp>
#include "dht_data_types.hpp"
namespace xios
{
template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;

/*!
  \class CClientClientDHTTemplate
  This class provides features similar to those of \class CClientServerMappingDistributed,
  which implements a simple distributed hashed table. Moreover, by extending it with a hierarchical
  structure, it greatly reduces the number of communications among processes.
  A brief usage sketch is given after the typedefs at the end of this header.
*/
template<typename T, typename HierarchyPolicy>
class CClientClientDHTTemplate: public HierarchyPolicy
{
  public:
    typedef T InfoType;
    static const int infoTypeSize = sizeof(InfoType);
    typedef typename boost::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
    typedef typename boost::unordered_map<size_t,InfoType> Index2InfoTypeMap;

  public:
    /** Constructor: each client passes the part of the index-to-information mapping it holds locally */
    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm,
                             int hierarLvl = 2);

    /** Compute the information associated with the requested global indices */
    void computeIndexInfoMapping(const CArray<size_t,1>& indices);

    /** Return the mapping computed by computeIndexInfoMapping */
    const Index2InfoTypeMap& getInfoIndexMap() const { return indexToInfoMappingLevel_; }

    /** Default destructor */
    virtual ~CClientClientDHTTemplate();

  protected:
    // Redistribute index and info among clients
    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    // Compute the hash boundaries that partition the key space among nbClient clients
    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);

    // Compute the index-to-information mapping within one level of the hierarchy
    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
                                      const MPI_Comm& intraCommLevel,
                                      int level);

    // Compute the ranks to send to and receive from at a given level
    void computeSendRecvRank(int level, int rank);

    // Exchange the number of ranks and elements to be sent/received at a given level
    void sendRecvRank(int level,
                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
                      int& recvNbRank, int& recvNbElements);
  protected:
    // Probe and receive the global-index messages sent by other clients
    void probeIndexMessageFromClients(unsigned long* recvIndexGlobalBuff,
                                      const int recvNbIndexCount,
                                      int& countIndexGlobal,
                                      std::map<int, unsigned long*>& indexGlobalBuffBegin,
                                      std::map<int, MPI_Request>& requestRecvIndexGlobal,
                                      const MPI_Comm& intraComm);

    // Probe and receive the information messages sent by other clients
    void probeInfoMessageFromClients(unsigned char* recvIndexServerBuff,
                                     const int recvNbIndexCount,
                                     int& countIndexServer,
                                     std::map<int, unsigned char*>& infoBuffBegin,
                                     std::map<int, MPI_Request>& requestRecvIndexServer,
                                     const MPI_Comm& intraComm);

    // Send information to clients
    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
                           const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    // Send global index to clients
    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                            const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    // Verify sending request
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    // Compute size of receiving buffer for global index
    int computeBuffCountIndex(MPI_Request& requestRecv);

    // Compute size of receiving buffer for server index
    int computeBuffCountInfo(MPI_Request& requestRecv);
  protected:
    //! Mapping of global index to the corresponding information (e.g. the client that holds it)
    Index2InfoTypeMap index2InfoMapping_;

    //! Mapping of index to the corresponding information at each level of the hierarchy
    Index2InfoTypeMap indexToInfoMappingLevel_;

    //! Ranks to send to at each level of the hierarchy
    std::vector<std::vector<int> > sendRank_;

    //! Ranks to receive from at each level of the hierarchy
    std::vector<std::vector<int> > recvRank_;

    //! Flag to specify whether data is distributed or not
    bool isDataDistributed_;
};
typedef CClientClientDHTTemplate<int> CClientClientDHTInt;
typedef CClientClientDHTTemplate<PairIntInt> CClientClientDHTPairIntInt;
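/* A minimal usage sketch, kept as a comment. It assumes a client intra-communicator
   `clientIntraComm`, the local rank `myRank`, a locally held global index `someGlobalIndex`,
   and a count `nbIndices` of indices to look up; none of these names come from this header.

     // Each client contributes the piece of the (global index -> info) mapping it holds.
     CClientClientDHTInt::Index2InfoTypeMap localIndex2Info;
     localIndex2Info[someGlobalIndex] = myRank;    // hypothetical local entry

     // Build the DHT over the client intra-communicator (hierarLvl defaults to 2).
     CClientClientDHTInt dht(localIndex2Info, clientIntraComm);

     // Ask for the information attached to a set of global indices, possibly owned by
     // other clients, then read the result back from the computed mapping.
     CArray<size_t,1> neededIndices(nbIndices);
     // ... fill neededIndices ...
     dht.computeIndexInfoMapping(neededIndices);
     const CClientClientDHTInt::Index2InfoTypeMap& result = dht.getInfoIndexMap();
*/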
} // namespace xios
#endif // __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__