source: XIOS/trunk/src/client_client_dht_template.hpp @ 829

Last change on this file since 829 was 829, checked in by mhnguyen, 5 years ago

Refactoring transformation code

+) On exchanging information during transformation, not only global indices are sent but also local indices
+) Correct a bug in distributed hash table (dht)
+) Add new type for dht
+) Clean up some redundant codes

Test
+) On Curie
+) Every test passes
+) Code runs faster in some cases (up to 30%)

File size: 4.5 KB
Line 
1/*!
2   \file client_client_dht_template.hpp
3   \author Ha NGUYEN
4   \since 01 Oct 2015
5   \date 06 Oct 2015
6
7   \brief Distributed hashed table implementation.
8 */
9
10#ifndef __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
11#define __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
12
13#include "xios_spl.hpp"
14#include "array_new.hpp"
15#include "mpi.hpp"
16#include "policy.hpp"
17#include <boost/unordered_map.hpp>
18//#include "utils.hpp"
19#include "dht_data_types.hpp"
20
21namespace xios
22{
// Forward declaration (carrying the default hierarchy policy) so that the typedefs at the
// end of this header and external code can name the template before its full definition.
template<typename T, class HierarchyPolicy = DivideCommByTwo> class CClientClientDHTTemplate;
24
/*!
  \class CClientClientDHTTemplate
  This class provides features similar to \class CClientServerMappingDistributed,
which implements a simple distributed hashed table; moreover, by extending it with a hierarchical
structure, it greatly reduces the number of communications among processes.

  Each client holds a slice of the global (index -> info) table; indices held by other
clients are resolved by message exchange, level by level of the communicator hierarchy
provided by HierarchyPolicy.

  \tparam T               type of the information attached to a global index
  \tparam HierarchyPolicy policy describing how the client intra-communicator is split into
                          levels (default: DivideCommByTwo, see policy.hpp)
*/
template<typename T, typename HierarchyPolicy>
class CClientClientDHTTemplate: public HierarchyPolicy
{
  public:
    //! Type of the stored information
    typedef T InfoType;
    //! Byte size of one info element (presumably used to size the raw byte buffers below)
    static const int infoTypeSize = sizeof(InfoType);
    //! Map: information -> list of global indices carrying that information
    typedef typename boost::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
    //! Map: global index -> associated information
    typedef typename boost::unordered_map<size_t,InfoType> Index2InfoTypeMap;

  public:
    /** Constructor. Redistributes the initial (index -> info) map among the clients of
        \p clientIntraComm. \p hierarLvl presumably sets the number of hierarchy levels
        used for the exchanges — TODO confirm against the implementation. */
    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm,
                             int hierarLvl = 2);

    /** Query the information associated with \p indices; the result is exposed
        afterwards through getInfoIndexMap(). */
    void computeIndexInfoMapping(const CArray<size_t,1>& indices);

    /** Result of the last computeIndexInfoMapping() call (global index -> info). */
    const Index2InfoTypeMap& getInfoIndexMap() const {return infoIndexMapping_; }

    /** Default destructor */
    virtual ~CClientClientDHTTemplate();

  protected:
    // Redistribute index and info among clients of one hierarchy level
    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    // Compute per-client hash values partitioning the global index space among
    // nbClient clients (assumed from the signature — confirm in the .cpp)
    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);

    // Resolve the index -> info lookup restricted to one hierarchy level
    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
                                      const MPI_Comm& intraCommLevel,
                                      int level);

  protected:
    // Probe for and receive incoming global-index messages from other clients,
    // tracking per-sender buffer positions and pending receive requests
    void probeIndexMessageFromClients(unsigned long* recvIndexGlobalBuff,
                                      const int recvNbIndexCount,
                                      int& countIndexGlobal,
                                      std::map<int, unsigned long*>& indexGlobalBuffBegin,
                                      std::map<int, MPI_Request>& requestRecvIndexGlobal,
                                      const MPI_Comm& intraComm);

    // Probe for and receive incoming info messages (raw bytes) from other clients
    void probeInfoMessageFromClients(unsigned char* recvIndexServerBuff,
                                     const int recvNbIndexCount,
                                     int& countIndexServer,
                                     std::map<int, unsigned char*>& indexServerBuffBegin,
                                     std::map<int, MPI_Request>& requestRecvIndexServer,
                                     const MPI_Comm& intraComm);

    // Send server index (info) to clients
    void sendInfoToClients(int clientDestRank, std::vector<InfoType>& indexServer,
                           const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);

    // Send global index to clients
    void sendIndexToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
                            const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);

    // Verify sending requests (test completion of pending non-blocking sends)
    void testSendRequest(std::list<MPI_Request>& sendRequest);

    // Compute size of receiving buffer for global index
    int computeBuffCountIndex(MPI_Request& requestRecv);

    // Compute size of receiving buffer for server index (info)
    int computeBuffCountInfo(MPI_Request& requestRecv);

  protected:
    //! Mapping of global index to the corresponding client
    Index2InfoTypeMap index2InfoMapping_;

    //! A temporary mapping of index to the corresponding information in each level of hierarchy
    Index2InfoTypeMap indexToInfoMappingLevel_;

    //! Data (information) corresponding to global index; returned by getInfoIndexMap()
    Index2InfoTypeMap infoIndexMapping_;

    //! Intracommunicator of all clients (root level of the hierarchy)
    MPI_Comm intraCommRoot_;

    //! Flag to specify whether data is distributed or not
    bool isDataDistributed_;
};
113
//! Ready-made instantiations of the DHT.
typedef CClientClientDHTTemplate<int> CClientClientDHTInt;                // info is a single int
typedef CClientClientDHTTemplate<PairIntInt> CClientClientDHTPairIntInt; // info is presumably a pair of ints (declared in dht_data_types.hpp)
116
117} // namespace xios
118#endif // __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
Note: See TracBrowser for help on using the repository browser.