source: XIOS/dev/dev_olga/src/client_client_dht_template.hpp @ 1620

Last change on this file since 1620 was 1542, checked in by oabramkina, 6 years ago

Replacing Boost's unordered_map and shared_ptr with their STL counterparts.

Two notes for Curie:

  • the contents of an unordered_map can be inspected with DDT only if XIOS has been compiled with the GNU compilers
  • XIOS no longer compiles with PGI (all available PGI versions ship an old STL that does not conform to the C++11 standard)
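
For context, the migration described in the commit message is essentially a drop-in substitution; the following minimal sketch (illustrative only, not taken from the XIOS sources) shows the before/after pattern. The full header, as of r1542, follows the sketch.

// Illustration of the Boost -> STL migration described above (not actual XIOS code).
// Before: Boost containers and smart pointers.
//   #include <boost/unordered_map.hpp>
//   #include <boost/shared_ptr.hpp>
//   boost::unordered_map<size_t, std::vector<int> > index2Info;
//   boost::shared_ptr<int> counter(new int(0));
// After: the C++11 STL counterparts, as used in this header.
#include <unordered_map>
#include <memory>
#include <vector>
#include <cstddef>

std::unordered_map<std::size_t, std::vector<int> > index2Info;  // replaces boost::unordered_map
std::shared_ptr<int> counter = std::make_shared<int>(0);        // replaces boost::shared_ptr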
/*!
   \file client_client_dht_template.hpp
   \author Ha NGUYEN
   \since 01 Oct 2015
   \date 15 April 2016

   \brief Distributed hashed table implementation.
 */
#ifndef __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
#define __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__

#include "xios_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include "policy.hpp"
#include <unordered_map>
#include "dht_data_types.hpp"

namespace xios
{
template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;

/*!
  \class CClientClientDHTTemplate
  This class provides features similar to \class CClientServerMappingDistributed,
which implements a simple distributed hashed table; moreover, by extending it with a hierarchical structure,
it greatly reduces the number of communications among processes.
*/
template<typename T, typename HierarchyPolicy>
class CClientClientDHTTemplate: public HierarchyPolicy
{
  public:
    typedef T InfoType;
    static const int infoTypeSize = sizeof(InfoType);
//    typedef typename std::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
    typedef typename std::unordered_map<size_t,InfoType> Index2InfoTypeMap;
    typedef typename std::unordered_map<size_t,std::vector<InfoType> > Index2VectorInfoTypeMap;

  public:
    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm);

    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm);

    void computeIndexInfoMapping(const CArray<size_t,1>& indices);

    const Index2VectorInfoTypeMap& getInfoIndexMap() const { return indexToInfoMappingLevel_; }
    Index2VectorInfoTypeMap& getInfoIndexMap() { return indexToInfoMappingLevel_; }
    int getNbClient() { return nbClient_; }

    /** Default destructor */
    virtual ~CClientClientDHTTemplate();

  protected:
    CClientClientDHTTemplate(const MPI_Comm& clientIntraComm);

  protected:
    // Redistribute index and info among clients
    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);

    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
                                      const MPI_Comm& intraCommLevel,
                                      int level);

    void computeSendRecvRank(int level, int rank);

    void sendRecvRank(int level,
                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
                      std::vector<int>& recvNbRank, std::vector<int>& recvNbElements);

  protected:
    // Send information to clients
    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
                           const MPI_Comm& clientIntraComm,
                           std::vector<MPI_Request>& requestSendInfo);

    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
                             const MPI_Comm& clientIntraComm,
                             std::vector<MPI_Request>& requestRecvInfo);

    // Send global index to clients
    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                            const MPI_Comm& clientIntraComm,
                            std::vector<MPI_Request>& requestSendIndexGlobal);

    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
                              const MPI_Comm& clientIntraComm,
                              std::vector<MPI_Request>& requestRecvIndex);

    void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements,
                          const std::vector<int>& recvNbRank, std::vector<int>& recvNbElements);

  protected:
    //! Mapping of global index to the corresponding client
    Index2VectorInfoTypeMap index2InfoMapping_;

    //! Mapping of index to the corresponding information at each level of the hierarchy
    Index2VectorInfoTypeMap indexToInfoMappingLevel_;

    //! Ranks of clients to send to on each DHT level
    std::vector<std::vector<int> > sendRank_;

    //! Ranks of clients to receive from on each DHT level
    std::vector<std::vector<int> > recvRank_;

    //! Flag specifying whether data is distributed or not
    bool isDataDistributed_;

    //! Number of clients
    int nbClient_;
};

typedef CClientClientDHTTemplate<int> CClientClientDHTInt;
typedef CClientClientDHTTemplate<size_t> CClientClientDHTSizet;
typedef CClientClientDHTTemplate<double> CClientClientDHTDouble;
typedef CClientClientDHTTemplate<PairIntInt> CClientClientDHTPairIntInt;

} // namespace xios
#endif // __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
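
To make the interface above concrete, here is a hedged usage sketch of the CClientClientDHTInt specialization. It is not code from the XIOS sources; it assumes a valid MPI intra-communicator and that CArray<size_t,1> can be sized and indexed like a blitz++ array, and the variable names (localIndex2Info, queriedIndices, etc.) are hypothetical.

// Hypothetical usage sketch of CClientClientDHTInt (not from the XIOS sources).
#include <cstddef>
// #include "client_client_dht_template.hpp"   // the header shown above

void dhtUsageSketch(const MPI_Comm& comm)
{
  using namespace xios;

  // 1) Each client declares the (global index -> info) pairs it owns locally.
  CClientClientDHTInt::Index2InfoTypeMap localIndex2Info;
  localIndex2Info[42]   = 7;    // hypothetical: global index 42 carries info 7
  localIndex2Info[1000] = 13;

  // The constructor redistributes these pairs among clients by hashing the indices.
  CClientClientDHTInt dht(localIndex2Info, comm);

  // 2) Ask for the info attached to indices that may be owned by other clients.
  CArray<size_t,1> queriedIndices(2);   // assumes blitz-style sizing/indexing
  queriedIndices(0) = 42;
  queriedIndices(1) = 512;

  dht.computeIndexInfoMapping(queriedIndices);

  // 3) The result is exposed as an (index -> vector of info) map.
  const CClientClientDHTInt::Index2VectorInfoTypeMap& result = dht.getInfoIndexMap();
  (void)result;
}

As the class comment notes, the hierarchy policy is there to cut down the number of messages exchanged: lookups are resolved level by level within sub-communicators rather than across the whole intra-communicator at once.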