source: XIOS/trunk/src/client_client_dht_template.hpp @ 889

Last change on this file since 889 was 869, checked in by mhnguyen, 8 years ago

Removing the usage of blocking MPI on transformations

+) Use non-blocking MPI for axis inversion

Test
+) On Curie
+) tests pass

File size: 4.8 KB
/*!
   \file client_client_dht_template.hpp
   \author Ha NGUYEN
   \since 01 Oct 2015
   \date 15 April 2016

   \brief Distributed hashed table implementation.
 */

#ifndef __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__
#define __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__

#include "xios_spl.hpp"
#include "array_new.hpp"
#include "mpi.hpp"
#include "policy.hpp"
#include <boost/unordered_map.hpp>
#include "dht_data_types.hpp"

namespace xios
{
template<typename T, class HierarchyPolicy = DivideAdaptiveComm> class CClientClientDHTTemplate;

/*!
  \class CClientClientDHTTemplate
  This class provides features similar to \class CClientServerMappingDistributed,
implementing a simple distributed hashed table; moreover, by extending it with a hierarchical structure,
it greatly reduces the number of communications among processes.
*/
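// Minimal usage sketch (illustrative only, not part of the original header).
// `comm` and the sample index/info values below are assumptions made for the example.
//
//   boost::unordered_map<size_t, int> indexInfo;        // (global index -> info) pairs owned locally
//   indexInfo[42] = 3;                                   // sample entry
//
//   CClientClientDHTTemplate<int> dht(indexInfo, comm);  // comm: MPI intra-communicator of the clients
//
//   CArray<size_t,1> queried(1);                         // global indices whose info we want to look up
//   queried(0) = 42;
//   dht.computeIndexInfoMapping(queried);                // presumably collective over comm
//
//   // Results: each queried index mapped to the vector of info values found for it.
//   const CClientClientDHTTemplate<int>::Index2VectorInfoTypeMap& found = dht.getInfoIndexMap();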
template<typename T, typename HierarchyPolicy>
class CClientClientDHTTemplate: public HierarchyPolicy
{
  public:
    typedef T InfoType;
    static const int infoTypeSize = sizeof(InfoType);
//    typedef typename boost::unordered_map<InfoType, std::vector<size_t> > InfoType2IndexMap;
    typedef typename boost::unordered_map<size_t,InfoType> Index2InfoTypeMap;
    typedef typename boost::unordered_map<size_t,std::vector<InfoType> > Index2VectorInfoTypeMap;
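    // Illustrative interpretation: Index2InfoTypeMap holds a single info value per
    // global index (e.g. { 42 -> 3 }), while Index2VectorInfoTypeMap allows several
    // info values per index (e.g. { 42 -> {3, 5} }); the latter is the type returned
    // by getInfoIndexMap().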

  public:
    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm);

    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap,
                             const MPI_Comm& clientIntraComm);

    void computeIndexInfoMapping(const CArray<size_t,1>& indices);

    const Index2VectorInfoTypeMap& getInfoIndexMap() const {return indexToInfoMappingLevel_; }
    Index2VectorInfoTypeMap& getInfoIndexMap() {return indexToInfoMappingLevel_; }
    int getNbClient() { return nbClient_; }

    /** Default destructor */
    virtual ~CClientClientDHTTemplate();

  protected:
    // Redistribute index and info among clients
    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);

    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap,
                                 const MPI_Comm& intraCommLevel,
                                 int level);


    void computeHashIndex(std::vector<size_t>& indexClientHash, int nbClient);

    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices,
                                      const MPI_Comm& intraCommLevel,
                                      int level);

    void computeSendRecvRank(int level, int rank);

    void sendRecvRank(int level,
                      const std::vector<int>& sendNbRank, const std::vector<int>& sendNbElements,
                      std::vector<int>& recvNbRank, std::vector<int>& recvNbElements);

  protected:
    // Send information to clients
    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
                           const MPI_Comm& clientIntraComm,
                           std::vector<MPI_Request>& requestSendInfo);

    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,
                            const MPI_Comm& clientIntraComm,
                            std::vector<MPI_Request>& requestRecvInfo);

    // Send global index to clients
    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
                            const MPI_Comm& clientIntraComm,
                            std::vector<MPI_Request>& requestSendIndexGlobal);

    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize,
                             const MPI_Comm& clientIntraComm,
                             std::vector<MPI_Request>& requestRecvIndex);

    void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements,
                          const std::vector<int>& recvNbRank, std::vector<int>& recvNbElements);
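    // Note: the send/recv helpers above take vectors of MPI_Request, i.e. they are
    // built around non-blocking MPI point-to-point calls (cf. the change log above
    // about removing blocking MPI usage); completing the requests, e.g. with
    // MPI_Waitall, is presumably left to the calling code.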

  protected:
    //! Mapping of a global index to the corresponding client
    Index2VectorInfoTypeMap index2InfoMapping_;

    //! Mapping of an index to the corresponding information at each level of the hierarchy
    Index2VectorInfoTypeMap indexToInfoMappingLevel_;

    //! Ranks of the clients to send to on each DHT level
    std::vector<std::vector<int> > sendRank_;

    //! Ranks of the clients to receive from on each DHT level
    std::vector<std::vector<int> > recvRank_;

    //! Flag to specify whether data is distributed or not
    bool isDataDistributed_;

    //! Number of clients
    int nbClient_;
};

typedef CClientClientDHTTemplate<int> CClientClientDHTInt;
typedef CClientClientDHTTemplate<PairIntInt> CClientClientDHTPairIntInt;

} // namespace xios
#endif // __XIOS_CLIENT_CLIENT_DHT_TEMPLATE_HPP__