source: XIOS/trunk/src/client_server_mapping_distributed.hpp @ 584

Last change on this file since 584 was 584, checked in by mhnguyen, 9 years ago

Implementing new hash algorithm and fixing bug related to zoom

+) Replace boost hash with hash algorithm of Jenkins
+) Domain: if an attribute is non-empty for one client, it should also be non-empty for the others in spite of zoom
+) Replace the way to find the number of client connecting to a server to make sure every server receive a message

Test
+) On Curie
+) test_client: passed and results are same like before
+) test_complete: passed, results are partially the same, the different part comes from added working operation

File size: 4.6 KB
Line 
/*!
   \file client_server_mapping_distributed.hpp
   \author Ha NGUYEN
   \since 27 Feb 2015
   \date 09 March 2015

   \brief Mapping between index client and server.
   Clients pre-calculate all information of server distribution.
*/
10
11#ifndef __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
12#define __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
13
14#include <client_server_mapping.hpp>
15#include "xmlioserver_spl.hpp"
16#include "array_new.hpp"
17#include "mpi.hpp"
18#include <boost/unordered_map.hpp>
19
20namespace xios
21{
/*!
  \class CClientServerMappingDistributed
  This class computes the indices of data sent to the servers as well as the corresponding indices
on the server side, using a distributed algorithm. Each client holds only a piece of information about
the distribution of the servers. To complete it, all clients first join a discovery process in which each
client announces the information it owns and requests from the others the information it lacks. After this
process, each client knows which client to query for the server corresponding to any global index.
The algorithm relies on hashed indices.
*/
31class CClientServerMappingDistributed : public CClientServerMapping
32{
33  public:
34    /** Default constructor */
35    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
36                                    const MPI_Comm& clientIntraComm);
37
38    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer);
39
40//    virtual void computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer,
41//                                           const CArray<int,1>& localIndexOnClientSendToServer);
42
43    std::vector<int> computeConnectedServerRank(const CArray<size_t,1> globalIndexClient);
44
45    /** Default destructor */
46    virtual ~CClientServerMappingDistributed();
47
48
49
50  protected:
51    // Redistribute global index and server index among clients
52    void computeDistributedServerIndex(const boost::unordered_map<size_t,int>& globalIndexOfServer,
53                                       const MPI_Comm& clientIntraComm);
54
55    // Send server index to clients
56    void sendIndexServerToClients(int clientDestRank, std::vector<int>& indexServer,
57                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexServer);
58
59    // Send global index to clients
60    void sendIndexGlobalToClients(int clientDestRank, std::vector<size_t>& indexGlobal,
61                                  const MPI_Comm& clientIntraComm, std::list<MPI_Request>& requestSendIndexGlobal);
62
63    // Verify sending request
64    void testSendRequest(std::list<MPI_Request>& sendRequest);
65
66    // Process request
67    void processReceivedRequest(unsigned long* buffIndexGlobal, int* buffIndexServer, int count);
68
69    // Probe and receive message of global index
70    void probeIndexGlobalMessageFromClients(unsigned long* recvIndexGlobalBuff, int recvNbIndexCount);
71
72    // Probe and receive message of server index
73    void probeIndexServerMessageFromClients(int* recvIndexServerBuff, int recvNbIndexCount);
74
75    // Compute range of hashing
76    void computeHashIndex();
77
78    // Compute size of receiving buffer for global index
79    int computeBuffCountIndexGlobal(MPI_Request& requestRecv);
80
81    // Compute size of receiving buffer for server index
82    int computeBuffCountIndexServer(MPI_Request& requestRecv);
83
84    // Reset request map
85    void resetReceivingRequestAndCount();
86
87  private:
88    //! Mapping of global index to the corresponding server
89    boost::unordered_map<size_t,int> globalIndexToServerMapping_;
90
91    //! Bounds of hash index
92    std::vector<size_t> indexClientHash_;
93
94    //! Number of client
95    int nbClient_;
96
97    //! Rank of client
98    int clientRank_;
99
100    //! Counting of buffer for receiving global index
101    int countIndexGlobal_;
102
103    //! Counting of buffer for receiving server index
104    int countIndexServer_;
105
106    //! intracommuntion of clients
107    MPI_Comm clientIntraComm_;
108
109    //! Request returned by MPI_IRecv function about global index
110    std::map<int, MPI_Request> requestRecvIndexGlobal_;
111
112    //! Request returned by MPI_IRecv function about index of server
113    std::map<int, MPI_Request> requestRecvIndexServer_;
114
115    //! Mapping client rank and the beginning position of receiving buffer for message of global index from this client
116    std::map<int, unsigned long*> indexGlobalBuffBegin_;
117
118    //! Mapping client rank and the begining position of receiving buffer for message of server index from this client
119    std::map<int, int*> indexServerBuffBegin_;
120};
121
122} // namespace xios
123#endif // __XIOS_CLIENT_SERVER_MAPPING_DISTRIBUTED_HPP__
Note: See TracBrowser for help on using the repository browser.