source: XIOS/dev/dev_ym/XIOS_COUPLING/src/context_client.hpp @ 1853

Last change on this file since 1853 was 1853, checked in by ymipsl, 4 years ago

Coupling branch : replace hasServer and hasClient combination by the name of correct service : CLIENT, GATHERER or OUT_SERVER.

YM

  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du CLimat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
File size: 4.6 KB
Line 
1#ifndef __CONTEXT_CLIENT_HPP__
2#define __CONTEXT_CLIENT_HPP__
3
4#include "xios_spl.hpp"
5#include "buffer_out.hpp"
6#include "buffer_in.hpp"
7#include "buffer_client.hpp"
8#include "event_client.hpp"
9#include "event_server.hpp"
10#include "mpi.hpp"
11#include "registry.hpp"
12
13namespace xios
14{
15  class CContext;
16  class CContextServer ;
  /*!
  \class CContextClient
  A context can exist on both the client side and the server side. In order to differentiate the role of the
  context on each side (e.g. the client sending events, the server receiving and processing them), concrete
  "context" classes are needed for both sides.
  CContextClient builds and sends events from client to server, whereas CContextServer receives these events
  and processes them.
  */
25  class CContextClient
26  {
27    public:
28      // Contructor
29      CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0);
30
31      // Send event to server
32      void sendEvent(CEventClient& event);
33      void waitEvent(list<int>& ranks);
34      void waitEvent_old(list<int>& ranks);
35
36      // Functions to set/get buffers
37      bool getBuffers(const size_t timeLine, const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers, bool nonBlocking = false);
38      void newBuffer(int rank);
39      bool checkBuffers(list<int>& ranks);
40      bool checkBuffers(void);
41      void releaseBuffers(void);
42      bool havePendingRequests(void);
43
44      bool isServerLeader(void) const;
45      bool isServerNotLeader(void) const;
46      const std::list<int>& getRanksServerLeader(void) const;
47      const std::list<int>& getRanksServerNotLeader(void) const;
48
49  /*!
50   * Check if the attached mode is used.
51   *
52   * \return true if and only if attached mode is used
53   */
54      bool isAttachedModeEnabled() const { return isAttached_ ; } 
55
56      static void computeLeader(int clientRank, int clientSize, int serverSize,
57                                std::list<int>& rankRecvLeader,
58                                std::list<int>& rankRecvNotLeader);
59
60      // Close and finalize context client
61//      void closeContext(void);  Never been implemented.
62      bool isNotifiedFinalized(void) ;
63      void finalize(void);
64
65      void setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize);
66
67      int getRemoteSize(void) {return serverSize;}
68      int getServerSize(void) {return serverSize;}
69
70      /*! set the associated server (dual chanel client/server) */     
71      void setAssociatedServer(CContextServer* associatedServer) { associatedServer=associatedServer_;}
72      /*! get the associated server (dual chanel client/server) */     
73      CContextServer* getAssociatedServer(void) { return associatedServer_;}
74
75    public:
76      CContext* context; //!< Context for client
77
78      size_t timeLine; //!< Timeline of each event
79
80      int clientRank; //!< Rank of current client
81
82      int clientSize; //!< Size of client group
83
84      int serverSize; //!< Size of server group
85
86      MPI_Comm interComm; //!< Communicator of server group (interCommunicator)
87
88      MPI_Comm interCommMerged; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication.
89
90      MPI_Comm intraComm; //!< Communicator of client group
91
92      MPI_Comm commSelf; //!< Communicator of the client alone. Needed to create a new communicator between 1 proc client and 1 proc server for one sided communication
93
94      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers
95
96      bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used.
97
98    private:
99      void lockBuffers(list<int>& ranks) ;
100      void unlockBuffers(list<int>& ranks) ;
101     
102      //! Mapping of server and buffer size for each connection to server
103      std::map<int,StdSize> mapBufferSize_;
104      //! Maximum event sizes estimated for each connection to server
105      std::map<int,StdSize> maxEventSizes;
106      //! Maximum number of events that can be buffered
107      StdSize maxBufferedEvents;
108
109      //! Context for server (Only used in attached mode)
110      CContext* parentServer;
111
112      //! List of server ranks for which the client is leader
113      std::list<int> ranksServerLeader;
114
115      //! List of server ranks for which the client is not leader
116      std::list<int> ranksServerNotLeader;
117
118      std::vector<std::vector<MPI_Win> >windows ; //! one sided mpi windows to expose client buffers to servers == windows[nbServers][2]
119      bool isAttached_ ;
120      CContextServer* associatedServer_ ; //!< The server associated to the pair client/server
121
122  };
123}
124
125#endif // __CONTEXT_CLIENT_HPP__
Note: See TracBrowser for help on using the repository browser.