#ifndef __CONTEXT_CLIENT_HPP__
#define __CONTEXT_CLIENT_HPP__

#include "xios_spl.hpp"
#include "buffer_out.hpp"
#include "buffer_in.hpp"
#include "buffer_client.hpp"
#include "event_client.hpp"
#include "event_server.hpp"
#include "mpi.hpp"
#include "registry.hpp"

namespace xios
{
  class CContext;
  class CContextServer;

  /*!
  \class CContextClient
  A context can exist on both the client and the server side. In order to differentiate
  the role the context plays on each side (e.g. the client sending events, the server
  receiving and processing them), concrete "context" classes are needed for both sides:
  CContextClient builds and sends events from client to server, while CContextServer
  receives these events and processes them.
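
  A minimal usage sketch (illustrative only; the event construction is simplified,
  and classId/eventId stand for hypothetical identifiers):
  \code
  CEventClient event(classId, eventId);
  // ... push one message per destination server rank into the event ...
  client->sendEvent(event);               // copy the event into the server buffers
  client->checkBuffers();                 // let buffered data make progress

  client->finalize();                     // announce the end of this context
  while (!client->isNotifiedFinalized())
    client->checkBuffers();               // progress until the servers acknowledge
  client->releaseBuffers();
  \endcode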
  */
  class CContextClient
  {
    public:
      // Constructor
      CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0);

      // Send event to server
      void sendEvent(CEventClient& event);
      void waitEvent(list<int>& ranks);
      void waitEvent_old(list<int>& ranks);

      // Functions to set/get buffers
      bool getBuffers(const size_t timeLine, const list<int>& serverList, const list<int>& sizeList,
                      list<CBufferOut*>& retBuffers, bool nonBlocking = false);
      void newBuffer(int rank);
      bool checkBuffers(list<int>& ranks);
      bool checkBuffers(void);
      void releaseBuffers(void);
      bool havePendingRequests(void);

      bool isServerLeader(void) const;
      bool isServerNotLeader(void) const;
      const std::list<int>& getRanksServerLeader(void) const;
      const std::list<int>& getRanksServerNotLeader(void) const;

      /*!
       * Check if the attached mode is used.
       *
       * \return true if and only if attached mode is used
       */
      bool isAttachedModeEnabled() const { return isAttached_; }

      static void computeLeader(int clientRank, int clientSize, int serverSize,
                                std::list<int>& rankRecvLeader, std::list<int>& rankRecvNotLeader);

      // Close and finalize context client
      // void closeContext(void);  // never implemented
      bool isNotifiedFinalized(void);
      void finalize(void);

      void setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize);

      int getRemoteSize(void) { return serverSize; }
      int getServerSize(void) { return serverSize; }

      /*! Set the associated server (dual channel client/server). */
      void setAssociatedServer(CContextServer* associatedServer) { associatedServer_ = associatedServer; }
      /*! Get the associated server (dual channel client/server). */
      CContextServer* getAssociatedServer(void) { return associatedServer_; }

    public:
      CContext* context;        //!< Context for client
      size_t timeLine;          //!< Timeline of each event
      int clientRank;           //!< Rank of current client
      int clientSize;           //!< Size of client group
      int serverSize;           //!< Size of server group
      MPI_Comm interComm;       //!< Communicator of server group (interCommunicator)
      MPI_Comm interCommMerged; //!< Communicator of the client group + server group (intraCommunicator), needed for one-sided communication
      MPI_Comm intraComm;       //!< Communicator of client group
      MPI_Comm commSelf;        //!< Communicator of the client alone. Needed to create a new communicator between one client process and one server process for one-sided communication
      map<int, CClientBuffer*> buffers; //!< Buffers for connection to servers
      bool pureOneSided;        //!< If true, the client communicates with the servers only through one-sided communication; otherwise the hybrid P2P / one-sided mode is used

    private:
      void lockBuffers(list<int>& ranks);
      void unlockBuffers(list<int>& ranks);

      //! Mapping of server rank to buffer size for each connection to a server
      std::map<int, StdSize> mapBufferSize_;
      //! Maximum event sizes estimated for each connection to a server
      std::map<int, StdSize> maxEventSizes;
      //! Maximum number of events that can be buffered
      StdSize maxBufferedEvents;

      //! Context for server (only used in attached mode)
      CContext* parentServer;

      //! List of server ranks for which the client is leader
      std::list<int> ranksServerLeader;
      //! List of server ranks for which the client is not leader
      std::list<int> ranksServerNotLeader;

      std::vector<std::vector<MPI_Win> > windows; //!< One-sided MPI windows exposing the client buffers to the servers == windows[nbServers][2]
      bool isAttached_;
      CContextServer* associatedServer_; //!< The server associated to the pair client/server
  };
}

#endif // __CONTEXT_CLIENT_HPP__
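
// Usage sketch for buffer sizing (illustrative: the sizes below are arbitrary,
// and iterating over the leader ranks is only one plausible way to enumerate
// the connected servers):
//
//   std::map<int, StdSize> bufferSize, maxEventSize;
//   for (int rank : client->getRanksServerLeader())
//   {
//     bufferSize[rank]   = 4 * 1024 * 1024;  // 4 MiB buffer toward this server
//     maxEventSize[rank] = 512 * 1024;       // largest single event expected
//   }
//   client->setBufferSize(bufferSize, maxEventSize);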