source: XIOS3/trunk/src/transport/one_sided_context_server.hpp @ 2433

Last change on this file since 2433 was 2343, checked in by ymipsl, 2 years ago
  • Implement new infrastructure for the transfer protocol.
  • A new, purely one-sided protocol is now available; the previous protocol (legacy, mixed send/recv and one-sided) is still available. Other specific protocols could be implemented more easily in the future.
  • The switch can be operated with the "transport_protocol" variable in the XIOS context:

ex:
<variable id="transport_protocol" type="string">one_sided</variable>

Available protocols are : one_sided, legacy or default. The default protocol is "legacy".

YM

  • Property svn:executable set to *
File size: 3.8 KB
Line 
1#ifndef __ONE_SIDED_CONTEXT_SERVER_HPP__
2#define __ONE_SIDED_CONTEXT_SERVER_HPP__
3#include "xios_spl.hpp"
4#include "event_server.hpp"
5#include "buffer_server.hpp"
6#include "one_sided_server_buffer.hpp"
7#include "one_sided_server_base.hpp"
8#include "mpi.hpp"
9#include "event_scheduler.hpp"
10#include "context_server.hpp"
11
12namespace xios
13{
14  class CContext ;
15  class CContextClient;
16
17  class COneSidedContextServer : public CContextServer, public COneSidedServerBase
18  {
19   
20    public:
21
22      COneSidedContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ;
23      ~COneSidedContextServer() ;
24      bool eventLoop(bool enableEventsProcessing = true);
25      void releaseBuffers(void) ;
26
27
28    private:
29
30      class CRequest
31      {
32        public : 
33          CRequest(MPI_Comm& interComm, MPI_Status& status)
34          {
35            rank_=status.MPI_SOURCE ;
36            MPI_Get_count(&status,MPI_CHAR,&count_);
37            buffer_.resize(count_) ;
38            MPI_Irecv(buffer_.data(), count_, MPI_CHAR, rank_, 20, interComm, &request_) ;
39          }
40         
41          ~CRequest() { }
42
43          bool test(void)
44          {
45            int flag ;
46            MPI_Status status ;
47            MPI_Test(&request_, &flag, &status) ;
48            if (flag==true) return true ;
49            else return false ;
50          }
51          int getCount(void) {return count_ ;}
52          int getRank(void) {return rank_ ;}
53          vector<char>& getBuffer(void) { return buffer_;}
54
55        private:
56          int rank_ ;
57          int count_ ;
58          vector<char> buffer_ ;
59          MPI_Request request_ ;
60      };
61   
62   
63    void listen(void) ;
64    void listenPendingRequest(void) ;
65    void processRequest(CRequest& request) ;
66    void checkBuffers(void) ;
67    void processEvents(bool enableEventsProcessing) ;
68   
69    bool hasFinished(void);
70    void dispatchEvent(CEventServer& event) ;
71    bool isCollectiveEvent(CEventServer& event) ;
72    void setPendingEvent(void) ;
73    bool hasPendingEvent(void) ;
74
75    void notifyClientsFinalize(void) ;
76    void freeWindows(void) ; // !<< free Windows for one sided communication
77   
78    MPI_Comm interCommMerged_; //!< Communicator of the client group + server group (intraCommunicator) needed for one sided communication.
79    MPI_Comm commSelf_ ; //!< Communicator for proc alone from interCommMerged
80
81    map<int,COneSidedServerBuffer*> buffers_ ;
82    map<int,size_t> lastTimeLine ; //!< last event time line for a processed request
83    map<int,size_t>::iterator itLastTimeLine ; //!< iterator on lastTimeLine
84    map<int, list<std::pair<MPI_Message,MPI_Status> > > pendingProbe;
85    map<int,MPI_Request> pendingRequest ;
86    map<int,char*> bufferRequest ;
87
88    map<size_t,CEventServer*> events ;
89    size_t currentTimeLine ;
90     
91    bool finished ;
92    bool pendingEvent ;
93    bool scheduled  ;    /*!< event of current timeline is alreading scheduled ? */
94    bool pureOneSided ; //!< if true, client will communicated with servers only trough one sided communication. Otherwise the hybrid mode P2P /One sided is used.
95
96    private:
97 
98      std::map<int, StdSize> mapBufferSize_;
99      std::map<int,MPI_Comm> winComm_ ; //! Window communicators
100      std::map<int,std::vector<MPI_Win> >windows_ ; //! one sided mpi windows to expose client buffers to servers ; No memory will be attached on server side.
101      bool isProcessingEvent_ ;
102      size_t remoteHashId_; //!< the hash is of the calling context client
103     
104      MPI_Comm processEventBarrier_ ;
105      bool eventScheduled_=false;
106      MPI_Request processEventRequest_ ;
107
108      std::list<CRequest> requests_ ;
109
110      std::map<size_t, SPendingEvent> pendingEvents_   ;
111      std::map<size_t, SPendingEvent> completedEvents_ ;
112
113  } ;
114
115}
116
117#endif
Note: See TracBrowser for help on using the repository browser.