source: XIOS/dev/dev_olga/src/context_client.cpp @ 1130

Last change on this file since 1130 was 1130, checked in by oabramkina, 7 years ago

Two-level server: merging new grid functionalities and changes in the communication protocol (e.g. non-blocking context finalize, registries, oasis).

Tests on curie: test_client, test_complete, nemo (test_xios2_cmip6.exe).

To do: non-structured grid, check reading, possible bug in client/server initialization (?).

  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du Climat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
  • Property svn:eol-style set to native
File size: 14.1 KB
#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "server.hpp"

namespace xios
{
    /*!
    \param [in] parent Pointer to the context on the client side
    \param [in] intraComm_ intracommunicator of the client group
    \param [in] interComm_ intercommunicator between the client group and the server group
    \param [in] cxtSer Pointer to the context on the server side (only used in attached mode)
    */
    CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

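      // interComm is an intercommunicator when the servers run in separate
      // processes and a plain intracommunicator in attached mode, so the
      // server group size must be queried accordingly.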
      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else  MPI_Comm_size(interComm, &serverSize);

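      // Fewer clients than servers: each client becomes the leader of the
      // block of consecutive server ranks assigned to it, so every server
      // has exactly one leader.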
      if (clientSize < serverSize)
      {
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          ranksServerLeader.push_back(rankStart + i);

        ranksServerNotLeader.resize(0);
      }
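      // More clients than servers: several clients share a server and only
      // the first client mapped to a given server acts as its leader; the
      // others record that server as "not leader".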
      else
      {
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            ranksServerLeader.push_back(clientRank / (clientByServer + 1));
          else
            ranksServerNotLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            ranksServerLeader.push_back(remain + rank / clientByServer);
          else
            ranksServerNotLeader.push_back(remain + rank / clientByServer);
        }
      }

      timeLine = 0;
    }

    /*!
    In attached mode, the current context must be reset to the client context after the event is sent.
    \param [in] event Event to be sent to the server
    */
    void CContextClient::sendEvent(CEventClient& event)
    {
      list<int> ranks = event.getRanks();

      if (!event.isEmpty())
      {
        list<int> sizes = event.getSizes();

        // We force the getBuffers call to be non-blocking on classical servers
        list<CBufferOut*> buffList;
        bool couldBuffer = getBuffers(ranks, sizes, buffList, (!CXios::isClient && (CServer::serverLevel == 0) ));
        //bool couldBuffer = getBuffers(ranks, sizes, buffList, false );

        if (couldBuffer)
        {
          event.send(timeLine, sizes, buffList);

          checkBuffers(ranks);

          if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
          {
            waitEvent(ranks);
            CContext::setCurrent(context->getId());
          }
        }
        else
        {
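          // Not enough room in the client buffers: copy the event into
          // temporary buffers that will be flushed later by
          // sendTemporarilyBufferedEvent().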
          tmpBufferedEvent.ranks = ranks;
          tmpBufferedEvent.sizes = sizes;

          for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++)
            tmpBufferedEvent.buffers.push_back(new CBufferOut(*it));

          event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers);
        }
      }

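      // Every event, even an empty one, consumes a timeline step so that the
      // client and server event streams stay in step.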
      timeLine++;
    }

    /*!
     * Send the temporarily buffered event (if any).
     *
     * \return true if a temporarily buffered event could be sent, false otherwise
     */
    bool CContextClient::sendTemporarilyBufferedEvent()
    {
      bool couldSendTmpBufferedEvent = false;

      if (hasTemporarilyBufferedEvent())
      {
        list<CBufferOut*> buffList;
        if (getBuffers(tmpBufferedEvent.ranks, tmpBufferedEvent.sizes, buffList, true)) // Non-blocking call
        {
          list<CBufferOut*>::iterator it, itBuffer;

          for (it = tmpBufferedEvent.buffers.begin(), itBuffer = buffList.begin(); it != tmpBufferedEvent.buffers.end(); it++, itBuffer++)
            (*itBuffer)->put((char*)(*it)->start(), (*it)->count());

          checkBuffers(tmpBufferedEvent.ranks);

          tmpBufferedEvent.clear();

          couldSendTmpBufferedEvent = true;
        }
      }

      return couldSendTmpBufferedEvent;
    }

    /*!
    If the client is also a server (attached mode), it should process the incoming
    events right away after sending an event.
    \param [in] ranks list of ranks of the servers connected to this client
    */
    void CContextClient::waitEvent(list<int>& ranks)
    {
      parentServer->server->setPendingEvent();
      while (checkBuffers(ranks))
      {
        parentServer->server->listen();
        parentServer->server->checkPendingRequest();
      }

      while (parentServer->server->hasPendingEvent())
      {
        parentServer->server->eventLoop();
      }
    }


    /*!
     * Get buffers for each connection to the servers. This function blocks until there is enough room
     * in the buffers, unless it is explicitly requested to be non-blocking.
     *
     * \param [in] serverList list of ranks of the connected servers
     * \param [in] sizeList sizes of the messages, one per connection
     * \param [out] retBuffers list of buffers that can be used to store an event
     * \param [in] nonBlocking whether this function should be non-blocking
     * \return whether the already allocated buffers could be used
    */
    bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
                                    bool nonBlocking /*= false*/)
    {
      list<int>::const_iterator itServer, itSize;
      list<CClientBuffer*> bufferList;
      map<int,CClientBuffer*>::const_iterator it;
      list<CClientBuffer*>::iterator itBuffer;
      bool areBuffersFree;

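      // Make sure a buffer exists for every requested connection, creating
      // missing ones on demand.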
      for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
      {
        it = buffers.find(*itServer);
        if (it == buffers.end())
        {
          newBuffer(*itServer);
          it = buffers.find(*itServer);
        }
        bufferList.push_back(it->second);
      }

      CTimer::get("Blocking time").resume();
      do
      {
        areBuffersFree = true;
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

        if (!areBuffersFree)
        {
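          // While waiting for room, keep the server part(s) hosted by this
          // process listening so that communication keeps progressing and the
          // wait cannot deadlock.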
          checkBuffers();
          if (CServer::serverLevel == 0)
            context->server->listen();
          else if (CServer::serverLevel == 1)
          {
            context->server->listen();
            for (int i = 0; i < context->serverPrimServer.size(); ++i)
              context->serverPrimServer[i]->listen();
          }
          else if (CServer::serverLevel == 2)
            context->server->listen();
        }
      } while (!areBuffersFree && !nonBlocking);

      CTimer::get("Blocking time").suspend();

      if (areBuffersFree)
      {
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          retBuffers.push_back((*itBuffer)->getBuffer(*itSize));
      }

      return areBuffersFree;
   }

   /*!
   Create a new buffer for the connection to the server with the given rank
   \param [in] rank rank of the connected server
   */
   void CContextClient::newBuffer(int rank)
   {
     if (!mapBufferSize_.count(rank))
     {
       error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
       mapBufferSize_[rank] = CXios::minBufferSize;
     }
     CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxBufferedEvents);
     // Notify the server: the buffer size is sent as the very first message on this connection
     CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
     bufOut->put(mapBufferSize_[rank]);
     buffer->checkBuffer();
   }

   /*!
   Verify the state of the buffers: a buffer is pending as long as the data it holds
   have not yet been delivered to the server
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      bool pending = false;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
        pending |= itBuff->second->checkBuffer();
      return pending;
   }

   //! Release all buffers
   void CContextClient::releaseBuffers()
   {
      map<int,CClientBuffer*>::iterator itBuff;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
          delete itBuff->second;
      buffers.clear();
   }

   /*!
   Verify the state of the buffers corresponding to a set of connections
   \param [in] ranks list of ranks of the servers to which the client is connected
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(list<int>& ranks)
   {
      list<int>::iterator it;
      bool pending = false;
      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
      return pending;
   }

   /*!
    * Set the buffer size for each connection. Warning: this function is collective.
    *
    * \param [in] mapSize maps the rank of each connected server to the size of the corresponding buffer
    * \param [in] maxEventSize maps the rank of each connected server to the size of the biggest event
   */
   void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
   {
     mapBufferSize_ = mapSize;

     // Compute the maximum number of events that can be safely buffered.
     double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
     for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
     {
       double ratio = double(it->second) / maxEventSize.at(it->first);
       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
     }
     MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);

     if (minBufferSizeEventSizeRatio < 1.0)
       ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
             << "The buffer sizes and the maximum events sizes are incoherent.");

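     // For example, with a minimum buffer/event size ratio of 2.5, the limit
     // below evaluates to size_t(5.0) + size_t(2.5) + 1 = 5 + 2 + 1 = 8
     // buffered events.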
     maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                          + size_t(minBufferSizeEventSizeRatio)  // one local buffer can always be fully used
                          + 1;                                   // the other local buffer might contain only one event
   }

   /*!
    Get the ranks of the connected servers for which this client is not the leader
    \return ranks of the non-leader servers
    */
    const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
    {
      return ranksServerNotLeader;
    }

    /*!
    Check if the client connects to at least one server for which it is not the leader
    \return connected to a non-leader server (true), not connected (false)
    */
    bool CContextClient::isServerNotLeader(void) const
    {
      return !ranksServerNotLeader.empty();
    }

  /*!
  Get the ranks of the connected servers for which this client is the leader
  \return ranks of the leader servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check if the client connects to at least one server for which it is the leader
  \return connected as a leader (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

   /*!
   * Finalize the context client and report buffer usage. This function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool stop = false;

    CTimer::get("Blocking time").resume();
    while (hasTemporarilyBufferedEvent())
    {
      checkBuffers();
      sendTemporarilyBufferedEvent();
    }
    CTimer::get("Blocking time").suspend();

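    // The finalize event carries a message only to the server leaders; the
    // other clients participate with an empty event, which still advances
    // their timeline.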
    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    if (isServerLeader())
    {
      CMessage msg;
      const std::list<int>& ranks = getRanksServerLeader();
      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
        event.push(*itRank, 1, msg);
      sendEvent(event);
    }
    else sendEvent(event);

    CTimer::get("Blocking time").resume();
//    while (!stop)
    {
      checkBuffers();
      if (hasTemporarilyBufferedEvent())
        sendTemporarilyBufferedEvent();

      stop = true;
//      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest();
    }
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

    //releaseBuffers(); // moved to CContext::finalize()
  }

  /*!
  Check whether any of the client buffers still has a pending MPI request
  \return pending request exists (true), no pending request (false)
  */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }


}