source: XIOS/dev/XIOS_DEV_CMIP6/src/context_client.cpp @ 1378

Last change on this file since 1378 was 1378, checked in by ymipsl, 6 years ago

Attempting to solve a deadlock arising at finalize:
Server 1 must be able to receive buffers from another context (i.e. from server2) while awaiting a free buffer.
To be tested extensively...

YM


  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du Climat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
  • Property svn:eol-style set to native
File size: 15.8 KB
#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "server.hpp"

namespace xios
{
    /*!
    \param [in] parent Pointer to context on client side
    \param [in] intraComm_ communicator of the client group
    \param [in] interComm_ communicator of the server group
    \param [in] cxtSer Pointer to context on server side (only used in attached mode)
    */
    CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else  MPI_Comm_size(interComm, &serverSize);

      computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader);

      timeLine = 0;
    }

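    /*!
    Distribute the connected servers among the clients: for each server, exactly one
    client is designated as its leader (see e.g. CContextClient::finalize, where only
    leaders push the finalize event to the servers they lead).
    \param [in] clientRank rank of this client in the client group
    \param [in] clientSize number of clients
    \param [in] serverSize number of servers
    \param [out] rankRecvLeader ranks of the servers led by this client
    \param [out] rankRecvNotLeader ranks of the servers connected to this client without being led by it
    */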
    void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                       std::list<int>& rankRecvLeader,
                                       std::list<int>& rankRecvNotLeader)
    {
      if ((0 == clientSize) || (0 == serverSize)) return;

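      // More servers than clients: each client leads a contiguous block of servers
      // (the first serverSize % clientSize clients lead one extra server each), e.g.
      // 4 clients and 6 servers give the blocks {0,1}, {2,3}, {4}, {5}.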
      if (clientSize < serverSize)
      {
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          rankRecvLeader.push_back(rankStart + i);

        rankRecvNotLeader.resize(0);
      }
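      // At least as many clients as servers: each server gets a contiguous group of
      // clients and the first client of each group is the leader, e.g. 5 clients and
      // 2 servers give the groups {0,1,2} and {3,4}, with clients 0 and 3 as leaders.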
      else
      {
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            rankRecvLeader.push_back(clientRank / (clientByServer + 1));
          else
            rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            rankRecvLeader.push_back(remain + rank / clientByServer);
          else
            rankRecvNotLeader.push_back(remain + rank / clientByServer);
        }
      }
    }

    /*!
    Send an event to the connected servers. In attached mode, the current context is
    reset to the client context once the event has been processed.
    \param [in] event event to be sent to the servers
    */
    void CContextClient::sendEvent(CEventClient& event)
    {
      list<int> ranks = event.getRanks();

      if (CXios::checkEventSync)
      {
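        // Cheap coherence check: every client contributes its local typeId, classId
        // and timeline to a sum; if all clients agree, dividing the sum by clientSize
        // must give back the local value on every rank.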
        int typeId, classId, typeId_in, classId_in;
        uint64_t timeLine_out;
        typeId_in=event.getTypeId() ;
        classId_in=event.getClassId() ;
        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ;
        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm) ;
        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm) ;
        if (typeId/clientSize!=event.getTypeId() || classId/clientSize!=event.getClassId() || timeLine_out/clientSize!=timeLine)
        {
           ERROR("void CContextClient::sendEvent(CEventClient& event)",
               << "Events are not coherent between clients.");
        }
      }

      if (!event.isEmpty())
      {
        list<int> sizes = event.getSizes();

        // We force the getBuffers call to be non-blocking on classical servers
        list<CBufferOut*> buffList;
        bool couldBuffer = getBuffers(ranks, sizes, buffList, (!CXios::isClient && (CServer::serverLevel == 0) ));
//        bool couldBuffer = getBuffers(ranks, sizes, buffList, CXios::isServer );

        if (couldBuffer)
        {
          event.send(timeLine, sizes, buffList);

          checkBuffers(ranks);

          if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
          {
            waitEvent(ranks);
            CContext::setCurrent(context->getId());
          }
        }
        else
        {
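          // Not enough room in the server buffers: serialize the event into temporary
          // buffers; sendTemporarilyBufferedEvent() will flush it once space is available.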
          tmpBufferedEvent.ranks = ranks;
          tmpBufferedEvent.sizes = sizes;

          for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++)
            tmpBufferedEvent.buffers.push_back(new CBufferOut(*it));
          info(100)<<"DEBUG : temporary event created : timeline "<<timeLine<<endl ;
          event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers);
        }
      }

      timeLine++;
    }

    /*!
     * Send the temporarily buffered event (if any).
     *
     * \return true if a temporarily buffered event could be sent, false otherwise
     */
    bool CContextClient::sendTemporarilyBufferedEvent()
    {
      bool couldSendTmpBufferedEvent = false;

      if (hasTemporarilyBufferedEvent())
      {
        list<CBufferOut*> buffList;
        if (getBuffers(tmpBufferedEvent.ranks, tmpBufferedEvent.sizes, buffList, true)) // Non-blocking call
        {
          list<CBufferOut*>::iterator it, itBuffer;

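          // Copy each temporarily buffered message into the real client buffers
          // that have just been obtained.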
          for (it = tmpBufferedEvent.buffers.begin(), itBuffer = buffList.begin(); it != tmpBufferedEvent.buffers.end(); it++, itBuffer++)
            (*itBuffer)->put((char*)(*it)->start(), (*it)->count());

          info(100)<<"DEBUG : temporary event sent "<<endl ;
          checkBuffers(tmpBufferedEvent.ranks);

          tmpBufferedEvent.clear();

          couldSendTmpBufferedEvent = true;
        }
      }

      return couldSendTmpBufferedEvent;
    }

    /*!
    In attached mode the client is also a server, so after sending an event it must
    process the incoming event right away.
    \param [in] ranks ranks of the servers connected to this client
    */
    void CContextClient::waitEvent(list<int>& ranks)
    {
      parentServer->server->setPendingEvent();
      while (checkBuffers(ranks))
      {
        parentServer->server->listen();
        parentServer->server->checkPendingRequest();
      }

      while (parentServer->server->hasPendingEvent())
      {
       parentServer->server->eventLoop();
      }
    }

    /*!
     * Get buffers for each connection to the servers. This function blocks until there is enough room in the buffers unless
     * it is explicitly requested to be non-blocking.
     *
     * \param [in] serverList ranks of the connected servers
     * \param [in] sizeList size of the message corresponding to each connection
     * \param [out] retBuffers list of buffers that can be used to store an event
     * \param [in] nonBlocking whether this function should be non-blocking
     * \return whether the already allocated buffers could be used
    */
    bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
                                    bool nonBlocking /*= false*/)
    {
      list<int>::const_iterator itServer, itSize;
      list<CClientBuffer*> bufferList;
      map<int,CClientBuffer*>::const_iterator it;
      list<CClientBuffer*>::iterator itBuffer;
      bool areBuffersFree;

      for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
      {
        it = buffers.find(*itServer);
        if (it == buffers.end())
        {
          newBuffer(*itServer);
          it = buffers.find(*itServer);
        }
        bufferList.push_back(it->second);
      }

      CTimer::get("Blocking time").resume();
      do
      {
        areBuffersFree = true;
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

        if (!areBuffersFree)
        {
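          // While waiting for room, keep progressing communications and listening for
          // incoming messages so that buffers of other contexts can still be received
          // (this avoids the finalize deadlock described in the r1378 change log).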
          checkBuffers();
          if (CServer::serverLevel == 0)
            context->server->listen();

          else if (CServer::serverLevel == 1)
          {
            context->server->listen();
            for (int i = 0; i < context->serverPrimServer.size(); ++i)
              context->serverPrimServer[i]->listen();
            CServer::contextEventLoop(false) ; // avoid dead-lock at finalize...
          }

          else if (CServer::serverLevel == 2)
            context->server->listen();

        }
      } while (!areBuffersFree && !nonBlocking);

      CTimer::get("Blocking time").suspend();

      if (areBuffersFree)
      {
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          retBuffers.push_back((*itBuffer)->getBuffer(*itSize));
      }

      return areBuffersFree;
   }

   /*!
   Create a new buffer for the connection to the server with the given rank.
   \param [in] rank rank of the connected server
   */
   void CContextClient::newBuffer(int rank)
   {
      if (!mapBufferSize_.count(rank))
      {
        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
        mapBufferSize_[rank] = CXios::minBufferSize;
        maxEventSizes[rank] = CXios::minBufferSize;
      }
      CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxEventSizes[rank], maxBufferedEvents);
      // Notify the server
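      // (the first message written to a fresh buffer carries the buffer size, so the
      // server can allocate a matching receive buffer)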
      CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
      bufOut->put(mapBufferSize_[rank]); // Stupid C++
      buffer->checkBuffer();
   }

   /*!
   Check the state of all buffers, progressing their pending communications.
   \return true if at least one buffer still has a pending request, false if all are ready
   */
   bool CContextClient::checkBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      bool pending = false;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
        pending |= itBuff->second->checkBuffer();
      return pending;
   }

   //! Release all buffers
   void CContextClient::releaseBuffers()
   {
      map<int,CClientBuffer*>::iterator itBuff;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      {
          delete itBuff->second;
      }
      buffers.clear();
   }

   /*!
   Check the state of the buffers corresponding to the given connections.
   \param [in] ranks ranks of the servers to which the client is connected
   \return true if at least one of these buffers has a pending request, false otherwise
   */
   bool CContextClient::checkBuffers(list<int>& ranks)
   {
      list<int>::iterator it;
      bool pending = false;
      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
      return pending;
   }

   /*!
    * Set the buffer size for each connection. Warning: This function is collective.
    *
    * \param [in] mapSize maps the rank of the connected servers to the size of the corresponding buffer
    * \param [in] maxEventSize maps the rank of the connected servers to the size of the biggest event
   */
   void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
   {
     mapBufferSize_ = mapSize;
     maxEventSizes = maxEventSize;

     // Compute the maximum number of events that can be safely buffered.
     double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
     for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
     {
       double ratio = double(it->second) / maxEventSize.at(it->first);
       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
     }
     MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
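     // Collective reduction: every client ends up with the same worst-case
     // buffer-size/event-size ratio, hence the same maxBufferedEvents everywhere.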

     if (minBufferSizeEventSizeRatio < 1.0)
     {
       ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
             << "The buffer sizes and the maximum event sizes are incoherent.");
     }
     else if (minBufferSizeEventSizeRatio == std::numeric_limits<double>::max())
       minBufferSizeEventSizeRatio = 1.0; // In this case, maxBufferedEvents will never be used but we want to avoid any floating point exception

     maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                          + size_t(minBufferSizeEventSizeRatio)  // one local buffer can always be fully used
                          + 1;                                   // the other local buffer might contain only one event
   }

  /*!
  Get the servers to which this client is connected without being their leader.
  \return ranks of those servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }

  /*!
  Check whether the client is connected to at least one server without being its leader.
  \return connected as non-leader (true), or not (false)
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }

  /*!
  Get the servers for which this client is the leader.
  \return ranks of those servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check whether this client is the leader of at least one connected server.
  \return leader of some server (true), or not (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

   /*!
   * Finalize the context client and report the buffer memory used. Function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool stop = false;

    CTimer::get("Blocking time").resume();
    while (hasTemporarilyBufferedEvent())
    {
      checkBuffers();
      sendTemporarilyBufferedEvent();
    }
    CTimer::get("Blocking time").suspend();

    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    if (isServerLeader())
    {
      CMessage msg;
      const std::list<int>& ranks = getRanksServerLeader();
      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
      {
        info(100)<<"DEBUG : Sending context finalize event to rank "<<*itRank<<endl ;
        event.push(*itRank, 1, msg);
      }
      sendEvent(event);
    }
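    // Non-leader clients call sendEvent with an empty event: nothing is transmitted,
    // but the timeline counter stays synchronized across all clients.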
    else sendEvent(event);

    CTimer::get("Blocking time").resume();
//    while (!stop)
    {
      checkBuffers();
      if (hasTemporarilyBufferedEvent())
        sendTemporarilyBufferedEvent();

      stop = true;
//      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest();
    }
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

    //releaseBuffers(); // moved to CContext::finalize()
  }


  /*!
  Check whether any buffer still has a pending MPI request.
  \return true if at least one request is pending, false otherwise
  */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }


}