source: XIOS/dev/branch_yushan/src/context_client.cpp @ 1073

Last change on this file since 1073 was 1070, checked in by yushan, 7 years ago

Preparation for merge from trunk

  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du Climat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
  • Property svn:eol-style set to native
File size: 10.9 KB
#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"

namespace xios
{
    /*!
    \param [in] parent Pointer to the context on the client side
    \param [in] intraComm_ communicator of the client group
    \param [in] interComm_ communicator of the server group
    \param [in] cxtSer Pointer to the context on the server side (only used in attached mode)
    */
    CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

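      // Determine the number of servers: if interComm is a true inter-communicator
      // (separate server processes), use the size of its remote group; otherwise
      // (e.g. attached mode, where interComm is an intra-communicator) use its local size.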
      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else  MPI_Comm_size(interComm, &serverSize);

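      // Compute the list of server "leader" ranks this client is responsible for,
      // spreading the larger group as evenly as possible over the smaller one.
      // Hypothetical example (sizes chosen for illustration only): with clientSize = 3
      // and serverSize = 7, serverByClient = 2 and remain = 1, so client 0 leads
      // servers {0,1,2}, client 1 leads {3,4} and client 2 leads {5,6}. In the opposite
      // case (clientSize = 7, serverSize = 3) only clients 0, 3 and 5 become leaders,
      // of servers 0, 1 and 2 respectively.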
      if (clientSize < serverSize)
      {
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          ranksServerLeader.push_back(rankStart + i);
      }
      else
      {
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            ranksServerLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            ranksServerLeader.push_back(remain + rank / clientByServer);
        }
      }

      timeLine = 0;
    }

    /*!
    Send an event to the connected servers. In attached mode, the event is processed
    immediately and the current context is then reset to the client context.
    \param [in] event Event to send to the server
    */
    void CContextClient::sendEvent(CEventClient& event)
    {
      list<int> ranks = event.getRanks();
      if (!event.isEmpty())
      {
        list<int> sizes = event.getSizes();

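        // Get, for every target server, a buffer region large enough for this event
        // (getBuffers blocks until the space is available), write the event into it,
        // then call checkBuffers so the buffered data can be sent out.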
        list<CBufferOut*> buffList = getBuffers(ranks, sizes);

        event.send(timeLine, sizes, buffList);

        checkBuffers(ranks);
      }

      if (isAttachedModeEnabled())
      {
        waitEvent(ranks);
        CContext::setCurrent(context->getId());
      }

      timeLine++;
    }

    /*!
    If the client is also a server (attached mode), the incoming event must be processed
    right after it has been sent.
    \param [in] ranks list of ranks of the servers connected to this client
    */
    void CContextClient::waitEvent(list<int>& ranks)
    {
      parentServer->server->setPendingEvent();
      while (checkBuffers(ranks))
      {
        parentServer->server->listen();
        parentServer->server->checkPendingRequest();
      }

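      // Run the embedded server's event loop until the event posted above
      // (and any other pending event) has been processed.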
      while (parentServer->server->hasPendingEvent())
      {
        parentServer->server->eventLoop();
      }
    }

    /*!
    Set up a buffer for each connection to a server and wait until each of them can hold
    the requested content
    \param [in] serverList list of ranks of the connected servers
    \param [in] sizeList size of the message for each connection
    \return list of output buffers in which the event can be placed
    */
    list<CBufferOut*> CContextClient::getBuffers(list<int>& serverList, list<int>& sizeList)
    {
      list<int>::iterator itServer, itSize;
      list<CClientBuffer*> bufferList;
      map<int,CClientBuffer*>::iterator it;
      list<CClientBuffer*>::iterator itBuffer;
      list<CBufferOut*> retBuffer;
      bool areBuffersFree;

      for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
      {
        it = buffers.find(*itServer);
        if (it == buffers.end())
        {
          newBuffer(*itServer);
          it = buffers.find(*itServer);
        }
        bufferList.push_back(it->second);
      }

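      // Block until every requested buffer can hold its message; while waiting, keep
      // flushing the client buffers and give the local server a chance to listen so
      // both sides keep making progress.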
      CTimer::get("Blocking time").resume();
      do
      {
        areBuffersFree = true;
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

        if (!areBuffersFree)
        {
          checkBuffers();
          context->server->listen();
        }
      } while (!areBuffersFree);
      CTimer::get("Blocking time").suspend();

      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
      {
        retBuffer.push_back((*itBuffer)->getBuffer(*itSize));
      }
      return retBuffer;
   }

   /*!
   Create a new buffer for the connection to the server with the given rank
   \param [in] rank rank of the connected server
   */
   void CContextClient::newBuffer(int rank)
   {
      if (!mapBufferSize_.count(rank))
      {
        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
        mapBufferSize_[rank] = CXios::minBufferSize;
      }
      CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxBufferedEvents);
      // Notify the server: the buffer size is sent as the first message on this connection
      CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
      bufOut->put(mapBufferSize_[rank]);
      buffer->checkBuffer();
   }

   /*!
   Verify the state of the buffers. A buffer is pending as long as it still holds
   messages waiting to be delivered.
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      bool pending = false;
      if (!buffers.empty())
        for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) pending |= itBuff->second->checkBuffer();
      return pending;
   }

   //! Release all buffers
   void CContextClient::releaseBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second;
   }

   /*!
   Verify the state of the buffers corresponding to a set of connections
   \param [in] ranks list of ranks of the servers this client connects to
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(list<int>& ranks)
   {
      list<int>::iterator it;
      bool pending = false;
      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
      return pending;
   }

   /*!
    * Set the buffer size for each connection. Warning: This function is collective.
    *
    * \param [in] mapSize maps the rank of the connected servers to the size of the corresponding buffer
    * \param [in] maxEventSize maps the rank of the connected servers to the size of the biggest event
   */
   void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
   {
     mapBufferSize_ = mapSize;

     // Compute the maximum number of events that can be safely buffered.
     double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
     for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
     {
       double ratio = double(it->second) / maxEventSize.at(it->first);
       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
     }
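     // Take the most restrictive ratio over all clients so that every process derives
     // the same maxBufferedEvents (this is what makes the function collective).
     // Note: the _usingEP branch below passes the same variable as send and receive
     // buffer; the ep_lib wrapper is assumed to accept this aliasing, which standard
     // MPI would only allow through MPI_IN_PLACE.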
     #ifdef _usingMPI
     MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
     #elif _usingEP
     MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
     #endif

     if (minBufferSizeEventSizeRatio < 1.0)
       ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
             << "The buffer sizes and the maximum events sizes are incoherent.");

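     // Hypothetical example: if every buffer is at least four times larger than the
     // biggest event it may receive, minBufferSizeEventSizeRatio = 4 and
     // maxBufferedEvents = 2*4 + 4 + 1 = 13.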
     maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                          + size_t(minBufferSizeEventSizeRatio)  // one local buffer can always be fully used
                          + 1;                                   // the other local buffer might contain only one event
   }

  /*!
  Get the leader servers in the group of connected servers
  \return ranks of the leader servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check whether this client connects to a leader server
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

   /*!
   Finalize the context client and report buffer memory usage
   */
   void CContextClient::finalize(void)
   {
     map<int,CClientBuffer*>::iterator itBuff;
     bool stop = true;

     CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
     if (isServerLeader())
     {
       CMessage msg;
       const std::list<int>& ranks = getRanksServerLeader();
       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
         event.push(*itRank, 1, msg);
       sendEvent(event);
     }
     else sendEvent(event);

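     // Wait until no buffer still has a pending send request before reporting memory
     // usage and releasing the buffers.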
     CTimer::get("Blocking time").resume();
     while (stop)
     {
       checkBuffers();
       stop = false;
       for (itBuff = buffers.begin(); itBuff != buffers.end(); ++itBuff) stop |= itBuff->second->hasPendingRequest();
     }
     CTimer::get("Blocking time").suspend();

     std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                           iteMap = mapBufferSize_.end(), itMap;
     StdSize totalBuf = 0;
     for (itMap = itbMap; itMap != iteMap; ++itMap)
     {
       report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                  << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
       totalBuf += itMap->second;
     }
     report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

     releaseBuffers();
   }
}