source: XIOS/dev/branch_yushan/src/context_client.cpp @ 1103

Last change on this file since 1103 was 1102, checked in by yushan, 7 years ago

clean up

  • Property copyright set to
    Software name : XIOS (Xml I/O Server)
    http://forge.ipsl.jussieu.fr/ioserver
    Creation date : January 2009
    Licence : CeCILL version 2
    see license file in root directory : Licence_CeCILL_V2-en.txt
    or http://www.cecill.info/licences/Licence_CeCILL_V2-en.html
    Holder : CEA/LSCE (Laboratoire des Sciences du Climat et de l'Environnement)
    CNRS/IPSL (Institut Pierre Simon Laplace)
    Project Manager : Yann Meurdesoif
    yann.meurdesoif@cea.fr
  • Property svn:eol-style set to native
File size: 11.0 KB

#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"

namespace xios
{
    CContextClient::CContextClient() {}

    /*!
    \param [in] parent Pointer to the context on the client side
    \param [in] intraComm_ communicator of the client group
    \param [in] interComm_ communicator of the server group
    \param [in] cxtSer Pointer to the context on the server side (only used in attached mode)
    */
    CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)
     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
    {
      context = parent;
      intraComm = intraComm_;
      interComm = interComm_;
      MPI_Comm_rank(intraComm, &clientRank);
      MPI_Comm_size(intraComm, &clientSize);

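      // interComm may be an inter-communicator (separate server processes) or an
      // intra-communicator (e.g. in attached mode), so the server count is queried accordingly.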
      int flag;
      MPI_Comm_test_inter(interComm, &flag);
      if (flag) MPI_Comm_remote_size(interComm, &serverSize);
      else MPI_Comm_size(interComm, &serverSize);

      if (clientSize < serverSize)
      {
        int serverByClient = serverSize / clientSize;
        int remain = serverSize % clientSize;
        int rankStart = serverByClient * clientRank;

        if (clientRank < remain)
        {
          serverByClient++;
          rankStart += clientRank;
        }
        else
          rankStart += remain;

        for (int i = 0; i < serverByClient; i++)
          ranksServerLeader.push_back(rankStart + i);
      }
      else
      {
        int clientByServer = clientSize / serverSize;
        int remain = clientSize % serverSize;

        if (clientRank < (clientByServer + 1) * remain)
        {
          if (clientRank % (clientByServer + 1) == 0)
            ranksServerLeader.push_back(clientRank / (clientByServer + 1));
        }
        else
        {
          int rank = clientRank - (clientByServer + 1) * remain;
          if (rank % clientByServer == 0)
            ranksServerLeader.push_back(remain + rank / clientByServer);
        }
      }
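      // Example of the mapping above: with 2 clients and 5 servers, client 0 leads
      // servers {0,1,2} and client 1 leads servers {3,4}; with 5 clients and 2 servers,
      // only clients 0 and 3 are leaders, of servers 0 and 1 respectively.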

      timeLine = 0;
    }

    /*!
    Send an event to the server. In attached mode, the current context must be reset to the client context afterwards.
    \param [in] event Event sent to the server
    */
    void CContextClient::sendEvent(CEventClient& event)
    {
      list<int> ranks = event.getRanks();
      if (!event.isEmpty())
      {
        list<int> sizes = event.getSizes();

        list<CBufferOut*> buffList = getBuffers(ranks, sizes);

        event.send(timeLine, sizes, buffList);

        checkBuffers(ranks);
      }

      if (isAttachedModeEnabled())
      {
        waitEvent(ranks);
        CContext::setCurrent(context->getId());
      }

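      // The time line advances even when the event is empty on this rank,
      // presumably so that event numbering stays consistent across all clients.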
      timeLine++;
    }

    /*!
    If the client is also a server (attached mode), it should process the incoming event right away after sending one.
    \param [in] ranks list of ranks of the servers connected to this client
    */
    void CContextClient::waitEvent(list<int>& ranks)
    {
      parentServer->server->setPendingEvent();
      while (checkBuffers(ranks))
      {
        parentServer->server->listen();
        parentServer->server->checkPendingRequest();
      }

      while (parentServer->server->hasPendingEvent())
      {
        parentServer->server->eventLoop();
      }
    }

    /*!
    Set up a buffer for each connection to a server and check that there is room to put the event content into it
    \param [in] serverList list of ranks of the connected servers
    \param [in] sizeList size of the message for each connection
    \return List of buffers into which the event can be written
    */
    list<CBufferOut*> CContextClient::getBuffers(list<int>& serverList, list<int>& sizeList)
    {
      list<int>::iterator itServer, itSize;
      list<CClientBuffer*> bufferList;
      map<int,CClientBuffer*>::iterator it;
      list<CClientBuffer*>::iterator itBuffer;
      list<CBufferOut*> retBuffer;
      bool areBuffersFree;

      for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
      {
        it = buffers.find(*itServer);
        if (it == buffers.end())
        {
          newBuffer(*itServer);
          it = buffers.find(*itServer);
        }
        bufferList.push_back(it->second);
      }

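      // Block until every target buffer has room for its message; while waiting, keep
      // checking the buffers and let the local server side make progress (e.g. in attached mode).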
      CTimer::get("Blocking time").resume();
      do
      {
        areBuffersFree = true;
        for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
          areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

        if (!areBuffersFree)
        {
          checkBuffers();
          context->server->listen();
        }
      } while (!areBuffersFree);
      CTimer::get("Blocking time").suspend();

      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
      {
        retBuffer.push_back((*itBuffer)->getBuffer(*itSize));
      }
      return retBuffer;
   }

   /*!
   Create a new buffer for the connection to the server with the given rank
   \param [in] rank rank of the connected server
   */
   void CContextClient::newBuffer(int rank)
   {
      if (!mapBufferSize_.count(rank))
      {
        error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
        mapBufferSize_[rank] = CXios::minBufferSize;
      }
      CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxBufferedEvents);
      // Notify the server
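      // The first message written into a fresh buffer carries the buffer size itself,
      // presumably so that the server can allocate a receive buffer of matching size.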
      CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
      bufOut->put(mapBufferSize_[rank]);
      buffer->checkBuffer();
   }

   /*!
   Check the state of the buffers. A buffer is in the pending state if there is no message in it
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      bool pending = false;
      if (!buffers.empty())
        for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) pending |= itBuff->second->checkBuffer();
      return pending;
   }

   //! Release all buffers
   void CContextClient::releaseBuffers(void)
   {
      map<int,CClientBuffer*>::iterator itBuff;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second;
   }

   /*!
   Check the state of the buffers corresponding to a set of connections
   \param [in] ranks list of ranks of the servers to which the client is connected
   \return state of the buffers: pending (true), ready (false)
   */
   bool CContextClient::checkBuffers(list<int>& ranks)
   {
      list<int>::iterator it;
      bool pending = false;
      for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
      return pending;
   }

   /*!
    * Set the buffer size for each connection. Warning: this function is collective.
    *
    * \param [in] mapSize maps the rank of each connected server to the size of the corresponding buffer
    * \param [in] maxEventSize maps the rank of each connected server to the size of the biggest event
    */
   void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
   {
     mapBufferSize_ = mapSize;

     // Compute the maximum number of events that can be safely buffered.
     double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
     for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
     {
       double ratio = double(it->second) / maxEventSize.at(it->first);
       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
     }
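     // Reduce with MPI_MIN over the intra-communicator so that every client ends up
     // with the same ratio and therefore the same maxBufferedEvents.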
     #ifdef _usingMPI
     MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
     #elif _usingEP
     MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
     #endif

     if (minBufferSizeEventSizeRatio < 1.0)
       ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
             << "The buffer sizes and the maximum events sizes are incoherent.");

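     // For example, if every buffer is at least 4 times the size of its largest event
     // (ratio = 4.0), then maxBufferedEvents = 2*4 + 4 + 1 = 13.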
     maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                          + size_t(minBufferSizeEventSizeRatio)  // one local buffer can always be fully used
                          + 1;                                   // the other local buffer might contain only one event
   }

  /*!
  Get the leading servers in the group of connected servers
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check whether this client is connected to a leading server
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

   /*!
   Finalize the context client: wait for pending requests and release the buffers
   */
   void CContextClient::finalize(void)
   {
     map<int,CClientBuffer*>::iterator itBuff;
     bool stop = true;

     CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
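     // Only the ranks that lead a server connection put the finalize message into the event;
     // the other ranks still send the (empty) event, presumably to keep the time line in step.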
     if (isServerLeader())
     {
       CMessage msg;
       const std::list<int>& ranks = getRanksServerLeader();
       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
         event.push(*itRank, 1, msg);
       sendEvent(event);
     }
     else sendEvent(event);

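     // Wait until every outstanding send request has completed before releasing the buffers.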
     CTimer::get("Blocking time").resume();
     while (stop)
     {
       checkBuffers();
       stop = false;
       for (itBuff = buffers.begin(); itBuff != buffers.end(); ++itBuff) stop |= itBuff->second->hasPendingRequest();
     }
     CTimer::get("Blocking time").suspend();

     std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                           iteMap = mapBufferSize_.end(), itMap;
     StdSize totalBuf = 0;
     for (itMap = itbMap; itMap != iteMap; ++itMap)
     {
       //report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
       //           << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
       totalBuf += itMap->second;
     }
     //report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

     releaseBuffers();
   }
}