CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
  : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                   std::list<int>& rankRecvLeader,
                                   std::list<int>& rankRecvNotLeader)
{
  if ((0 == clientSize) || (0 == serverSize)) return;

  if (clientSize < serverSize) // each client is leader of one or more servers
  {
    int serverByClient = serverSize / clientSize;
    int remain = serverSize % clientSize;
    int rankStart = serverByClient * clientRank;
    if (clientRank < remain) { serverByClient++; rankStart += clientRank; }
    else rankStart += remain;
    for (int i = 0; i < serverByClient; i++)
      rankRecvLeader.push_back(rankStart + i);
    rankRecvNotLeader.resize(0);
  }
  else // several clients per server: only one of them is the leader
  {
    int clientByServer = clientSize / serverSize;
    int remain = clientSize % serverSize;
    if (clientRank < (clientByServer + 1) * remain)
    {
      if (clientRank % (clientByServer + 1) == 0)
        rankRecvLeader.push_back(clientRank / (clientByServer + 1));
      else
        rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
    }
    else
    {
      int rank = clientRank - (clientByServer + 1) * remain;
      if (rank % clientByServer == 0)
        rankRecvLeader.push_back(remain + rank / clientByServer);
      else
        rankRecvNotLeader.push_back(remain + rank / clientByServer);
    }
  }
}
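To make the arithmetic concrete, here is a minimal standalone sketch (not XIOS code; all names are local) that prints the client-to-server assignment produced by the clientSize >= serverSize branch for 5 clients and 2 servers:

#include <iostream>

int main()
{
  const int clientSize = 5, serverSize = 2;
  const int clientByServer = clientSize / serverSize; // 2
  const int remain = clientSize % serverSize;         // 1

  for (int clientRank = 0; clientRank < clientSize; ++clientRank)
  {
    int server;
    bool leader;
    if (clientRank < (clientByServer + 1) * remain)
    {
      // the first `remain` servers each get one extra client
      server = clientRank / (clientByServer + 1);
      leader = (clientRank % (clientByServer + 1) == 0);
    }
    else
    {
      const int rank = clientRank - (clientByServer + 1) * remain;
      server = remain + rank / clientByServer;
      leader = (rank % clientByServer == 0);
    }
    std::cout << "client " << clientRank << " -> server " << server
              << (leader ? " (leader)" : "") << "\n";
  }
  return 0;
}

Running it shows clients 0-2 attached to server 0 and clients 3-4 to server 1, with clients 0 and 3 acting as leaders: the `remain` lowest-ranked servers receive clientByServer + 1 clients each, and the first client attached to each server becomes its leader.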
list<int> ranks = event.getRanks();

if (checkEventSync)
{
  int typeId, classId, typeId_in, classId_in;
  long long timeLine_out; // 64-bit, to match MPI_LONG_LONG_INT below
  typeId_in  = event.getTypeId();
  classId_in = event.getClassId();
  MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm);
  MPI_Allreduce(&typeId_in, &typeId, 1, MPI_INT, MPI_SUM, intraComm);
  MPI_Allreduce(&classId_in, &classId, 1, MPI_INT, MPI_SUM, intraComm);
  if (typeId / clientSize != event.getTypeId() || classId / clientSize != event.getClassId()
      || timeLine_out / clientSize != timeLine)
    ERROR("void CContextClient::sendEvent(CEventClient& event)",
          << "Events are not coherent between clients.");
}
list<int> sizes = event.getSizes();

list<CBufferOut*> buffList;
event.send(timeLine, sizes, buffList);
for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++)
  tmpBufferedEvent.buffers.push_back(new CBufferOut(*it)); // one temporary buffer per message size

info(100) << "DEBUG : temporary event created : timeline " << timeLine << endl;
bool CContextClient::sendTemporarilyBufferedEvent()
{
  bool couldSendTmpBufferedEvent = false;

  if (hasTemporarilyBufferedEvent())
  {
    list<CBufferOut*> buffList;
    if (getBuffers(tmpBufferedEvent.ranks, tmpBufferedEvent.sizes, buffList, true)) // non-blocking
    {
      list<CBufferOut*>::iterator it, itBuffer;
      for (it = tmpBufferedEvent.buffers.begin(), itBuffer = buffList.begin();
           it != tmpBufferedEvent.buffers.end(); it++, itBuffer++)
        (*itBuffer)->put((char*)(*it)->start(), (*it)->count());

      info(100) << "DEBUG : temporary event sent" << endl;
      checkBuffers(tmpBufferedEvent.ranks);
      tmpBufferedEvent.clear();
      couldSendTmpBufferedEvent = true;
    }
  }
  return couldSendTmpBufferedEvent;
}
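The put() loop above copies each staged buffer into the freshly acquired real buffer. A plain C++ sketch of the same flush pattern, with std::vector<char> standing in for CBufferOut (illustrative only, not XIOS types):

#include <cstring>
#include <iostream>
#include <list>
#include <vector>

int main()
{
  // Staged copies of an event, one chunk per destination rank.
  std::list<std::vector<char>> staged = { {'a', 'b'}, {'c', 'd', 'e'} };

  // Destination buffers of matching sizes, as getBuffers() would hand back.
  std::list<std::vector<char>> destination;
  for (const auto& s : staged) destination.emplace_back(s.size());

  auto it = staged.begin();
  auto itBuffer = destination.begin();
  for (; it != staged.end(); ++it, ++itBuffer)
    std::memcpy(itBuffer->data(), it->data(), it->size()); // put() analogue

  staged.clear(); // tmpBufferedEvent.clear() analogue
  std::cout << "flushed " << destination.size() << " buffers\n";
  return 0;
}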
list<int>::const_iterator itServer, itSize;
list<CClientBuffer*> bufferList;
map<int,CClientBuffer*>::const_iterator it;
list<CClientBuffer*>::iterator itBuffer;
bool areBuffersFree;

for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
{
  it = buffers.find(*itServer);
  if (buffers.end() == it) { newBuffer(*itServer); it = buffers.find(*itServer); }
  bufferList.push_back(it->second);
}

do
{
  areBuffersFree = true;
  for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
    areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);
  // (in the blocking case, pending requests are checked inside the loop to make progress)
} while (!areBuffersFree && !nonBlocking);

if (areBuffersFree)
  for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
    retBuffers.push_back((*itBuffer)->getBuffer(*itSize));

return areBuffersFree;
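The do/while implements acquire-or-retry: in blocking mode the loop keeps making progress on pending requests until every connection can take the requested size, while in non-blocking mode it gives up after one pass. A standalone sketch of the pattern (Buffer and its methods are stand-ins, not the XIOS API):

#include <iostream>
#include <vector>

struct Buffer // illustrative stand-in for CClientBuffer
{
  size_t capacity = 8, used = 0;
  bool isFree(size_t n) const { return used + n <= capacity; }
  void reserve(size_t n) { used += n; }
  void drain() { used = 0; } // stands for progress made by checkBuffer()
};

bool getBuffers(std::vector<Buffer>& bufs, size_t n, bool nonBlocking)
{
  bool allFree;
  do
  {
    allFree = true;
    for (const auto& b : bufs) allFree = allFree && b.isFree(n);
    if (!allFree)
      for (auto& b : bufs) b.drain(); // blocking mode: make progress, then retry
  } while (!allFree && !nonBlocking);

  if (allFree)
    for (auto& b : bufs) b.reserve(n); // claim space only once every buffer fits
  return allFree;
}

int main()
{
  std::vector<Buffer> bufs(2);
  std::cout << std::boolalpha;
  std::cout << getBuffers(bufs, 4, /*nonBlocking=*/true) << "\n";  // true
  std::cout << getBuffers(bufs, 6, /*nonBlocking=*/false) << "\n"; // true, after one drain-and-retry
  return 0;
}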
error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
map<int,CClientBuffer*>::iterator itBuff;
bool pending = false;
for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
  pending |= itBuff->second->checkBuffer();
return pending;
map<int,CClientBuffer*>::iterator itBuff;
for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
  delete itBuff->second;
list<int>::iterator it;
bool pending = false;
for (it = ranks.begin(); it != ranks.end(); it++)
  pending |= buffers[*it]->checkBuffer();
return pending;
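Both checkBuffers() variants fold per-connection status flags with |=, so the result stays true while any connection still has work in flight. A minimal sketch of that shape (the map and poll() are illustrative, not XIOS API):

#include <iostream>
#include <map>

struct Connection // illustrative stand-in for CClientBuffer
{
  int inFlight;
  bool poll() // checkBuffer() analogue: advance one step, report remaining work
  {
    if (inFlight > 0) --inFlight;
    return inFlight > 0;
  }
};

int main()
{
  std::map<int, Connection> conns = { {0, {2}}, {1, {0}} };
  bool pending = false;
  for (auto& kv : conns) pending |= kv.second.poll();
  std::cout << std::boolalpha << pending << "\n"; // true: rank 0 is still busy
  return 0;
}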
double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
{
  double ratio = double(it->second) / maxEventSizes[it->first];
  if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
}
MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);

if (minBufferSizeEventSizeRatio < 1.0)
  ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
        << "The buffer sizes and the maximum event sizes are incoherent.");
else if (minBufferSizeEventSizeRatio == std::numeric_limits<double>::max())
  minBufferSizeEventSizeRatio = 1.0; // no connection at all: keep the arithmetic below safe

maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // room for two local buffers on the server
                  + size_t(minBufferSizeEventSizeRatio)     // one local buffer can always be fully used
                  + 1;                                      // the other may hold only a single event
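A standalone MPI sketch of the sizing rule (the byte counts are made up): each rank computes its worst local buffer-size/event-size ratio, then an MPI_MIN reduction makes every rank agree on the global worst case before the event queue is sized.

#include <mpi.h>
#include <cstdio>
#include <limits>
#include <map>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Illustrative per-connection sizes in bytes (not taken from XIOS).
  std::map<int, size_t> bufferSize   = { {0, 1 << 20}, {1, 2 << 20} };
  std::map<int, size_t> maxEventSize = { {0, 1 << 18}, {1, 1 << 19} };

  double minRatio = std::numeric_limits<double>::max();
  for (const auto& kv : bufferSize)
  {
    double ratio = double(kv.second) / maxEventSize[kv.first];
    if (ratio < minRatio) minRatio = ratio;
  }

  // Every rank must agree on the worst case before sizing its event queue.
  MPI_Allreduce(MPI_IN_PLACE, &minRatio, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);

  if (minRatio < 1.0 && rank == 0)
    std::fprintf(stderr, "buffers too small for the largest event\n");
  else if (rank == 0)
    std::printf("each buffer holds at least %d largest events\n", int(minRatio));

  MPI_Finalize();
  return 0;
}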
map<int,CClientBuffer*>::iterator itBuff;

for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
{
  info(100) << "DEBUG : Sent context Finalize event to rank " << *itRank << endl;
  event.push(*itRank, 1, msg);
}
std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                      iteMap = mapBufferSize_.end(), itMap;

StdSize totalBuf = 0;
for (itMap = itbMap; itMap != iteMap; ++itMap)
{
  report(10) << " Memory report : Context <" << context->getId()
             << "> : client side : memory used for buffer of each connection to server" << endl
             << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
  totalBuf += itMap->second;
}
report(0) << " Memory report : Context <" << context->getId()
          << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;
bool pending = false;
map<int,CClientBuffer*>::iterator itBuff;
for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
  pending |= itBuff->second->hasPendingRequest();
return pending;
CContextClient(CContext *parent, MPI_Comm intraComm, MPI_Comm interComm, CContext *parentServer=0)
static bool isClient
Check whether XIOS is running as a client.
static void computeLeader(int clientRank, int clientSize, int serverSize, std::list< int > &rankRecvLeader, std::list< int > &rankRecvNotLeader)
void sendEvent(CEventClient &event)
In attached mode, the current context must be reset to the client context.
MPI_Comm intraComm
Communicator of client group.
static bool checkEventSync
For debugging: check that events are coherent and synchronous across clients.
void releaseBuffers(void)
Release all buffers.
bool eventLoop(bool enableEventsProcessing=true)
int clientSize
Size of client group.
std::list< int > ranksServerNotLeader
List of server ranks for which the client is not leader.
CContext * parentServer
Event temporarily buffered (used only on the server)
CContextServer * server
Concrete context server.
void checkPendingRequest(void)
void setBufferSize(const std::map< int, StdSize > &mapSize, const std::map< int, StdSize > &maxEventSize)
Set the buffer size for each connection.
static ENodeType GetType(void)
const std::list< int > & getRanksServerNotLeader(void) const
Get the list of server ranks for which the client is not leader.
size_t timeLine
Timeline of each event.
std::map< int, StdSize > mapBufferSize_
Mapping from server rank to buffer size for each connection to the server.
bool checkBuffers(void)
Verify state of buffers.
bool isAttachedModeEnabled() const
Check if the attached mode is used.
const std::list< int > & getRanksServerLeader(void) const
Get the list of server ranks for which the client is leader.
const StdString & getId(void) const
Accessors.
map< int, CClientBuffer * > buffers
Buffers for connection to servers.
bool getBuffers(const list< int > &serverList, const list< int > &sizeList, list< CBufferOut * > &retBuffers, bool nonBlocking=false)
Get buffers for each connection to the servers.
bool hasPendingEvent(void)
static void setCurrent(const string &id)
Set the context with the given id as the current context.
void finalize(void)
Finalize context client and do some reports.
CBufferOut * getBuffer(StdSize size)
std::list< int > ranksServerLeader
List of server ranks for which the client is leader.
static StdSize minBufferSize
Minimum buffer size.
StdSize maxBufferedEvents
Maximum number of events that can be buffered.
int clientRank
Rank of current client.
bool sendTemporarilyBufferedEvent()
Send the temporarily buffered event (if any).
std::map< int, StdSize > maxEventSizes
Estimated maximum event size for each connection to the server.
CLog error("error", cerr.rdbuf())
std::vector< CContextServer * > serverPrimServer
MPI_Comm interComm
Communicator of server group.
int serverSize
Size of server group.
bool hasTemporarilyBufferedEvent() const
bool isServerNotLeader(void) const
Check if the client connects to any non-leading server.
static void contextEventLoop(bool enableEventsProcessing=true)
bool havePendingRequests(void)
CContext * context
Context for client.
static CTimer & get(std::string name)
struct xios::CContextClient::@0 tmpBufferedEvent
void newBuffer(int rank)
Create a new buffer for the connection to the server with the given rank.
void waitEvent(list< int > &ranks)
If the client is also a server (attached mode), then after sending an event it should immediately process the incoming events.
void setPendingEvent(void)
bool isServerLeader(void) const
Check if the client connects to a leading server.
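Putting the member list together, the documented lifecycle is: send events while polling the buffers, then send a finalize event and drain until nothing is pending. The following is a standalone mock of that flow with invented types (not the XIOS API):

#include <iostream>

struct MockContextClient
{
  int buffered = 0;
  void sendEvent(int /*event*/) { ++buffered; }  // sendEvent() analogue: queue one event
  bool checkBuffers()                            // checkBuffers() analogue: make progress,
  {                                              // report whether anything is still pending
    if (buffered > 0) --buffered;
    return buffered > 0;
  }
  void finalize() { while (checkBuffers()) {} }  // drain all pending sends before shutdown
};

int main()
{
  MockContextClient client;
  for (int ts = 0; ts < 3; ++ts)
  {
    client.sendEvent(ts);
    client.checkBuffers(); // poll in-flight sends each iteration
  }
  client.finalize();
  std::cout << "pending after finalize: " << client.buffered << "\n"; // 0
  return 0;
}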