#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "server.hpp"
using namespace ep_lib;

namespace xios
{
  /*!
  \param [in] parent Pointer to the context on the client side
  \param [in] intraComm_ communicator of the client group
  \param [in] interComm_ communicator of the server group
  \param [in] cxtSer Pointer to the context on the server side (only used in attached mode)
  */
  CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
    : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
  {
    context = parent;
    intraComm = intraComm_;
    interComm = interComm_;
    *(static_cast< ::MPI_Comm* >(intraComm.mpi_comm)) = *(static_cast< ::MPI_Comm* >(intraComm_.mpi_comm));
    *(static_cast< ::MPI_Comm* >(interComm.mpi_comm)) = *(static_cast< ::MPI_Comm* >(interComm_.mpi_comm));
    *(static_cast< ::MPI_Comm* >(interComm.ep_comm_ptr->intercomm->mpi_inter_comm)) = *(static_cast< ::MPI_Comm* >(interComm_.ep_comm_ptr->intercomm->mpi_inter_comm));
    //MPI_Comm_dup(intraComm_, &intraComm);
    //MPI_Comm_dup(interComm_, &interComm);
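    // The raw MPI communicators wrapped by the ep_lib handles are copied directly above;
    // MPI_Comm_dup is left commented out, presumably because duplication of the endpoint
    // communicators is handled by the ep_lib layer itself.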
    MPI_Comm_rank(intraComm, &clientRank);
    MPI_Comm_size(intraComm, &clientSize);

    int flag;
    MPI_Comm_test_inter(interComm, &flag);
    if (flag) MPI_Comm_remote_size(interComm, &serverSize);
    else MPI_Comm_size(interComm, &serverSize);

    computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader);

    timeLine = 0;
  }

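  /*!
  Distribute the connected servers among the clients as evenly as possible and determine,
  for this client, the servers for which it acts as leader and those to which it is
  connected without being their leader.

  Illustrative example (hypothetical sizes, not taken from a real configuration): with
  clientSize = 5 and serverSize = 2, clientByServer = 2 and remain = 1, so client 0 leads
  server 0, client 3 leads server 1, and clients 1 and 2 (server 0) and client 4 (server 1)
  are connected as non-leaders.

  \param [in] clientRank rank of this client in the client group
  \param [in] clientSize number of clients
  \param [in] serverSize number of servers
  \param [out] rankRecvLeader ranks of the servers led by this client
  \param [out] rankRecvNotLeader ranks of the servers connected to this client without being led by it
  */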
  void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                     std::list<int>& rankRecvLeader,
                                     std::list<int>& rankRecvNotLeader)
  {
    if ((0 == clientSize) || (0 == serverSize)) return;

    if (clientSize < serverSize)
    {
      int serverByClient = serverSize / clientSize;
      int remain = serverSize % clientSize;
      int rankStart = serverByClient * clientRank;

      if (clientRank < remain)
      {
        serverByClient++;
        rankStart += clientRank;
      }
      else
        rankStart += remain;

      for (int i = 0; i < serverByClient; i++)
        rankRecvLeader.push_back(rankStart + i);

      rankRecvNotLeader.resize(0);
    }
    else
    {
      int clientByServer = clientSize / serverSize;
      int remain = clientSize % serverSize;

      if (clientRank < (clientByServer + 1) * remain)
      {
        if (clientRank % (clientByServer + 1) == 0)
          rankRecvLeader.push_back(clientRank / (clientByServer + 1));
        else
          rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
      }
      else
      {
        int rank = clientRank - (clientByServer + 1) * remain;
        if (rank % clientByServer == 0)
          rankRecvLeader.push_back(remain + rank / clientByServer);
        else
          rankRecvNotLeader.push_back(remain + rank / clientByServer);
      }
    }
  }

  /*!
  In attached mode, the current context must be reset to the client context after the event is sent.
  \param [in] event Event sent to the server
  */
  void CContextClient::sendEvent(CEventClient& event)
  {
    list<int> ranks = event.getRanks();

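    // When event-synchronisation checking is enabled, the event type, class and timeline are
    // summed over all clients of the intra-communicator; dividing each sum by the number of
    // clients gives back the local values when all clients issue the same event, so a
    // mismatch signals clients that are out of sync.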
    if (CXios::checkEventSync)
    {
      int typeId, classId, typeId_in, classId_in;
      size_t timeLine_out; // same type as timeLine, which is reduced as MPI_UINT64_T
      typeId_in = event.getTypeId();
      classId_in = event.getClassId();
      MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm);
      MPI_Allreduce(&typeId_in, &typeId, 1, MPI_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&classId_in, &classId, 1, MPI_INT, MPI_SUM, intraComm);
      if (typeId/clientSize != event.getTypeId() || classId/clientSize != event.getClassId() || timeLine_out/clientSize != timeLine)
      {
        ERROR("void CContextClient::sendEvent(CEventClient& event)",
              << "Events are not coherent between clients.");
      }
    }

    if (!event.isEmpty())
    {
      list<int> sizes = event.getSizes();

      // We force the getBuffers call to be non-blocking on classical servers
      list<CBufferOut*> buffList;
      bool couldBuffer = getBuffers(ranks, sizes, buffList, (!CXios::isClient && (CServer::serverLevel == 0)));
      // bool couldBuffer = getBuffers(ranks, sizes, buffList, CXios::isServer);

      if (couldBuffer)
      {
        event.send(timeLine, sizes, buffList);

        checkBuffers(ranks);

        if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
        {
          waitEvent(ranks);
          CContext::setCurrent(context->getId());
        }
      }
      else
      {
        tmpBufferedEvent.ranks = ranks;
        tmpBufferedEvent.sizes = sizes;

        for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++)
          tmpBufferedEvent.buffers.push_back(new CBufferOut(*it));
        info(100) << "DEBUG : temporary event created : timeline " << timeLine << endl;
        event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers);
      }
    }

    timeLine++;
  }

  /*!
   * Send the temporarily buffered event (if any).
   *
   * \return true if a temporarily buffered event could be sent, false otherwise
   */
  bool CContextClient::sendTemporarilyBufferedEvent()
  {
    bool couldSendTmpBufferedEvent = false;

    if (hasTemporarilyBufferedEvent())
    {
      list<CBufferOut*> buffList;
      if (getBuffers(tmpBufferedEvent.ranks, tmpBufferedEvent.sizes, buffList, true)) // Non-blocking call
      {
        list<CBufferOut*>::iterator it, itBuffer;

        for (it = tmpBufferedEvent.buffers.begin(), itBuffer = buffList.begin(); it != tmpBufferedEvent.buffers.end(); it++, itBuffer++)
          (*itBuffer)->put((char*)(*it)->start(), (*it)->count());

        info(100) << "DEBUG : temporary event sent" << endl;
        checkBuffers(tmpBufferedEvent.ranks);

        tmpBufferedEvent.clear();

        couldSendTmpBufferedEvent = true;
      }
    }

    return couldSendTmpBufferedEvent;
  }

  /*!
  If the client is also a server (attached mode), it should process the incoming event right away after sending its own.
  \param [in] ranks list of ranks of the servers connected to this client
  */
  void CContextClient::waitEvent(list<int>& ranks)
  {
    parentServer->server->setPendingEvent();
    while (checkBuffers(ranks))
    {
      parentServer->server->listen();
      parentServer->server->checkPendingRequest();
    }

    while (parentServer->server->hasPendingEvent())
    {
      parentServer->server->eventLoop();
    }
  }

  /*!
   * Get buffers for each connection to the servers. This function blocks until there is enough room in the buffers,
   * unless it is explicitly requested to be non-blocking.
   *
   * \param [in] serverList list of ranks of the connected servers
   * \param [in] sizeList size of the message corresponding to each connection
   * \param [out] retBuffers list of buffers that can be used to store an event
   * \param [in] nonBlocking whether this function should be non-blocking
   * \return whether the already allocated buffers could be used
   */
  bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
                                  bool nonBlocking /*= false*/)
  {
    list<int>::const_iterator itServer, itSize;
    list<CClientBuffer*> bufferList;
    map<int,CClientBuffer*>::const_iterator it;
    list<CClientBuffer*>::iterator itBuffer;
    bool areBuffersFree;

    for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
    {
      it = buffers.find(*itServer);
      if (it == buffers.end())
      {
        newBuffer(*itServer);
        it = buffers.find(*itServer);
      }
      bufferList.push_back(it->second);
    }

    CTimer::get("Blocking time").resume();
    do
    {
      areBuffersFree = true;
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);

      if (!areBuffersFree)
      {
        checkBuffers();
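        // While waiting for room in the buffers, keep servicing incoming messages: a process
        // that also acts as a server (attached mode or an intermediate server level) can then
        // keep making progress for its own clients, which presumably avoids deadlocks while
        // the buffers drain.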
        if (CServer::serverLevel == 0)
          context->server->listen();

        else if (CServer::serverLevel == 1)
        {
          context->server->listen();
          for (int i = 0; i < context->serverPrimServer.size(); ++i)
            context->serverPrimServer[i]->listen();
          CServer::contextEventLoop(false); // avoid dead-lock at finalize...
        }

        else if (CServer::serverLevel == 2)
          context->server->listen();
      }
    } while (!areBuffersFree && !nonBlocking);

    CTimer::get("Blocking time").suspend();

    if (areBuffersFree)
    {
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        retBuffers.push_back((*itBuffer)->getBuffer(*itSize));
    }

    return areBuffersFree;
  }

  /*!
  Create a new buffer for the connection to the server with the given rank
  \param [in] rank rank of the connected server
  */
  void CContextClient::newBuffer(int rank)
  {
    if (!mapBufferSize_.count(rank))
    {
      error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
      mapBufferSize_[rank] = CXios::minBufferSize;
      maxEventSizes[rank] = CXios::minBufferSize;
    }
    CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxEventSizes[rank], maxBufferedEvents);
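    // The first thing written into a newly created buffer is its size, presumably so that the
    // server can allocate a matching receive buffer for this connection.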
    // Notify the server
    CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
    bufOut->put(mapBufferSize_[rank]); // Stupid C++
    buffer->checkBuffer();
  }

  /*!
  Verify the state of the buffers. A buffer is in a pending state if there is no message on it.
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool pending = false;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->checkBuffer();
    return pending;
  }

  //! Release all buffers
  void CContextClient::releaseBuffers()
  {
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
    {
      delete itBuff->second;
    }
    buffers.clear();
  }

  /*!
  Verify the state of the buffers corresponding to a set of connections
  \param [in] ranks list of ranks of the servers to which the client connects
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    bool pending = false;
    for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer();
    return pending;
  }

  /*!
   * Set the buffer size for each connection. Warning: This function is collective.
   *
   * \param [in] mapSize maps the rank of the connected servers to the size of the corresponding buffer
   * \param [in] maxEventSize maps the rank of the connected servers to the size of the biggest event
   */
  void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
  {
    mapBufferSize_ = mapSize;
    maxEventSizes = maxEventSize;

    // Compute the maximum number of events that can be safely buffered.
    double minBufferSizeEventSizeRatio = std::numeric_limits<double>::max();
    for (std::map<int,StdSize>::const_iterator it = mapSize.begin(), ite = mapSize.end(); it != ite; ++it)
    {
      double ratio = double(it->second) / maxEventSize.at(it->first);
      if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
    }
    //MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
    MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);

    if (minBufferSizeEventSizeRatio < 1.0)
    {
      ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
            << "The buffer sizes and the maximum events sizes are incoherent.");
    }
    else if (minBufferSizeEventSizeRatio == std::numeric_limits<double>::max())
      minBufferSizeEventSizeRatio = 1.0; // In this case, maxBufferedEvents will never be used but we want to avoid any floating point exception

    maxBufferedEvents = size_t(2 * minBufferSizeEventSizeRatio) // there is room for two local buffers on the server
                      + size_t(minBufferSizeEventSizeRatio)     // one local buffer can always be fully used
                      + 1;                                      // the other local buffer might contain only one event
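    // Illustrative example (hypothetical values): if the smallest buffer is 2.5 times the size
    // of the largest event, minBufferSizeEventSizeRatio = 2.5 and
    // maxBufferedEvents = size_t(5.0) + size_t(2.5) + 1 = 5 + 2 + 1 = 8.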
  }

  /*!
  Get the ranks of the connected servers for which this client is not the leader
  \return ranks of the non-leading servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }

  /*!
  Check if this client is connected to servers for which it is not the leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }

  /*!
  Get the leading servers in the group of connected servers
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check if this client connects to a server as its leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

  /*!
   * Finalize the context client and report buffer usage. This function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool stop = false;

    CTimer::get("Blocking time").resume();
    while (hasTemporarilyBufferedEvent())
    {
      checkBuffers();
      sendTemporarilyBufferedEvent();
    }
    CTimer::get("Blocking time").suspend();

    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    if (isServerLeader())
    {
      CMessage msg;
      const std::list<int>& ranks = getRanksServerLeader();
      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
      {
        #pragma omp critical (_output)
        info(100) << "DEBUG : Sent context Finalize event to rank " << *itRank << endl;
        event.push(*itRank, 1, msg);
      }
      sendEvent(event);
    }
    else sendEvent(event);

    CTimer::get("Blocking time").resume();
    // while (!stop)
    {
      checkBuffers();
      if (hasTemporarilyBufferedEvent())
        sendTemporarilyBufferedEvent();

      stop = true;
      // for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest();
    }
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      #pragma omp critical (_output)
      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    #pragma omp critical (_output)
    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

    //releaseBuffers(); // moved to CContext::finalize()
  }

  /*!
   * Check whether any buffer still has a pending MPI request.
   * \return true if at least one request is pending, false otherwise
   */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }
}