#include "xios_spl.hpp"
#include "context_client.hpp"
#include "context_server.hpp"
#include "event_client.hpp"
#include "buffer_out.hpp"
#include "buffer_client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "cxios.hpp"
#include "server.hpp"

namespace xios
{
  /*!
  \param [in] parent Pointer to context on client side
  \param [in] intraComm_ communicator of the client group
  \param [in] interComm_ communicator of the server group
  \param [in] cxtSer Pointer to context on server side (only used in attached mode).
  */
  CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
    : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
  {
    pureOneSided = CXios::getin<bool>("pure_one_sided", false); // pure one-sided communication (for testing)
    if (isAttachedModeEnabled()) pureOneSided = false; // no one-sided communication in attached mode

    context = parent;
    intraComm = intraComm_;
    interComm = interComm_;
    MPI_Comm_rank(intraComm, &clientRank);
    MPI_Comm_size(intraComm, &clientSize);

    int flag;
    MPI_Comm_test_inter(interComm, &flag);
    if (flag) MPI_Comm_remote_size(interComm, &serverSize);
    else MPI_Comm_size(interComm, &serverSize);

    computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader);

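    // Merge the client/server inter-communicator into one intra-communicator. With high=false
    // the client ranks come first, so the servers occupy ranks [clientSize, clientSize+serverSize-1]
    // in interCommMerged (used in newBuffer() to address a single server for one-sided communication).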
    if (flag) MPI_Intercomm_merge(interComm_, false, &interCommMerged);

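    // One color per rank: commSelf is a single-process communicator containing only the local
    // process; it serves as the local group when building per-server communicators in newBuffer().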
    MPI_Comm_split(intraComm_, clientRank, clientRank, &commSelf);

    timeLine = 1;
  }

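  /*!
  Compute the client/server connection mapping. When there are fewer clients than servers, each
  client is leader of a contiguous block of servers; otherwise the clients are distributed evenly
  over the servers and only the first client of each group is the leader of its server.
  \param [in] clientRank rank of this client in the client group
  \param [in] clientSize number of clients
  \param [in] serverSize number of servers
  \param [out] rankRecvLeader ranks of the servers for which this client is the leader
  \param [out] rankRecvNotLeader ranks of the servers to which this client is connected as a non-leader
  */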
  void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize,
                                     std::list<int>& rankRecvLeader,
                                     std::list<int>& rankRecvNotLeader)
  {
    if ((0 == clientSize) || (0 == serverSize)) return;

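    // Fewer clients than servers: each client leads serverSize/clientSize consecutive servers
    // (one more for the first "remain" clients) and has no non-leader connection.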
    if (clientSize < serverSize)
    {
      int serverByClient = serverSize / clientSize;
      int remain = serverSize % clientSize;
      int rankStart = serverByClient * clientRank;

      if (clientRank < remain)
      {
        serverByClient++;
        rankStart += clientRank;
      }
      else
        rankStart += remain;

      for (int i = 0; i < serverByClient; i++)
        rankRecvLeader.push_back(rankStart + i);

      rankRecvNotLeader.resize(0);
    }
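    // At least as many clients as servers: the first "remain" servers receive clientByServer+1
    // clients each, the others clientByServer; the first client of each group is the leader.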
    else
    {
      int clientByServer = clientSize / serverSize;
      int remain = clientSize % serverSize;

      if (clientRank < (clientByServer + 1) * remain)
      {
        if (clientRank % (clientByServer + 1) == 0)
          rankRecvLeader.push_back(clientRank / (clientByServer + 1));
        else
          rankRecvNotLeader.push_back(clientRank / (clientByServer + 1));
      }
      else
      {
        int rank = clientRank - (clientByServer + 1) * remain;
        if (rank % clientByServer == 0)
          rankRecvLeader.push_back(remain + rank / clientByServer);
        else
          rankRecvNotLeader.push_back(remain + rank / clientByServer);
      }
    }
  }

  /*!
  In attached mode, the current context must be reset to the client context after the event has been processed.
  \param [in] event Event sent to the server
  */
  void CContextClient::sendEvent(CEventClient& event)
  {
    list<int> ranks = event.getRanks();

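    // Optional sanity check: all clients must be sending the same event on the same time line.
    // Summing the type id, class id and time line over intraComm and dividing by clientSize
    // must give back the local values if every client agrees.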
    if (CXios::checkEventSync)
    {
      int typeId, classId, typeId_in, classId_in;
      long long timeLine_out; // 64-bit, to match MPI_LONG_LONG_INT below
      typeId_in = event.getTypeId();
      classId_in = event.getClassId();
//      MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm); // MPI_UINT64_T standardized by MPI 3
      MPI_Allreduce(&timeLine, &timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&typeId_in, &typeId, 1, MPI_INT, MPI_SUM, intraComm);
      MPI_Allreduce(&classId_in, &classId, 1, MPI_INT, MPI_SUM, intraComm);
      if (typeId/clientSize != event.getTypeId() || classId/clientSize != event.getClassId() || timeLine_out/clientSize != timeLine)
      {
        ERROR("void CContextClient::sendEvent(CEventClient& event)",
              << "Events are not coherent between clients.");
      }
    }

    if (!event.isEmpty())
    {
      list<int> sizes = event.getSizes();

      // Get a buffer portion for each connection; this call blocks until enough room is available
      list<CBufferOut*> buffList;
      getBuffers(timeLine, ranks, sizes, buffList);

      event.send(timeLine, sizes, buffList);
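      // isBufferFree(), called from getBuffers(), left the buffers locked; release them now
      // that the event has been written so the server side can access them again.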
      unlockBuffers(ranks);

      checkBuffers(ranks);

      if (isAttachedModeEnabled()) // couldBuffer is always true in attached mode
      {
        waitEvent(ranks);
        CContext::setCurrent(context->getId());
      }
    }

    timeLine++;
  }

  /*!
  If the client is also a server (attached mode), it should process the incoming
  event right away after sending it.
  \param [in] ranks list of ranks of the servers connected to this client
  */
  void CContextClient::waitEvent(list<int>& ranks)
  {
    parentServer->server->setPendingEvent();
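    // The attached server runs in the same process: make it listen until every buffer
    // of this event has been consumed.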
    while (checkBuffers(ranks))
    {
      parentServer->server->listen();
      parentServer->server->checkPendingRequest();
    }

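    // Then run the server event loop until all received events have been processed.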
    while (parentServer->server->hasPendingEvent())
    {
      parentServer->server->eventLoop();
    }
  }

  /*!
   * Get buffers for each connection to the servers. This function blocks until there is enough room in the buffers unless
   * it is explicitly requested to be non-blocking.
   *
   * \param [in] timeLine time line of the event which will be sent to servers
   * \param [in] serverList list of ranks of the connected servers
   * \param [in] sizeList size of the message corresponding to each connection
   * \param [out] retBuffers list of buffers that can be used to store an event
   * \param [in] nonBlocking whether this function should be non-blocking
   * \return whether the already allocated buffers could be used
   */
  bool CContextClient::getBuffers(const size_t timeLine, const list<int>& serverList, const list<int>& sizeList,
                                  list<CBufferOut*>& retBuffers, bool nonBlocking /*= false*/)
  {
    list<int>::const_iterator itServer, itSize;
    list<CClientBuffer*> bufferList;
    map<int,CClientBuffer*>::const_iterator it;
    list<CClientBuffer*>::iterator itBuffer;
    bool areBuffersFree;

    for (itServer = serverList.begin(); itServer != serverList.end(); itServer++)
    {
      it = buffers.find(*itServer);
      if (it == buffers.end())
      {
        newBuffer(*itServer);
        it = buffers.find(*itServer);
      }
      bufferList.push_back(it->second);
    }

    CTimer::get("Blocking time").resume();
    do
    {
      areBuffersFree = true;
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
      {
        areBuffersFree &= (*itBuffer)->isBufferFree(*itSize);
      }

      if (!areBuffersFree)
      {
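        // Not enough room somewhere: release the locks taken by isBufferFree() and let the
        // server side(s) make progress before testing the buffers again.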
        for (itBuffer = bufferList.begin(); itBuffer != bufferList.end(); itBuffer++) (*itBuffer)->unlockBuffer();
        checkBuffers();
        if (CServer::serverLevel == 0) context->server->listen();
        else if (CServer::serverLevel == 1)
        {
          context->server->listen();
          for (int i = 0; i < context->serverPrimServer.size(); ++i) context->serverPrimServer[i]->listen();
          CServer::contextEventLoop(false); // avoid dead-lock at finalize...
        }
        else if (CServer::serverLevel == 2) context->server->listen();
      }
    } while (!areBuffersFree && !nonBlocking);
    CTimer::get("Blocking time").suspend();

    if (areBuffersFree)
    {
      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++)
        retBuffers.push_back((*itBuffer)->getBuffer(timeLine, *itSize));
    }
    return areBuffersFree;
  }

  /*!
  Make a new buffer for a certain connection to a server with the given rank
  \param [in] rank rank of the connected server
  */
  void CContextClient::newBuffer(int rank)
  {
    if (!mapBufferSize_.count(rank))
    {
      error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
      mapBufferSize_[rank] = CXios::minBufferSize;
      maxEventSizes[rank] = CXios::minBufferSize;
    }
    CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxEventSizes[rank]);
    // Notify the server of the buffer size
    CBufferOut* bufOut = buffer->getBuffer(0, sizeof(StdSize));
    bufOut->put(mapBufferSize_[rank]); // Stupid C++
    buffer->checkBuffer(true);

    if (!isAttachedModeEnabled()) // create windows only in server mode
    {
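      // Build a communicator pairing this client with server "rank": in interCommMerged the
      // servers are ranked after the clientSize clients, hence the remote leader clientSize+rank.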
      MPI_Comm OneSidedInterComm, oneSidedComm;
      MPI_Intercomm_create(commSelf, 0, interCommMerged, clientSize + rank, 0, &OneSidedInterComm);
      MPI_Intercomm_merge(OneSidedInterComm, false, &oneSidedComm);
      buffer->createWindows(oneSidedComm);
    }
  }

  /*!
  Verify the state of the buffers. A buffer is pending as long as it still holds data that the server has not consumed.
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    bool pending = false;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->checkBuffer(!pureOneSided);
    return pending;
  }

  //! Release all buffers
  void CContextClient::releaseBuffers()
  {
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
    {
      delete itBuff->second;
    }
    buffers.clear();
  }

  /*!
  Lock the buffers for one-sided communications
  \param [in] ranks list of ranks of the servers to which the client is connected
  */
  void CContextClient::lockBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    for (it = ranks.begin(); it != ranks.end(); it++) buffers[*it]->lockBuffer();
  }

  /*!
  Unlock the buffers for one-sided communications
  \param [in] ranks list of ranks of the servers to which the client is connected
  */
  void CContextClient::unlockBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    for (it = ranks.begin(); it != ranks.end(); it++) buffers[*it]->unlockBuffer();
  }

  /*!
  Verify the state of the buffers corresponding to a connection
  \param [in] ranks list of ranks of the servers to which the client is connected
  \return state of the buffers: pending (true), ready (false)
  */
  bool CContextClient::checkBuffers(list<int>& ranks)
  {
    list<int>::iterator it;
    bool pending = false;
    for (it = ranks.begin(); it != ranks.end(); it++) pending |= buffers[*it]->checkBuffer(!pureOneSided);
    return pending;
  }

  /*!
   * Set the buffer size for each connection. Warning: this function is collective.
   *
   * \param [in] mapSize maps the rank of the connected servers to the size of the corresponding buffer
   * \param [in] maxEventSize maps the rank of the connected servers to the size of the biggest event
   */
  void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)
  {
    mapBufferSize_ = mapSize;
    maxEventSizes = maxEventSize;
  }

  /*!
  Get the non-leading servers in the group of connected servers
  \return ranks of the non-leading servers
  */
  const std::list<int>& CContextClient::getRanksServerNotLeader(void) const
  {
    return ranksServerNotLeader;
  }

  /*!
  Check if the client connects to some servers as a non-leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerNotLeader(void) const
  {
    return !ranksServerNotLeader.empty();
  }

  /*!
  Get the leading servers in the group of connected servers
  \return ranks of the leading servers
  */
  const std::list<int>& CContextClient::getRanksServerLeader(void) const
  {
    return ranksServerLeader;
  }

  /*!
  Check if the client connects to some servers as a leader
  \return connected (true), not connected (false)
  */
  bool CContextClient::isServerLeader(void) const
  {
    return !ranksServerLeader.empty();
  }

  /*!
   * Check if the attached mode is used.
   *
   * \return true if and only if attached mode is used
   */
  bool CContextClient::isAttachedModeEnabled() const
  {
    return (parentServer != 0);
  }

  /*!
   * Finalize the context client and report memory usage. This function is non-blocking.
   */
  void CContextClient::finalize(void)
  {
    map<int,CClientBuffer*>::iterator itBuff;
    std::list<int>::iterator ItServerLeader;

    bool stop = false;

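    // Count the number of clients connected to each server: each client flags its own
    // connections and the Allreduce sums the flags, so that each server can be told how
    // many finalize notifications to expect.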
    int* nbServerConnectionLocal  = new int[serverSize];
    int* nbServerConnectionGlobal = new int[serverSize];
    for (int i = 0; i < serverSize; ++i) nbServerConnectionLocal[i] = 0;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) nbServerConnectionLocal[itBuff->first] = 1;
    for (ItServerLeader = ranksServerLeader.begin(); ItServerLeader != ranksServerLeader.end(); ItServerLeader++) nbServerConnectionLocal[*ItServerLeader] = 1;

    MPI_Allreduce(nbServerConnectionLocal, nbServerConnectionGlobal, serverSize, MPI_INT, MPI_SUM, intraComm);

    CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE);
    CMessage msg;

    for (int i = 0; i < serverSize; ++i) if (nbServerConnectionLocal[i] == 1) event.push(i, nbServerConnectionGlobal[i], msg);
    sendEvent(event);

    delete[] nbServerConnectionLocal;
    delete[] nbServerConnectionGlobal;
/*
    if (isServerLeader())
    {
      CMessage msg;
      const std::list<int>& ranks = getRanksServerLeader();
      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
      {
        info(100) << "DEBUG : Sent context Finalize event to rank " << *itRank << endl;
        event.push(*itRank, 1, msg);
      }
      sendEvent(event);
    }
    else sendEvent(event);
*/

    CTimer::get("Blocking time").resume();
    checkBuffers();
    CTimer::get("Blocking time").suspend();

    std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                          iteMap = mapBufferSize_.end(), itMap;

    StdSize totalBuf = 0;
    for (itMap = itbMap; itMap != iteMap; ++itMap)
    {
      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
                 << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
      totalBuf += itMap->second;
    }
    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

    //releaseBuffers(); // moved to CContext::finalize()
  }

  /*!
   * Check whether any buffer still has a pending MPI request.
   *
   * \return true if and only if at least one buffer has a request in flight
   */
  bool CContextClient::havePendingRequests(void)
  {
    bool pending = false;
    map<int,CClientBuffer*>::iterator itBuff;
    for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++)
      pending |= itBuff->second->hasPendingRequest();
    return pending;
  }

}