Changeset 1071
- Timestamp: 03/13/17 17:21:04 (8 years ago)
- Location: XIOS/dev/dev_olga
- Files: 5 added, 13 edited
Legend:
- Unmodified: shown as plain context lines
- Added: prefixed with "+"
- Removed: prefixed with "-"
XIOS/dev/dev_olga/src/buffer_client.cpp
r992 → r1071

      int flag;

+     int error, errclass, len;
+     char errstring[MPI_MAX_ERROR_STRING];
+
      if (pending)
      {
  …
      if (count > 0)
      {
-       MPI_Comm_test_inter(interComm, &flag);
-       MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
+       MPI_Errhandler_set(interComm, MPI_ERRORS_RETURN);
+       error = MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
+       if (error != MPI_SUCCESS)
+       {
+         MPI_Error_class(error, &errclass);
+         MPI_Error_string(error, errstring, &len);
+         ERROR("MPI error class: ", <<errclass<<" MPI error "<<errstring );
+       }
        pending = true;
        if (current == 1) current = 0;
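The hunk above wraps the MPI_Issend call in explicit error checking. Below is a minimal, self-contained sketch of that pattern outside XIOS (illustrative only, not XIOS code; it uses the MPI-2 name MPI_Comm_set_errhandler where the changeset calls the older MPI_Errhandler_set, and the rank/tag values are arbitrary):

    // Standalone sketch of the error-checked MPI_Issend pattern shown above.
    // Illustrative only; run with at least 2 MPI processes.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Ask MPI to return error codes instead of aborting, so failures can be reported.
      MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

      if (rank == 0 && size > 1)
      {
        char payload[4] = "abc";
        MPI_Request request;
        int error = MPI_Issend(payload, 4, MPI_CHAR, 1, 20, MPI_COMM_WORLD, &request);
        if (error != MPI_SUCCESS)
        {
          int errclass, len;
          char errstring[MPI_MAX_ERROR_STRING];
          MPI_Error_class(error, &errclass);
          MPI_Error_string(error, errstring, &len);
          std::fprintf(stderr, "MPI error class %d: %s\n", errclass, errstring);
        }
        else
          MPI_Wait(&request, MPI_STATUS_IGNORE);
      }
      else if (rank == 1)
      {
        char payload[4];
        MPI_Recv(payload, 4, MPI_CHAR, 0, 20, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      }

      MPI_Finalize();
      return 0;
    }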
XIOS/dev/dev_olga/src/context_client.cpp
r1054 → r1071

      // We force the getBuffers call to be non-blocking on the servers
      list<CBufferOut*> buffList;
-     bool couldBuffer = getBuffers(ranks, sizes, buffList, !CXios::isClient);
      // bool couldBuffer = getBuffers(ranks, sizes, buffList, CXios::isServer);
+     bool couldBuffer = getBuffers(ranks, sizes, buffList, false);

      if (couldBuffer)
  …
     * \return whether the already allocated buffers could be used
     */
-   bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers, bool nonBlocking /*= false*/)
+   bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers,
+                                   bool nonBlocking /*= false*/)
    {
      list<int>::const_iterator itServer, itSize;
  …
        {
          checkBuffers();
-         // if (?)
-         // {
+
+         // WHY DO WE PUT HERE SERVER INTO LISTENING LOOP AT ALL????
+         // context->server->listen();
          // for (int i = 0; i < context->serverPrimServer.size(); ++i)
          //   context->serverPrimServer[i]->listen();
-         // }
-         // else
-         context->server->listen();
        }
      } while (!areBuffersFree && !nonBlocking);
  …
    void CContextClient::newBuffer(int rank)
    {
+     if (!mapBufferSize_.count(rank))
+     {
+       error(0) << "WARNING: Unexpected request for buffer to communicate with server " << rank << std::endl;
+       mapBufferSize_[rank] = CXios::minBufferSize;
+     }
+     CClientBuffer* buffer = buffers[rank] = new CClientBuffer(interComm, rank, mapBufferSize_[rank], maxBufferedEvents);
+     // Notify the server
+     CBufferOut* bufOut = buffer->getBuffer(sizeof(StdSize));
+     bufOut->put(mapBufferSize_[rank]); // Stupid C++
+     buffer->checkBuffer();
    }
  …
    //! Release all buffers
-   void CContextClient::releaseBuffers( void)
+   void CContextClient::releaseBuffers()
    {
      map<int,CClientBuffer*>::iterator itBuff;
      for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second;
+     // buffersReleased_ = true;
    }
  …
      \return state of buffers, pending(true), ready(false)
    */
+   // bool CContextClient::checkBuffers(list<int>& ranks)
    bool CContextClient::checkBuffers(list<int>& ranks)
    {
  …
      Finalize context client and do some reports
    */
-   void CContextClient::finalize(void)
+   // void CContextClient::finalize(void)
+   void CContextClient::finalize()
    {
      map<int,CClientBuffer*>::iterator itBuff;
  …
      std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                            iteMap = mapBufferSize_.end(), itMap;
+
      StdSize totalBuf = 0;
      for (itMap = itbMap; itMap != iteMap; ++itMap)
XIOS/dev/dev_olga/src/context_client.hpp
r1054 → r1071

      MPI_Comm intraComm; //!< Communicator of client group

      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers
+     // map<int,CClientBuffer*> buffersPrim; //!< Buffers for connection to secondary servers

    private:
  …
      //! Maximum number of events that can be buffered
      StdSize maxBufferedEvents;
+
+     // bool buffersReleased_;

      struct {
XIOS/dev/dev_olga/src/context_server.cpp
r1054 → r1071

      {
        finished=true;
-       info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl;
+       // info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl; // moved to CContext::finalize()
        std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
                                               iteMap = mapBufferSize_.end(), itMap;
  …
        {
          rank = itMap->first;
-         report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl
-                   << " +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl;
+         // report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl
+         //           << " +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl;
          totalBuf_ += itMap->second;
        }
        context->finalize();

-       // report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
+       // report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; // moved to CContext::finalize()
      }
      else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event);
XIOS/dev/dev_olga/src/node/context.cpp
r1054 → r1071

      , isPostProcessed(false), finalized(false)
      , idServer_(), client(0), server(0)
-     // , clientPrimServer(0), serverPrimServer(0)
      , allProcessed(false)
    { /* Ne rien faire de plus */ }
  …
      , isPostProcessed(false), finalized(false)
      , idServer_(), client(0), server(0)
-     // , clientPrimServer(0), serverPrimServer(0)
      , allProcessed(false)
    { /* Ne rien faire de plus */ }
  …
      delete client;
      delete server;
-     // delete clientPrimServer;
-     // delete serverPrimServer;
+     for (std::vector<CContextClient*>::iterator it = clientPrimServer.begin(); it != clientPrimServer.end(); it++) delete *it;
+     for (std::vector<CContextServer*>::iterator it = serverPrimServer.begin(); it != serverPrimServer.end(); it++) delete *it;
    }
  …

      hasClient = true;
+     MPI_Comm intraCommServer, interCommServer;

      if (CServer::serverLevel != 1)
      // initClient is called by client
      {
        client = new CContextClient(this, intraComm, interComm, cxtServer);
-       server = new CContextServer(this, intraComm, interComm);
-       MPI_Comm intraCommServer, interCommServer;
        if (cxtServer) // Attached mode
        {
  …
        else
        {
-         // MPI_Comm_dup(intraComm, &intraCommServer);
-         // comms.push_back(intraCommServer);
-         // MPI_Comm_dup(interComm, &interCommServer);
-         // comms.push_back(interCommServer);
-       }
-     }
-
+         MPI_Comm_dup(intraComm, &intraCommServer);
+         comms.push_back(intraCommServer);
+         MPI_Comm_dup(interComm, &interCommServer);
+         comms.push_back(interCommServer);
+       }
+       registryIn=new CRegistry(intraComm);
+       registryIn->setPath(getId()) ;
+       if (client->clientRank==0) registryIn->fromFile("xios_registry.bin") ;
+       registryIn->bcastRegistry() ;
+       registryOut=new CRegistry(intraComm) ;
+       registryOut->setPath(getId()) ;
+
+       server = new CContextServer(this, intraCommServer, interCommServer);
+     }
      else
      // initClient is called by primary server
      {
        clientPrimServer.push_back(new CContextClient(this, intraComm, interComm));
-       serverPrimServer.push_back(new CContextServer(this, intraComm, interComm));
-     }
-
-
-
-     // registryIn=new CRegistry(intraComm);
-     // registryIn->setPath(getId()) ;
-     // if (client->clientRank==0) registryIn->fromFile("xios_registry.bin") ;
-     // registryIn->bcastRegistry() ;
-     //
-     // registryOut=new CRegistry(intraComm) ;
-     // registryOut->setPath(getId()) ;
-
-
+       MPI_Comm_dup(intraComm, &intraCommServer);
+       comms.push_back(intraCommServer);
+       MPI_Comm_dup(interComm, &interCommServer);
+       comms.push_back(interCommServer);
+       serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer));
+     }
    }
  …
      hasServer=true;
      server = new CContextServer(this,intraComm,interComm);
-     client = new CContextClient(this,intraComm,interComm);
      // client = new CContextClient(this,intraComm,interComm, cxtClient);

-     //registryIn=new CRegistry(intraComm);
-     //registryIn->setPath(getId()) ;
-     //if (server->intraCommRank==0) registryIn->fromFile("xios_registry.bin") ;
-     //registryIn->bcastRegistry() ;
-     //registryOut=new CRegistry(intraComm) ;
-     //registryOut->setPath(getId()) ;
+     registryIn=new CRegistry(intraComm);
+     registryIn->setPath(getId()) ;
+     if (server->intraCommRank==0) registryIn->fromFile("xios_registry.bin") ;
+     registryIn->bcastRegistry() ;
+     registryOut=new CRegistry(intraComm) ;
+     registryOut->setPath(getId()) ;

      MPI_Comm intraCommClient, interCommClient;
  …
      else
      {
-       // MPI_Comm_dup(intraComm, &intraCommClient);
-       // comms.push_back(intraCommClient);
-       // MPI_Comm_dup(interComm, &interCommClient);
-       // comms.push_back(interCommClient);
-     }
-
-   }
-
-   //! Server side: Put server into a loop in order to listen message from client
-   // bool CContext::eventLoop(void)
-   // {
-   //   if (CServer::serverLevel == 0)
-   //   {
-   //     return server->eventLoop();
-   //   }
-   //   else if (CServer::serverLevel == 1)
-   //   {
-   //     bool serverFinished = server->eventLoop();
-   //     bool serverPrimFinished = true;
-   //     for (int i = 0; i < serverPrimServer.size(); ++i)
-   //     {
-   //       serverPrimFinished *= serverPrimServer[i]->eventLoop();
-   //     }
-   //     return ( serverFinished && serverPrimFinished);
-   //   }
-   //   else
-   //   {
-   //     return server->eventLoop();
-   //   }
-   // }
+       MPI_Comm_dup(intraComm, &intraCommClient);
+       comms.push_back(intraCommClient);
+       MPI_Comm_dup(interComm, &interCommClient);
+       comms.push_back(interCommClient);
+     }
+     client = new CContextClient(this,intraCommClient,interCommClient);
+
+   }

    //! Try to send the buffers and receive possible answers
  …
        if (hasTmpBufferedEvent)
          hasTmpBufferedEvent = !client->sendTemporarilyBufferedEvent();
-
        // Don't process events if there is a temporarily buffered event
        return server->eventLoop(!hasTmpBufferedEvent);
  …
          if (hasTmpBufferedEventPrim)
            hasTmpBufferedEventPrim = !clientPrimServer[i]->sendTemporarilyBufferedEvent();
-         serverPrimFinished *= serverPrimServer[i]->eventLoop(hasTmpBufferedEventPrim);
+         // serverPrimFinished *= serverPrimServer[i]->eventLoop(!hasTmpBufferedEventPrim);
+         serverPrimFinished *= serverPrimServer[i]->eventLoop();
        }
        return ( serverFinished && serverPrimFinished);
  …
        client->checkBuffers();
        bool hasTmpBufferedEvent = client->hasTemporarilyBufferedEvent();
-       if (hasTmpBufferedEvent)
-         hasTmpBufferedEvent = !client->sendTemporarilyBufferedEvent();
-       return server->eventLoop(!hasTmpBufferedEvent);
-     }
+       // if (hasTmpBufferedEvent)
+       //   hasTmpBufferedEvent = !client->sendTemporarilyBufferedEvent();
+       // return server->eventLoop(!hasTmpBufferedEvent);
+       return server->eventLoop();
+     }
    }
  …
    {
      finalized = true;
-     // if (hasClient) sendRegistry() ;
-
-     client->finalize();
-     while (!server->hasFinished())
-     {
-       server->eventLoop();
-     }
+
+     if (hasClient) sendRegistry() ;

      if ((hasClient) && (hasServer))
  …
        }
      }
-
+     client->finalize();
+     while (!server->hasFinished())
+     {
+       server->eventLoop();
+     }
+
+     info(20)<<"Server Side context <"<<getId()<<"> finalized"<<endl;
      report(0)<< " Memory report : Context <"<<getId()<<"> : server side : total memory used for buffers "<<CContextServer::getTotalBuf()<<" bytes"<<endl;
  …
      {
        closeAllFile();
-       // registryOut->hierarchicalGatherRegistry() ;
-       // if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ;
-     }
-
-     for (std::vector<CContextClient*>::iterator it = clientPrimServer.begin(); it != clientPrimServer.end(); it++)
-       delete *it;
-
-     for (std::vector<CContextServer*>::iterator it = serverPrimServer.begin(); it != serverPrimServer.end(); it++)
-       delete *it;
-
-     for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
-       MPI_Comm_free(&(*it));
-     comms.clear();
+       registryOut->hierarchicalGatherRegistry() ;
+       if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ;
+     }
+
+     // for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+     //   MPI_Comm_free(&(*it));
+     // comms.clear();

    }
  }
+ //! Free internally allocated communicators
+ void CContext::freeComms(void)
+ {
+   for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+     MPI_Comm_free(&(*it));
+   comms.clear();
+ }
  …

    // Use correct context client to send message
-   // int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
    int nbSrvPools = (this->hasServer) ? (this->hasClient ? this->clientPrimServer.size() : 0) : 1;
    for (int i = 0; i < nbSrvPools; ++i)
  …
      else contextClientTmp->sendEvent(event);
    }
-
-   // if (!hasServer)
-   // {
-   //   if (client->isServerLeader())
-   //   {
-   //     CMessage msg ;
-   //     msg<<this->getIdServer();
-   //     if (client->clientRank==0) msg<<*registryOut ;
-   //     const std::list<int>& ranks = client->getRanksServerLeader();
-   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-   //       event.push(*itRank,1,msg);
-   //     client->sendEvent(event);
-   //   }
-   //   else client->sendEvent(event);
-   // }
-   // else
-   // {
-   //   if (clientPrimServer->isServerLeader())
-   //   {
-   //     CMessage msg ;
-   //     msg<<this->getIdServer();
-   //     if (clientPrimServer->clientRank==0) msg<<*registryOut ;
-   //     const std::list<int>& ranks = clientPrimServer->getRanksServerLeader();
-   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-   //       event.push(*itRank,1,msg);
-   //     clientPrimServer->sendEvent(event);
-   //   }
-   //   else clientPrimServer->sendEvent(event);
-   // }
  }
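In initClient/initServer above, the context now hands each CContextServer/CContextClient its own MPI_Comm_dup duplicate, records the duplicates in comms, and releases them later through the new freeComms(). Below is a small standalone sketch of that "duplicate, remember, free later" ownership pattern (illustrative only, not XIOS code; the Worker type is a placeholder):

    // Sketch of the communicator ownership pattern used by CContext above.
    #include <mpi.h>
    #include <list>

    struct Worker
    {
      MPI_Comm comm;                       // communicator this component is allowed to use
      explicit Worker(MPI_Comm c) : comm(c) {}
    };

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      std::list<MPI_Comm> comms;           // communicators allocated internally

      // Give the worker its own duplicate so its traffic cannot collide
      // with messages exchanged on MPI_COMM_WORLD by other components.
      MPI_Comm workerComm;
      MPI_Comm_dup(MPI_COMM_WORLD, &workerComm);
      comms.push_back(workerComm);

      Worker worker(comms.back());
      MPI_Barrier(worker.comm);            // ... worker communicates on its own copy ...

      // Free internally allocated communicators (the role of CContext::freeComms).
      for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
        MPI_Comm_free(&(*it));
      comms.clear();

      MPI_Finalize();
      return 0;
    }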
XIOS/dev/dev_olga/src/node/context.hpp
r1054 → r1071

      void finalize(void);
      void closeDefinition(void);
+     bool isFinalized(void);

      // Some functions to process context
  …
      void recvRegistry(CBufferIn& buffer) ; //!< registry is received by the servers

-     bool isFinalized(void);
+     void freeComms(void); //!< Free internally allcoated communicators

      // dispatch event
  …
      bool hasServer;

-     // Concrete contex client
-     CContextClient* client;
+     CContextServer* server; //!< Concrete context server
+     CContextClient* client; //!< Concrete contex client
+     std::vector<CContextServer*> serverPrimServer;
+     std::vector<CContextClient*> clientPrimServer;
+
      CRegistry* registryIn ;  //!< input registry which is read from file
      CRegistry* registryOut ; //!< output registry which will be wrote on file at the finalize
-
-     // Concrete context server
-     CContextServer* server;
-
-     // CContextClient* clientPrimServer;
-     // CContextServer* serverPrimServer;
-     std::vector<CContextClient*> clientPrimServer;
-     std::vector<CContextServer*> serverPrimServer;

    private:
  …
      StdString idServer_;
      CGarbageCollector garbageCollector;
-     std::list<MPI_Comm> comms; //!< Communicators allocated internally --- significance??
+     std::list<MPI_Comm> comms; //!< Communicators allocated internally

    public: // Some function maybe removed in the near future
XIOS/dev/dev_olga/src/node/field.cpp
r1054 → r1071

    {
      CContext* context = CContext::getCurrent();
-     CContextClient* client = context->client;
+     // CContextClient* client = context->client;
+     CContextClient* client = (!context->hasServer) ? context->client : this->file->getContextClient();
+

      lastDataRequestedFromServer = tsDataRequested;
XIOS/dev/dev_olga/src/node/file.cpp
r1054 → r1071

    }

+
    /*!
      \brief Send a message to create a field on server side
  …
    void CFile::sendAddVariable(const string& id, CContextClient* client)
    {
-     sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE , client);
+     sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE);
      // CContext* context = CContext::getCurrent();

XIOS/dev/dev_olga/src/node/file.hpp
r1054 → r1071

      // Send info to server
-     //void sendEnabledFields();
+     void sendEnabledFields();
      void sendEnabledFields(CContextClient* client);
      void sendAddField(const string& id = "");
XIOS/dev/dev_olga/src/node/variable.cpp
r1030 → r1071

    }

-   void CVariable::sendValue(CContextClient* client )
+   void CVariable::sendValue(CContextClient* client, bool clientPrim /*= false*/)
    {
      CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ;
XIOS/dev/dev_olga/src/node/variable.hpp
r1021 → r1071

      //! Sending a request to set up variable data
      void sendValue();
-     void sendValue(CContextClient* client );
+     void sendValue(CContextClient* client, bool clientPrim = false);

      static void recvValue(CEventServer& event) ;
XIOS/dev/dev_olga/src/server.cpp
r1054 → r1071

    // list<MPI_Comm> CServer::interComm ;
    std::list<MPI_Comm> CServer::contextInterComms;
+   std::list<MPI_Comm> CServer::contextIntraComms;
    int CServer::serverLevel = 0 ;
-   int CServer::serverLeader = 0;
-   int CServer::serverSize = 0;
+   int CServer::serverLeader_ = 0;
+   int CServer::serverSize_ = 0;
    int CServer::nbPools = 0;
    int CServer::poolId = 0;
+   int CServer::nbContexts_ = 0;
    bool CServer::isRoot = false ;
    int CServer::rank = INVALID_RANK;
  …
      {
        int serverRank = rank - leaders[hashServer]; // server proc rank starting 0
-       serverSize = size - leaders[hashServer];
-       nbPools = serverSize * CXios::ratioServer2 / 100;
-       if ( serverRank < (serverSize - nbPools) )
+       serverSize_ = size - leaders[hashServer];
+       nbPools = serverSize_ * CXios::ratioServer2 / 100;
+       if ( serverRank < (serverSize_ - nbPools) )
        {
          serverLevel = 1;
  …
        {
          serverLevel = 2;
-         poolId = serverRank - serverSize + nbPools;
+         poolId = serverRank - serverSize_ + nbPools;
          myColor = rank;
  …
        }
        else
-         serverLeader = it->second;
+         serverLeader_ = it->second;
      }

      for (int i = 0; i < nbPools; ++i)
      {
-       srvSndLeader = serverLeader + serverSize - nbPools + i;
+       srvSndLeader = serverLeader_ + serverSize_ - nbPools + i;
        int intraCommSize, intraCommRank ;
        MPI_Comm_size(intraComm, &intraCommSize) ;
  …

      for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+       MPI_Comm_free(&(*it));
+
+     for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
        MPI_Comm_free(&(*it));
  …
      {
        MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
-       // itr = interCommRight.erase(itr) ;
      }
      MPI_Comm_free(&(*it));
  …
      contextList[contextId]=context;

-     // All type of servers initialize its own server (CContextServer)
+     // Primary or classical server: initialize its own server (CContextServer)
+     MPI_Comm inter;
      if (serverLevel < 2)
      {
        MPI_Comm contextInterComm;
        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
-       MPI_Comm inter;
        MPI_Intercomm_merge(contextInterComm,1,&inter);
        MPI_Barrier(inter);
  …
        context->initServer(intraComm,contextInterComm);
        contextInterComms.push_back(contextInterComm);
-     }
+
+     }
+     // Secondary server: initialize its own server (CContextServer)
      else if (serverLevel == 2)
      {
-       context->initServer(intraComm, interCommLeft.front());
+       MPI_Comm_dup(interCommLeft.front(), &inter);
+       contextInterComms.push_back(inter);
+       context->initServer(intraComm, contextInterComms.back());
+       // context->initServer(intraComm, interCommLeft.front());
      }
  …
        CBufferOut buffer(buff,messageSize) ;
        buffer<<msg ;
-       int sndServerGloRanks = serverSize -nbPools+serverLeader+i;  // the assumption is that there is only one proc per secondary server pool
+       int sndServerGloRanks = serverSize_-nbPools+serverLeader_ +i;  // the assumption is that there is only one proc per secondary server pool
        MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGloRanks, 1, CXios::globalComm) ;
-       context->initClient(intraComm, *it) ;
+       MPI_Comm_dup(*it, &inter);
+       contextInterComms.push_back(inter);
+       MPI_Comm_dup(intraComm, &inter);
+       contextIntraComms.push_back(inter);
+       context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
+       // context->initClient(intraComm, contextPrimInterComms.back()) ;
+       // context->initClient(intraComm, *it) ;
        delete [] buff ;
      }
+     ++nbContexts_;
    }
  …
      if (finished)
      {
+       // it->second->freeComms();  // deallocate internally allcoated context communicators
        contextList.erase(it) ;
        break ;
      }
      else
+     {
        finished=it->second->checkBuffersAndListen();
+     }
    }
  }
  …
    {
      if (serverLevel == 1)
-       id = rank-serverLeader ;
+       id = rank-serverLeader_;
      else
        id = poolId;
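registerContext above builds the context intercommunicator with MPI_Intercomm_create, merges it into an intracommunicator with MPI_Intercomm_merge, and keeps MPI_Comm_dup copies in contextInterComms/contextIntraComms. The following is a minimal two-group sketch of the create/merge step (illustrative only, not XIOS code; leader ranks and the tag are arbitrary, and it needs at least two processes):

    // Minimal sketch of MPI_Intercomm_create / MPI_Intercomm_merge as used in
    // CServer::registerContext above. Run with 2 or more MPI processes.
    #include <mpi.h>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Split the world into a "client" group (rank 0) and a "server" group (the rest).
      int color = (rank == 0) ? 0 : 1;
      MPI_Comm localComm;
      MPI_Comm_split(MPI_COMM_WORLD, color, rank, &localComm);

      // Build an intercommunicator between the two groups; the leaders are the
      // lowest world ranks of each group (0 and 1), and tag 10 is arbitrary.
      int remoteLeader = (color == 0) ? 1 : 0;
      MPI_Comm interComm;
      MPI_Intercomm_create(localComm, 0, MPI_COMM_WORLD, remoteLeader, 10, &interComm);

      // Merge the intercommunicator into one intracommunicator covering both groups,
      // as done around context->initServer() in the changeset.
      MPI_Comm merged;
      MPI_Intercomm_merge(interComm, /*high=*/color, &merged);
      MPI_Barrier(merged);

      MPI_Comm_free(&merged);
      MPI_Comm_free(&interComm);
      MPI_Comm_free(&localComm);
      MPI_Finalize();
      return 0;
    }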
XIOS/dev/dev_olga/src/server.hpp
r1054 → r1071

      static list<MPI_Comm> interCommLeft;           // interComm between server (primary, classical or secondary) and its client (client or primary server)
      static list<MPI_Comm> interCommRight;          // interComm between primary server and secondary server (non-empty only for primary server pool)
-     static std::list<MPI_Comm> contextInterComms;  // significance ??
+     static std::list<MPI_Comm> contextInterComms;  // list of context intercomms
+     static std::list<MPI_Comm> contextIntraComms;  // list of context intercomms (needed only in case of secondary servers)
      static CEventScheduler* eventScheduler;

  …
    private:
      static int rank;
-     static int serverLeader ;  //!< Leader of the classical or primary server (needed in case of secondary servers)
-     static int serverSize ;    //!< Number of procs dedicated to servers (primary and seconday (if any) combined)
+     static int serverLeader_;  //!< Leader of the classical or primary server (needed in case of secondary servers)
+     static int serverSize_;    //!< Number of procs dedicated to servers (primary and seconday (if any) combined)
      static int nbPools;        //!< Number of secondary server pools
      static int poolId;         //!< id of a secondary server pool starting from 1
+     static int nbContexts_;
      static StdOFStream m_infoStream;
      static StdOFStream m_errorStream;