Changeset 992 for XIOS/dev/dev_olga/src
- Timestamp:
- 11/16/16 18:00:52 (8 years ago)
- Location:
- XIOS/dev/dev_olga/src
- Files:
-
- 12 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_olga/src/buffer_client.cpp
r983 r992 87 87 { 88 88 MPI_Comm_test_inter(interComm, &flag); 89 // if (flag) 90 MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request); 91 // else 92 // ERROR("bool CClientBuffer::checkBuffer(void)", 93 // << "Invalid intercommunicator"); 89 MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request); 94 90 pending = true; 95 91 if (current == 1) current = 0; -
XIOS/dev/dev_olga/src/client.cpp
r983 r992 24 24 StdOFStream CClient::m_errorStream; 25 25 26 MPI_Comm& CClient::getInterComm(void) { return (interComm); } 27 26 28 ///--------------------------------------------------------------- 27 29 /*! … … 248 250 MPI_Comm_rank(intraComm,&rank) ; 249 251 250 if ( (!CXios::isServer) || (CXios::serverLevel == 1))252 if (!CXios::isServer) 251 253 { 252 254 MPI_Comm_rank(intraComm,&rank) ; … … 282 284 } 283 285 286 284 287 int CClient::getRank() 285 288 { -
XIOS/dev/dev_olga/src/client.hpp
r983 r992 20 20 static int serverLeader; 21 21 static bool is_MPI_Initialized ; 22 23 static MPI_Comm& getInterComm(); 22 24 23 25 //! Get rank of the current process -
XIOS/dev/dev_olga/src/context_server.cpp
r983 r992 23 23 { 24 24 25 CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_)25 CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_) 26 26 { 27 27 context=parent; … … 41 41 boost::hash<string> hashString; 42 42 hashId=hashString(context->getId()); 43 44 } 45 46 CContextServer::CContextServer(CContext* parent, int srvLvl, MPI_Comm intraComm_,MPI_Comm interComm_) 47 { 48 context=parent; 49 intraComm=intraComm_; 50 MPI_Comm_size(intraComm,&intraCommSize); 51 MPI_Comm_rank(intraComm,&intraCommRank); 52 interComm=interComm_; 53 int flag; 54 MPI_Comm_test_inter(interComm,&flag); 55 if (flag) MPI_Comm_remote_size(interComm,&commSize); 56 else MPI_Comm_size(interComm,&commSize); 57 58 currentTimeLine=0; 59 scheduled=false; 60 finished=false; 61 62 boost::hash<string> hashString; 63 StdString contextId = context->getId(); 64 contextId += "_prim"; // just to distinguish between server and serverPrimServer on server1 65 hashId=hashString(contextId); 43 66 44 67 } … … 184 207 // The best way to properly solve this problem will be to use the event scheduler also in attached mode 185 208 // for now just set up a MPI barrier 186 if (!CServer::eventScheduler) MPI_Barrier(intraComm) ;209 // if (!CServer::eventScheduler) MPI_Barrier(intraComm) ; 187 210 188 211 CTimer::get("Process events").resume(); -
XIOS/dev/dev_olga/src/context_server.hpp
r983 r992 14 14 public: 15 15 16 CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; 16 CContextServer(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm) ; 17 CContextServer(CContext* parent, int srvLvl, MPI_Comm intraComm, MPI_Comm interComm) ; 17 18 bool eventLoop(void) ; 18 19 void listen(void) ; -
XIOS/dev/dev_olga/src/cxios.hpp
r983 r992 37 37 38 38 static bool isClient ; //!< Check if xios is client 39 static bool isServer ; //!< Check if xios is primaryserver39 static bool isServer ; //!< Check if xios is server 40 40 41 41 static int serverLevel ; // -
XIOS/dev/dev_olga/src/distribution_client.cpp
r983 r992 24 24 , elementZoomMask_(), elementNLocal_(), elementNGlobal_() 25 25 { 26 // numElement_ = globalLocalIndex.size(); !!! numElement_ should be calculated !!!! 27 isComputed_ = true; // my changes 28 29 26 // numElement_ = globalLocalIndex.size(); !!! numElement_ should be calculated (?) 27 isComputed_ = true; 30 28 localDataIndex_.resize(globalLocalIndex.size()); 31 29 localMaskIndex_.resize(globalLocalIndex.size()); -
XIOS/dev/dev_olga/src/event_scheduler.cpp
r591 r992 107 107 } 108 108 109 110 111 109 void CEventScheduler::checkParentRequest(void) 112 110 { … … 152 150 size_t hashId=recvRequest->buffer[1] ; 153 151 size_t lev=recvRequest->buffer[2] ; 154 delete recvRequest ;152 // delete recvRequest ; 155 153 pendingRecvParentRequest.pop() ; 156 154 157 155 if (lev==level) eventStack.push(pair<size_t,size_t>(timeLine,hashId)) ; 158 156 else bcastEvent(timeLine, hashId, lev) ; 157 delete recvRequest ; 159 158 } 160 159 } -
XIOS/dev/dev_olga/src/node/context.cpp
r987 r992 15 15 #include "xios_spl.hpp" 16 16 17 #include "server.hpp" 17 18 18 19 namespace xios { … … 252 253 { 253 254 clientPrimServer = new CContextClient(this, intraComm, interComm); 254 serverPrimServer = new CContextServer(this, intraComm, interComm);255 serverPrimServer = new CContextServer(this, 1, intraComm, interComm); // just some int parameter to distinguish server from serverPrimServer on server1 255 256 } 256 257 … … 414 415 { 415 416 finalized = true; 416 if (hasClient) sendRegistry() ;417 // if (hasClient) sendRegistry() ; 417 418 418 419 /* if (CXios::serverLevel == 0) … … 446 447 }*/ 447 448 448 client->finalize();449 while (!server->hasFinished())450 {451 server->eventLoop();452 }453 454 449 if ((hasClient) && (hasServer)) 455 450 { … … 458 453 { 459 454 serverPrimServer->eventLoop(); 455 CServer::eventScheduler->checkEvent() ; 460 456 } 461 457 } 458 459 client->finalize(); 460 while (!server->hasFinished()) 461 { 462 server->eventLoop(); 463 } 464 462 465 463 466 if (hasServer) … … 1056 1059 this->solveAllInheritance(); 1057 1060 1058 ShowTree(info(10));1061 // ShowTree(info(10)); 1059 1062 1060 1063 // Check if some axis, domains or grids are eligible to for compressed indexed output. -
XIOS/dev/dev_olga/src/node/file.hpp
r957 r992 115 115 CVariableGroup* addVariableGroup(const string& id = ""); 116 116 117 // Send info to ser ever117 // Send info to server 118 118 void sendEnabledFields(); 119 119 void sendAddField(const string& id = ""); -
XIOS/dev/dev_olga/src/server.cpp
r983 r992 18 18 { 19 19 MPI_Comm CServer::intraComm ; 20 list<MPI_Comm> CServer::interCommLeft ; 21 list<MPI_Comm> CServer::interCommRight ; 20 22 list<MPI_Comm> CServer::interComm ; 21 23 std::list<MPI_Comm> CServer::contextInterComms; … … 103 105 104 106 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 107 interCommLeft.push_back(newComm) ; 105 108 interComm.push_back(newComm) ; 106 109 } … … 125 128 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 126 129 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 130 interCommLeft.push_back(newComm) ; 127 131 interComm.push_back(newComm) ; 128 132 } … … 134 138 } 135 139 136 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 140 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 141 interCommRight.push_back(CClient::getInterComm()); 142 interComm.push_back(CClient::getInterComm()); 143 137 144 } 138 145 … … 144 151 if (it->first == hashServer1) 145 152 { 146 // no changes needed here to create one context per process of the secondary server pool147 153 clientLeader=it->second ; 148 154 int intraCommSize, intraCommRank ; … … 153 159 154 160 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 161 interCommLeft.push_back(newComm) ; 155 162 interComm.push_back(newComm) ; 156 163 … … 207 214 void CServer::finalize(void) 208 215 { 209 if (CXios::serverLevel == 1)210 {211 CClient::finalize();212 }213 216 214 217 CTimer::get("XIOS").suspend() ; … … 219 222 MPI_Comm_free(&(*it)); 220 223 221 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++) 224 // for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++) 225 // MPI_Comm_free(&(*it)); 226 227 for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++) 228 MPI_Comm_free(&(*it)); 229 230 for (std::list<MPI_Comm>::iterator it = 
interCommRight.begin(); it != interCommRight.end(); it++) 222 231 MPI_Comm_free(&(*it)); 223 232 … … 263 272 void CServer::listenFinalize(void) 264 273 { 265 list<MPI_Comm>::iterator it ;274 list<MPI_Comm>::iterator it, itr; 266 275 int msg ; 267 276 int flag ; 268 277 269 for(it=interComm .begin();it!=interComm.end();it++)278 for(it=interCommLeft.begin();it!=interCommLeft.end();it++) 270 279 { 271 280 MPI_Status status ; … … 275 284 if (flag==true) 276 285 { 277 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 278 info(20)<<" CServer : Receive client finalize"<<endl ; 286 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 287 info(20)<<" CServer : Receive client finalize"<<endl ; 288 289 // If primary server, send finalize to secondary server pool(s) 290 for(itr=interCommRight.begin(); itr!=interCommRight.end(); itr++) 291 { 292 MPI_Send(&msg,1,MPI_INT,0,0,*itr) ; 293 294 // MPI_Comm_free(&(*itr)); 295 // interCommRight.erase(itr) ; 296 } 297 279 298 MPI_Comm_free(&(*it)); 280 interComm.erase(it) ; 299 // interComm.erase(it) ; 300 interCommLeft.erase(it) ; 281 301 break ; 282 302 } 283 303 } 284 304 285 if (interComm.empty()) 305 if (interCommLeft.empty()) 306 // if (interComm.empty()) 286 307 { 287 308 int i,size ; -
XIOS/dev/dev_olga/src/server.hpp
r987 r992 27 27 // Communicators for the primary group of servers 28 28 static MPI_Comm intraComm; 29 static list<MPI_Comm> interComm; 29 static list<MPI_Comm> interCommLeft; // interComm between server and its client (client or primary server) 30 static list<MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool) 31 static list<MPI_Comm> interComm; // interCommLeft + interCommRight 30 32 static std::list<MPI_Comm> contextInterComms; 31 33 static CEventScheduler* eventScheduler;
Note: See TracChangeset for help on using the changeset viewer.