#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "server.hpp"
#include "client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "oasis_cinterface.hpp"
#include <boost/functional/hash.hpp>
#include <boost/algorithm/string.hpp>
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "event_scheduler.hpp"

namespace xios
{
  MPI_Comm CServer::intraComm ;
  list<MPI_Comm> CServer::interCommLeft ;
  list<MPI_Comm> CServer::interCommRight ;
//  list<MPI_Comm> CServer::interComm ;
  std::list<MPI_Comm> CServer::contextInterComms;
  std::list<MPI_Comm> CServer::contextIntraComms;
  int CServer::serverLevel = 0 ;
  int CServer::serverLeader_ = 0;
  int CServer::serverSize_ = 0;
  int CServer::nbPools = 0;
  int CServer::poolId = 0;
  int CServer::nbContexts_ = 0;
  bool CServer::isRoot = false ;
  int CServer::rank_ = INVALID_RANK;
  StdOFStream CServer::m_infoStream;
  StdOFStream CServer::m_errorStream;
  map<string,CContext*> CServer::contextList ;
  bool CServer::finished=false ;
  bool CServer::is_MPI_Initialized ;
  CEventScheduler* CServer::eventScheduler = 0;
  //---------------------------------------------------------------
  /*!
   * \fn void CServer::initialize(void)
   * Creates intraComm for each possible type of server (classical, primary or secondary).
   * In case of secondary servers, an intraComm is created for each secondary server pool.
   * (For now the assumption is that there is one proc per pool.)
   * Creates the following lists of interComms:
   *   classical server -- interCommLeft
   *   primary server -- interCommLeft and interCommRight
   *   secondary server -- interCommLeft for each pool.
   */
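  // Illustrative example (assumed numbers, not taken from the code): with 8
  // processes -- 4 clients (ranks 0-3) and 4 servers (ranks 4-7) -- and
  // CXios::ratioServer2 == 50, we get serverSize_ = 4 and
  // nbPools = 4*50/100 = 2, hence:
  //   ranks 4-5 : serverLevel == 1 (primary servers, one shared intraComm)
  //   ranks 6-7 : serverLevel == 2 (two single-process pools, one intraComm each)
  // The primary servers hold the client intercomms in interCommLeft and the
  // pool intercomms in interCommRight; each secondary pool holds its intercomm
  // to the primary servers in interCommLeft.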
  void CServer::initialize(void)
  {
    int initialized ;
    MPI_Initialized(&initialized) ;
    if (initialized) is_MPI_Initialized=true ;
    else is_MPI_Initialized=false ;

    // Not using OASIS
    if (!CXios::usingOasis)
    {
      if (!is_MPI_Initialized)
      {
        MPI_Init(NULL, NULL);
      }
      CTimer::get("XIOS").resume() ;

      boost::hash<string> hashString ;
      unsigned long hashServer = hashString(CXios::xiosCodeId);

      unsigned long* hashAll ;

//      int rank ;
      int size ;
      int myColor ;
      int i,c ;
      MPI_Comm newComm, serversComm;

      MPI_Comm_size(CXios::globalComm, &size) ;
      MPI_Comm_rank(CXios::globalComm, &rank_);

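      // Every process hashes its code identifier and the hashes are gathered
      // on all ranks, so that each process can derive the distinct codes, its
      // own group's color and the global rank of each group's leader.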
      hashAll=new unsigned long[size] ;
      MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;

      map<unsigned long, int> colors ;
      map<unsigned long, int> leaders ;
      map<unsigned long, int> lastProcesses ; // needed in case of two server levels
      map<unsigned long, int>::iterator it ;

      int nbSrv = 0;
      for(i=0,c=0;i<size;i++)
      {
        if (colors.find(hashAll[i])==colors.end())
        {
          colors[hashAll[i]]=c ;
          leaders[hashAll[i]]=i ;
          c++ ;
        }
        if (hashAll[i] == hashServer) ++nbSrv;
//        if (hashAll[i+1] != hashAll[i]) // Potential bug here!
//          lastProcesses[hashAll[i]]=i ; // It seems that lastProcesses is only used for calculating the server size. Can we count server size directly?
      }

      // Determine the server level and the number of secondary-server pools
      myColor = colors[hashServer];
      if (CXios::usingServer2)
      {
        int serverRank = rank_ - leaders[hashServer]; // server proc rank starting at 0
        serverSize_ = nbSrv; //lastProcesses[hashServer] - leaders[hashServer] + 1;
//        serverSize_ = lastProcesses - leaders[hashServer];
        nbPools = serverSize_ * CXios::ratioServer2 / 100;
        if ( serverRank < (serverSize_ - nbPools) )
        {
          serverLevel = 1;
        }
        else
        {
          serverLevel = 2;
          poolId = serverRank - serverSize_ + nbPools;
          myColor = rank_ + size; // + size to make sure that myColor is unique not only among servers but also among clients. It's only a temporary solution.
        }
      }

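      // Processes sharing a color end up in the same intraComm; since level-2
      // servers were each given a unique color above, every secondary pool
      // becomes its own (currently single-process) intraComm.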
      MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;

      if (serverLevel == 0)
      {
        int clientLeader;
        for(it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first!=hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm,&intraCommSize) ;
            MPI_Comm_rank(intraComm,&intraCommRank) ;
            info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
        }
      }
      else if (serverLevel == 1)
      {
        int clientLeader, srvSndLeader;
        int srvPrmLeader ;
        for (it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first != hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm, &intraCommSize) ;
            MPI_Comm_rank(intraComm, &intraCommRank) ;
            info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
          else
            serverLeader_ = it->second;
        }

        for (int i = 0; i < nbPools; ++i)
        {
          srvSndLeader = serverLeader_ + serverSize_ - nbPools + i;
          int intraCommSize, intraCommRank ;
          MPI_Comm_size(intraComm, &intraCommSize) ;
          MPI_Comm_rank(intraComm, &intraCommRank) ;
          info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ;
          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 1, &newComm) ;
          interCommRight.push_back(newComm) ;
        }
      } // primary server
      else
      {
        int clientLeader;
        clientLeader = leaders[hashString(CXios::xiosCodeId)];
        int intraCommSize, intraCommRank ;
        MPI_Comm_size(intraComm, &intraCommSize) ;
        MPI_Comm_rank(intraComm, &intraCommRank) ;
        info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
                <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
        interCommLeft.push_back(newComm) ;
      } // secondary server

      delete [] hashAll ;
    }
    // using OASIS
    else
    {
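      // Coupled mode: the local communicator and the client intercomms are
      // obtained from OASIS rather than carved out of the global communicator.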
      int size, rank;
      int myColor;
      if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId);

      CTimer::get("XIOS").resume() ;
      MPI_Comm localComm;
      oasis_get_localcomm(localComm);

      // Create server intraComm
      if (!CXios::usingServer2)
        MPI_Comm_dup(localComm, &intraComm);
      else
      {
        MPI_Comm_rank(localComm,&rank) ;
        MPI_Comm_size(localComm,&serverSize_) ;
        nbPools = serverSize_ * CXios::ratioServer2 / 100;
        if ( rank < (serverSize_ - nbPools) )
        {
          serverLevel = 1;
          myColor = 0;
        }
        else
        {
          serverLevel = 2;
          poolId = rank - serverSize_ + nbPools;
          myColor = rank;
        }
        MPI_Comm_split(localComm, myColor, rank, &intraComm) ;
      }
      MPI_Comm_rank(intraComm,&rank_) ;
      MPI_Comm_size(intraComm,&size) ;

      string codesId=CXios::getin<string>("oasis_codes_id") ;

      vector<string> splitted ;
      boost::split( splitted, codesId, boost::is_any_of(","), boost::token_compress_on ) ;
      vector<string>::iterator it ;

      MPI_Comm newComm ;
      int globalRank ;
      MPI_Comm_rank(CXios::globalComm,&globalRank);

      for(it=splitted.begin();it!=splitted.end();it++)
      {
        oasis_get_intercomm(newComm,*it) ;
//        interComm.push_back(newComm) ;
        if ( !CXios::usingServer2)
          interCommLeft.push_back(newComm) ;
        else
        {
          if (serverLevel == 1)
          {
            info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<size
                    <<" intraCommRank :"<<rank_<<" clientLeader "<< rank<<endl ;
            MPI_Intercomm_create(intraComm, 0, localComm, rank, 0, &newComm) ;
            interCommRight.push_back(newComm) ;
          }
          else if (serverLevel == 2)
          {
            info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<size
                    <<" intraCommRank :"<<rank_<<" clientLeader "<< 0<<endl ;
            MPI_Intercomm_create(intraComm, 0, localComm, 0, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
        }
//        if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
//        MPI_Comm_remote_size(newComm,&size);
        // Send serverLeader to client
        if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,interCommLeft.back()) ;
      }
      oasis_enddef() ;
    }

    MPI_Comm_rank(intraComm, &rank_) ;
    if (rank_==0) isRoot=true;
    else isRoot=false;

    eventScheduler = new CEventScheduler(intraComm) ;
  }

  void CServer::finalize(void)
  {
    CTimer::get("XIOS").suspend() ;

    delete eventScheduler ;

    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
      MPI_Comm_free(&(*it));

//    for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
//      MPI_Comm_free(&(*it));

//    for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++)
//      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
      MPI_Comm_free(&(*it));

    MPI_Comm_free(&intraComm);

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize();
      else MPI_Finalize() ;
    }
    report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Ratio : "<<CTimer::get("Process events").getCumulatedTime()/CTimer::get("XIOS server").getCumulatedTime()*100.<<"%"<<endl ;
  }

  void CServer::eventLoop(void)
  {
    bool stop=false ;

    CTimer::get("XIOS server").resume() ;
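    // The root of intraComm listens for client messages on the global
    // communicator; the other server processes handle the buffers relayed to
    // them by the root over intraComm.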
    while(!stop)
    {
      if (isRoot)
      {
        listenContext();
        if (!finished) listenFinalize() ;
      }
      else
      {
        listenRootContext();
        if (!finished) listenRootFinalize() ;
      }

      contextEventLoop() ;
      if (finished && contextList.empty()) stop=true ;
      eventScheduler->checkEvent() ;
    }
    CTimer::get("XIOS server").suspend() ;
  }

  void CServer::listenFinalize(void)
  {
    list<MPI_Comm>::iterator it, itr;
    int msg ;
    int flag ;

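    // Poll each client intercomm for a finalize message; this code path is
    // only reached on the intraComm root (see eventLoop).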
    for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
    {
      MPI_Status status ;
      traceOff() ;
      MPI_Iprobe(0,0,*it,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
        info(20)<<" CServer : Received client finalize"<<endl ;
        // Forward the finalize message to the secondary servers (if any)
        for(itr=interCommRight.begin();itr!=interCommRight.end();itr++)
        {
          MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
        }
        MPI_Comm_free(&(*it));
        interCommLeft.erase(it) ;
        break ;
      }
    }

    if (interCommLeft.empty())
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
      MPI_Waitall(size-1,requests,status) ;

      finished=true ;
      delete [] requests ;
      delete [] status ;
    }
  }

  void CServer::listenRootFinalize()
  {
    int flag ;
    MPI_Status status ;
    int msg ;

    traceOff() ;
    MPI_Iprobe(0,4,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
      finished=true ;
    }
  }

  void CServer::listenContext(void)
  {
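    // Two-phase, non-blocking reception: first probe for an incoming context
    // message and post the matching MPI_Irecv; on later calls, test whether
    // the pending receive has completed.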
    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int rank ;
    int count ;

    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv(buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      traceOff() ;
      MPI_Test(&request,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        recvContextMessage(buffer,count) ;
        delete [] buffer ;
        recept=false ;
      }
    }
  }

  void CServer::recvContextMessage(void* buff,int count)
  {
    static map<string,contextMessage> recvContextId;
    map<string,contextMessage>::iterator it ;
    CBufferIn buffer(buff,count) ;
    string id ;
    int clientLeader ;
    int nbMessage ;

    buffer>>id>>nbMessage>>clientLeader ;

    it=recvContextId.find(id) ;
    if (it==recvContextId.end())
    {
      contextMessage msg={0,0} ;
      pair<map<string,contextMessage>::iterator,bool> ret ;
      ret=recvContextId.insert(pair<string,contextMessage>(id,msg)) ;
      it=ret.first ;
    }
    it->second.nbRecv+=1 ;
    it->second.leaderRank+=clientLeader ;

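    // Messages for a given context id are counted; once all nbMessage copies
    // have arrived, the buffer is relayed to the other server processes
    // (tag 2) and the context is registered with the accumulated leader rank.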
    if (it->second.nbRecv==nbMessage)
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++)
      {
        MPI_Isend(buff,count,MPI_CHAR,i,2,intraComm,&requests[i-1]) ;
      }
      MPI_Waitall(size-1,requests,status) ;
      registerContext(buff,count,it->second.leaderRank) ;

      recvContextId.erase(it) ;
      delete [] requests ;
      delete [] status ;
    }
  }

  void CServer::listenRootContext(void)
  {
    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int count ;
    const int root=0 ;

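    // Same probe/receive state machine as in listenContext, but listening on
    // intraComm (tag 2) for context buffers relayed by the root process.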
    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(root,2,intraComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv(buffer,count,MPI_CHAR,root,2,intraComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      MPI_Test(&request,&flag,&status) ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        registerContext(buffer,count) ;
        delete [] buffer ;
        recept=false ;
      }
    }
  }

  void CServer::registerContext(void* buff, int count, int leaderRank)
  {
    string contextId;
    CBufferIn buffer(buff, count);
    buffer >> contextId;
    CContext* context;

    info(20) << "CServer : Register new Context : " << contextId << endl;

    if (contextList.find(contextId) != contextList.end())
      ERROR("void CServer::registerContext(void* buff, int count, int leaderRank)",
            << "Context '" << contextId << "' has already been registered");

    context=CContext::create(contextId);
    contextList[contextId]=context;

    // Primary or classical server: initialize its own server (CContextServer)
    MPI_Comm inter;
    if (serverLevel < 2)
    {
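      // The merged intra-communicator is only used to synchronize with the
      // clients before being freed; the context keeps the intercommunicator.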
      MPI_Comm contextInterComm;
      MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
      MPI_Intercomm_merge(contextInterComm,1,&inter);
      MPI_Barrier(inter);
      MPI_Comm_free(&inter);
      context->initServer(intraComm,contextInterComm);
      contextInterComms.push_back(contextInterComm);
    }
    // Secondary server: initialize its own server (CContextServer)
    else if (serverLevel == 2)
    {
      MPI_Comm_dup(interCommLeft.front(), &inter);
      contextInterComms.push_back(inter);
      context->initServer(intraComm, contextInterComms.back());
    }

    // Primary server: send create context message to secondary servers and initialize its own client (CContextClient)
    if (serverLevel == 1)
    {
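      // Notify each secondary pool: the message carries the context id
      // suffixed with the pool index, the size of the primary intraComm and
      // rank_, which the secondary side reads back as id, nbMessage and
      // clientLeader in recvContextMessage.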
      int i = 0, size;
      int messageSize;
      MPI_Comm_size(intraComm, &size) ;
      for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
      {
        StdString str = contextId +"_server_" + boost::lexical_cast<string>(i);
        CMessage msg; // built afresh for each pool so that messages do not accumulate
        msg<<str<<size<<rank_ ;
        messageSize = msg.size() ;
        buff = new char[messageSize] ;
        CBufferOut buffer(buff,messageSize) ;
        buffer<<msg ;
        int sndServerGloRanks = serverSize_-nbPools+serverLeader_ +i;  // the assumption is that there is only one proc per secondary server pool
        MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGloRanks, 1, CXios::globalComm) ;
        MPI_Comm_dup(*it, &inter);
        contextInterComms.push_back(inter);
        MPI_Comm_dup(intraComm, &inter);
        contextIntraComms.push_back(inter);
        context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
        delete [] buff ;
      }
      ++nbContexts_;
    }
  }

  void CServer::contextEventLoop(void)
  {
    bool isFinalized ;
    map<string,CContext*>::iterator it ;

    for(it=contextList.begin();it!=contextList.end();it++)
    {
      isFinalized=it->second->isFinalized();
      if (isFinalized)
      {
//        it->second->postFinalize();
        contextList.erase(it) ;
        break ;
      }
      else
        it->second->checkBuffersAndListen();
    }
  }

  //! Get rank of the current process
  int CServer::getRank()
  {
    return rank_;
  }

  /*!
   * Open a file specified by a suffix and an extension and use it for the given file buffer.
   * The file name will be suffix+rank+extension.
   *
   * \param fileName [in] prototype file name
   * \param ext [in] extension of the file
   * \param fb [in/out] the file buffer
   */
  void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
  {
    StdStringStream fileNameClient;
    int numDigit = 0;
    int size = 0;
    int id;
    MPI_Comm_size(CXios::globalComm, &size);
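    // Count the digits of the global communicator size so that the rank or
    // pool id can be zero-padded to a fixed width in the log file name.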
    while (size)
    {
      size /= 10;
      ++numDigit;
    }

    if (!CXios::usingServer2)
      id = getRank();
    else
    {
      if (serverLevel == 1)
        id = rank_;
      else
        id = poolId;
    }
    fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << id << ext;
    fb->open(fileNameClient.str().c_str(), std::ios::out);
    if (!fb->is_open())
      ERROR("void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the server log(s).");
  }

  /*!
   * \brief Open a file stream to write the info logs
   * Open a file stream with a specific file name suffix+rank
   * to write the info logs.
   * \param fileName [in] prototype file name
   */
  void CServer::openInfoStream(const StdString& fileName)
  {
    std::filebuf* fb = m_infoStream.rdbuf();
    openStream(fileName, ".out", fb);

    info.write2File(fb);
    report.write2File(fb);
  }

  //! Write the info logs to standard output
  void CServer::openInfoStream()
  {
    info.write2StdOut();
    report.write2StdOut();
  }

  //! Close the info log file if it is open
  void CServer::closeInfoStream()
  {
    if (m_infoStream.is_open()) m_infoStream.close();
  }

  /*!
   * \brief Open a file stream to write the error log
   * Open a file stream with a specific file name suffix+rank
   * to write the error log.
   * \param fileName [in] prototype file name
   */
  void CServer::openErrorStream(const StdString& fileName)
  {
    std::filebuf* fb = m_errorStream.rdbuf();
    openStream(fileName, ".err", fb);

    error.write2File(fb);
  }

  //! Write the error log to standard error output
  void CServer::openErrorStream()
  {
    error.write2StdErr();
  }

  //! Close the error log file if it is open
  void CServer::closeErrorStream()
  {
    if (m_errorStream.is_open()) m_errorStream.close();
  }
}