#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "server.hpp"
#include "client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "oasis_cinterface.hpp"
#include <boost/functional/hash.hpp>
#include <boost/algorithm/string.hpp>
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "event_scheduler.hpp"

namespace xios
{
  MPI_Comm CServer::intraComm ;
  list<MPI_Comm> CServer::interCommLeft ;
  list<MPI_Comm> CServer::interCommRight ;
  std::list<MPI_Comm> CServer::contextInterComms;
  std::list<MPI_Comm> CServer::contextIntraComms;
  int CServer::serverLevel = 0 ;
  int CServer::nbContexts = 0;
  bool CServer::isRoot = false ;
  int CServer::rank_ = INVALID_RANK;
  StdOFStream CServer::m_infoStream;
  StdOFStream CServer::m_errorStream;
  map<string,CContext*> CServer::contextList ;
  vector<int> CServer::sndServerGlobalRanks;
  bool CServer::finished=false ;
  bool CServer::is_MPI_Initialized ;
  CEventScheduler* CServer::eventScheduler = 0;

  //---------------------------------------------------------------
  /*!
   * \fn void CServer::initialize(void)
   * Creates intraComm for each possible type of server (classical, primary or secondary).
   * In the case of secondary servers, an intraComm is created for each secondary server pool.
   * (For now the assumption is that there is one proc per pool.)
   * Creates interComms and stores them into the following lists:
   *   classical server -- interCommLeft
   *   primary server   -- interCommLeft and interCommRight
   *   secondary server -- interCommLeft for each pool.
   */
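  // A rough sketch of the resulting topology (illustrative assumption: one node,
  // four server processes, ratioServer2 = 50; actual sizes come from the config):
  //
  //   clients --interCommLeft--> level-1 servers --interCommRight--> level-2 servers
  //
  // With that 50% ratio, ranks 0-1 of the server node become level-1 servers and
  // ranks 2-3 become level-2 servers, each level-2 rank forming its own
  // one-process pool (see the intraComm split below, where myColor == rank_).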
  void CServer::initialize(void)
  {
    int initialized ;
    MPI_Initialized(&initialized) ;
    if (initialized) is_MPI_Initialized=true ;
    else is_MPI_Initialized=false ;
    int rank ;

    // Not using OASIS
    if (!CXios::usingOasis)
    {
      if (!is_MPI_Initialized)
      {
        MPI_Init(NULL, NULL);
      }
      CTimer::get("XIOS").resume() ;

      boost::hash<string> hashString ;
      unsigned long hashServer = hashString(CXios::xiosCodeId);

      unsigned long* hashAll ;

      int size ;
      int myColor ;
      int i,c ;
      MPI_Comm newComm;

      MPI_Comm_size(CXios::globalComm, &size) ;
      MPI_Comm_rank(CXios::globalComm, &rank_);

      hashAll=new unsigned long[size] ;
      // MPI_UNSIGNED_LONG matches the unsigned long buffers on both sides
      MPI_Allgather(&hashServer, 1, MPI_UNSIGNED_LONG, hashAll, 1, MPI_UNSIGNED_LONG, CXios::globalComm) ;

      map<unsigned long, int> colors ;
      map<unsigned long, int> leaders ;
      map<unsigned long, int>::iterator it ;

      int srvNodeSize = 1, srvNodeLeader = 0;

      // (1) Establish client leaders, distribute processes between two server levels
      for(i=0,c=0;i<size;i++)
      {
        if (colors.find(hashAll[i])==colors.end())
        {
          colors[hashAll[i]]=c ;
          leaders[hashAll[i]]=i ;
          c++ ;
        }
        if (CXios::usingServer2)
        {
          if (hashAll[i] == hashServer)
          {
            // test i == 0 first so that hashAll[i-1] is never read out of bounds
            if (i == 0 || hashAll[i-1] != hashServer)
            {
              srvNodeLeader = i;
            }
            // guard the look-ahead so that hashAll[size] is never read
            if (i+1 < size && hashAll[i+1] == hashServer)
            {
              ++srvNodeSize;
            }
            else
            {
              if ( (rank_-srvNodeLeader) >= 0 &&
                   (rank_-srvNodeLeader) < (1.- CXios::ratioServer2/100.)*srvNodeSize )
              {
                serverLevel = 1;
              }
              if ( (rank_-srvNodeLeader) >= (1.- CXios::ratioServer2/100.)*srvNodeSize &&
                   (rank_-srvNodeLeader) < srvNodeSize )
              {
                serverLevel = 2;
              }
              srvNodeSize = 1;
            }
          }
        }
      }
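
      // Worked example (assumed MPI layout, not prescribed by XIOS): ranks {0,1}
      // run a client code with hash Hc and ranks {2,3} run the server with hash
      // Hs. Then hashAll = {Hc,Hc,Hs,Hs}, and the loop yields
      // colors = {Hc:0, Hs:1} and leaders = {Hc:0, Hs:2}: rank 0 leads the
      // client group and rank 2 leads the server group.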

      // (2) Create intraComm
      myColor = (serverLevel == 2) ? rank_ : colors[hashServer];
      MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;
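      // Level-2 servers use their own global rank as the split colour, so each
      // one lands in a single-process intraComm (one pool per process, as the
      // header comment assumes); all other server processes share the colour
      // derived from the server hash and form one intraComm together.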

      // (3) Create interComm
      if (CXios::usingServer2)
      {
        // serverLevel is an int, so gather it through an unsigned long temporary
        // instead of sending 4 bytes with an 8-byte MPI_LONG signature
        unsigned long level = serverLevel;
        MPI_Allgather(&level, 1, MPI_UNSIGNED_LONG, hashAll, 1, MPI_UNSIGNED_LONG, CXios::globalComm) ;

        for (i=0; i<size; i++)
          if (hashAll[i] == 2)
            sndServerGlobalRanks.push_back(i);
      }

      if (serverLevel == 0)
      {
        int clientLeader;
        for(it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first!=hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm,&intraCommSize) ;
            MPI_Comm_rank(intraComm,&intraCommRank) ;
            info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
        }
      }
      else if (serverLevel == 1)
      {
        int clientLeader;

        for (it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first != hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm, &intraCommSize) ;
            MPI_Comm_rank(intraComm, &intraCommRank) ;
            info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
        }

        for (int i = 0; i < sndServerGlobalRanks.size(); ++i)
        {
          int intraCommSize, intraCommRank ;
          MPI_Comm_size(intraComm, &intraCommSize) ;
          MPI_Comm_rank(intraComm, &intraCommRank) ;
          info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize
                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ;
          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;
          interCommRight.push_back(newComm) ;
        }
      }
      else
      {
        int clientLeader;
        clientLeader = leaders[hashString(CXios::xiosCodeId)];
        int intraCommSize, intraCommRank ;
        MPI_Comm_size(intraComm, &intraCommSize) ;
        MPI_Comm_rank(intraComm, &intraCommRank) ;
        info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize
                <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

        MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;
        interCommLeft.push_back(newComm) ;
      }
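
      // Tag convention in the calls above, as the code stands: tag 0 matches
      // clients with classical or level-1 servers, tag 1 matches level-1 with
      // level-2 servers, so the concurrent MPI_Intercomm_create calls on
      // CXios::globalComm cannot pair the wrong groups.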

      delete [] hashAll ;

    }
    // using OASIS
    else
    {
      int size;
      int myColor;
      if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId);

      CTimer::get("XIOS").resume() ;
      MPI_Comm localComm;
      oasis_get_localcomm(localComm);

      // Create server intraComm
      if (!CXios::usingServer2)
      {
        MPI_Comm_dup(localComm, &intraComm);
        MPI_Comm_rank(localComm,&rank_) ;
      }
      else
      {
        MPI_Comm_rank(localComm,&rank_) ;
        MPI_Comm_size(localComm,&size) ;

        for (int i=size*CXios::ratioServer2/100; i<size; i++)
          sndServerGlobalRanks.push_back(i);

        if ( rank_ < (size - sndServerGlobalRanks.size()) )
        {
          serverLevel = 1;
          myColor = 0;
        }
        else
        {
          serverLevel = 2;
          myColor = rank_;
        }
        MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;
      }
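
      // Worked example (sizes assumed): with size == 8 and ratioServer2 == 25,
      // the loop pushes ranks 2..7 into sndServerGlobalRanks, so ranks 0-1 stay
      // at level 1 (colour 0) while each of ranks 2-7 becomes a one-process
      // level-2 pool. Note that here the ranks are relative to localComm.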
      MPI_Comm_size(intraComm,&size) ;
      string codesId=CXios::getin<string>("oasis_codes_id") ;

      vector<string> splitted ;
      boost::split( splitted, codesId, boost::is_any_of(","), boost::token_compress_on ) ;
      vector<string>::iterator it ;

      MPI_Comm newComm ;
      int globalRank ;
      MPI_Comm_rank(CXios::globalComm,&globalRank);

      for(it=splitted.begin();it!=splitted.end();it++)
      {
        oasis_get_intercomm(newComm,*it) ;
//      interComm.push_back(newComm) ;
        if ( !CXios::usingServer2)
        {
          interCommLeft.push_back(newComm) ;
          if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
        }
        else
        {
          if (serverLevel == 1)
          {
            interCommLeft.push_back(newComm) ;
            if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
            for (int i = 0; i < sndServerGlobalRanks.size(); ++i)
            {
              int srvSndLeader = sndServerGlobalRanks[i];
              info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<size
                      <<" intraCommRank :"<<rank_<<" clientLeader "<< srvSndLeader<<endl ;
              MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;
              interCommRight.push_back(newComm) ;
            }
          }
          else if (serverLevel == 2)
          {
            info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<size
                    <<" intraCommRank :"<<rank_<<" clientLeader "<< 0<<endl ;
            MPI_Intercomm_create(intraComm, 0, localComm, 0, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
          }
        }
//      MPI_Comm_remote_size(newComm,&size);
      }

      oasis_enddef() ;
    }

    MPI_Comm_rank(intraComm, &rank) ;
    if (rank==0) isRoot=true;
    else isRoot=false;

    eventScheduler = new CEventScheduler(intraComm) ;
  }

  void CServer::finalize(void)
  {
    CTimer::get("XIOS").suspend() ;

    delete eventScheduler ;

    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)
      MPI_Comm_free(&(*it));

//  for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
//    MPI_Comm_free(&(*it));

//  for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++)
//    MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
      MPI_Comm_free(&(*it));

    MPI_Comm_free(&intraComm);

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize();
      else MPI_Finalize() ;
    }
    report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Ratio : "<<CTimer::get("Process events").getCumulatedTime()/CTimer::get("XIOS server").getCumulatedTime()*100.<<"%"<<endl ;
    report(100)<<CTimer::getAllCumulatedTime()<<endl ;
  }

  void CServer::eventLoop(void)
  {
    bool stop=false ;

    CTimer::get("XIOS server").resume() ;
    while(!stop)
    {
      if (isRoot)
      {
        listenContext();
        listenRootContext();
        if (!finished) listenFinalize() ;
      }
      else
      {
        listenRootContext();
        if (!finished) listenRootFinalize() ;
      }

      contextEventLoop() ;
      if (finished && contextList.empty()) stop=true ;
      eventScheduler->checkEvent() ;
    }
    CTimer::get("XIOS server").suspend() ;
  }

  void CServer::listenFinalize(void)
  {
    list<MPI_Comm>::iterator it, itr;
    int msg = 0 ;   // initialized defensively in case interCommLeft is empty on entry
    int flag ;

    for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
    {
      MPI_Status status ;
      traceOff() ;
      MPI_Iprobe(0,0,*it,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
        info(20)<<" CServer : Receive client finalize"<<endl ;
        // Sending server finalize message to secondary servers (if any)
        for(itr=interCommRight.begin();itr!=interCommRight.end();itr++)
        {
          MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;
        }
        MPI_Comm_free(&(*it));
        interCommLeft.erase(it) ;
        break ;
      }
    }

    if (interCommLeft.empty())
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
      MPI_Waitall(size-1,requests,status) ;

      finished=true ;
      delete [] requests ;
      delete [] status ;
    }
  }
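
  // Finalize protocol sketch (as implemented above): each client leader sends a
  // message with tag 0 on its interComm; the server root forwards it to any
  // level-2 servers on interCommRight, frees and drops that interComm, and once
  // interCommLeft is empty it fans the message out to the rest of the intraComm
  // with tag 4, which listenRootFinalize() below picks up.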

  void CServer::listenRootFinalize()
  {
    int flag ;
    MPI_Status status ;
    int msg ;

    traceOff() ;
    MPI_Iprobe(0,4,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
      finished=true ;
    }
  }

  void CServer::listenContext(void)
  {
    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int rank ;
    int count ;

    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      traceOff() ;
      MPI_Test(&request,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        recvContextMessage((void*)buffer,count) ;
        delete [] buffer ;
        recept=false ;
      }
    }
  }
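
  // listenContext() is a small two-state machine: on the first pass it probes
  // CXios::globalComm (tag 1) and posts a non-blocking receive; on later passes
  // it tests that receive and, once complete, hands the buffer to
  // recvContextMessage(). The statics keep the pending request alive between
  // calls from the event loop.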

  void CServer::recvContextMessage(void* buff,int count)
  {
    static map<string,contextMessage> recvContextId;
    map<string,contextMessage>::iterator it ;
    CBufferIn buffer(buff,count) ;
    string id ;
    int clientLeader ;
    int nbMessage ;

    buffer>>id>>nbMessage>>clientLeader ;

    it=recvContextId.find(id) ;
    if (it==recvContextId.end())
    {
      contextMessage msg={0,0} ;
      pair<map<string,contextMessage>::iterator,bool> ret ;
      ret=recvContextId.insert(pair<string,contextMessage>(id,msg)) ;
      it=ret.first ;
    }
    it->second.nbRecv+=1 ;
    it->second.leaderRank+=clientLeader ;

    if (it->second.nbRecv==nbMessage)
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
//    MPI_Request* requests= new MPI_Request[size-1] ;
//    MPI_Status* status= new MPI_Status[size-1] ;
      MPI_Request* requests= new MPI_Request[size] ;
      MPI_Status* status= new MPI_Status[size] ;

      CMessage msg ;
      msg<<id<<it->second.leaderRank;
      int messageSize=msg.size() ;
      void * sendBuff = new char[messageSize] ;
      CBufferOut sendBuffer(sendBuff,messageSize) ;
      sendBuffer<<msg ;

      // Include root itself in order not to have a divergence
      for(int i=0; i<size; i++)
      {
        // send messageSize bytes, not count: sendBuff only holds messageSize bytes
        MPI_Isend(sendBuff,messageSize,MPI_CHAR,i,2,intraComm,&requests[i]) ;
      }

      // The sends are left pending on purpose: the root's own matching receive is
      // only posted later in listenRootContext(), so waiting here on the self-send
      // could deadlock. sendBuff must therefore outlive this call (it is not freed here).

//    for(int i=1;i<size;i++)
//    {
//      MPI_Isend(buff,count,MPI_CHAR,i,2,intraComm,&requests[i-1]) ;
//    }
//    MPI_Waitall(size-1,requests,status) ;
//    registerContext(buff,count,it->second.leaderRank) ;

      recvContextId.erase(it) ;
      delete [] requests ;
      delete [] status ;
    }
  }
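
  // Registration counting sketch: each of the nbMessage client messages for a
  // context id bumps nbRecv and accumulates the reported leader rank; when the
  // last one arrives, the root rebroadcasts (id, leaderRank) to every rank of
  // intraComm (itself included) with tag 2, so all server processes register
  // the context identically via listenRootContext().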

  void CServer::listenRootContext(void)
  {
    MPI_Status status ;
    int flag ;
    static char* buffer ;   // char*, not void*: delete [] on a void* is undefined
    static MPI_Request request ;
    static bool recept=false ;
    static int count ;
    const int root=0 ;
    boost::hash<string> hashString;
    size_t hashId = hashString("RegisterContext");

    // (1) Receive the context id from the root
    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(root,2,intraComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv((void*)buffer,count,MPI_CHAR,root,2,intraComm,&request) ;
        recept=true ;
      }
    }
    // (2) Once the context id has been received, register an event for it
    else
    {
      MPI_Test(&request,&flag,&status) ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        eventScheduler->registerEvent(nbContexts,hashId);
//      registerContext((void*)buffer,count) ;
//      delete [] buffer ;
        recept=false ;
      }
    }
    // (3) Once the event has been scheduled, call registerContext
    if (eventScheduler->queryEvent(nbContexts,hashId))
    {
      registerContext(buffer,count) ;
      ++nbContexts;
      delete [] buffer ;
    }
  }
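
  // The event scheduler pass in step (3) gives every server process the same
  // global ordering of "RegisterContext" events (keyed by the nbContexts
  // counter and the hashed event name), so the collective calls made inside
  // registerContext() are entered by all ranks in the same sequence.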

  void CServer::registerContext(void* buff, int count, int leaderRank)
  {
    string contextId;
    CBufferIn buffer(buff, count);
//  buffer >> contextId;
    buffer >> contextId >> leaderRank;
    CContext* context;

    info(20) << "CServer : Register new Context : " << contextId << endl;

    if (contextList.find(contextId) != contextList.end())
      ERROR("void CServer::registerContext(void* buff, int count, int leaderRank)",
            << "Context '" << contextId << "' has already been registered");

    context=CContext::create(contextId);
    contextList[contextId]=context;

    // Primary or classical server: create communication channel with a client
    // (1) create interComm (with a client)
    // (2) initialize client and server (contextClient and contextServer)
    MPI_Comm inter;
    if (serverLevel < 2)
    {
      MPI_Comm contextInterComm;
      MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);
      MPI_Intercomm_merge(contextInterComm,1,&inter);
      MPI_Barrier(inter);
      MPI_Comm_free(&inter);
      context->initServer(intraComm,contextInterComm);
      contextInterComms.push_back(contextInterComm);
    }
    // Secondary server: create communication channel with a primary server
    // (1) duplicate the interComm with a primary server
    // (2) initialize client and server (contextClient and contextServer)
    // Remark: in the case of the secondary server there is no need to create an interComm
    // with MPI_Intercomm_create, because the interComm of CContext is defined on the same
    // processes as the interComm of CServer. So just duplicate it.
    else if (serverLevel == 2)
    {
      MPI_Comm_dup(interCommLeft.front(), &inter);
      contextInterComms.push_back(inter);
      context->initServer(intraComm, contextInterComms.back());
    }

    // Primary server:
    // (1) send create context message to secondary servers
    // (2) initialize communication channels with secondary servers (create contextClient and contextServer)
    if (serverLevel == 1)
    {
      int i = 0, size;
      int messageSize;
      MPI_Comm_size(intraComm, &size) ;
      for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)
      {
        StdString str = contextId +"_server_" + boost::lexical_cast<string>(i);
        CMessage msg ;   // fresh message each iteration, so contents do not accumulate across servers
        msg<<str<<size<<rank_ ;
        messageSize = msg.size() ;
        char* sendBuff = new char[messageSize] ;   // local char*: deleting through the void* parameter would be undefined
        CBufferOut buffer(sendBuff,messageSize) ;
        buffer<<msg ;
        MPI_Send(sendBuff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;
        MPI_Comm_dup(*it, &inter);
        contextInterComms.push_back(inter);
        MPI_Comm_dup(intraComm, &inter);
        contextIntraComms.push_back(inter);
        context->initClient(contextIntraComms.back(), contextInterComms.back()) ;
        delete [] sendBuff ;
      }
    }
  }
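
  // The 10+leaderRank tag used above keeps simultaneous context intercomm
  // creations on CXios::globalComm distinct per client group, and clear of the
  // tag-1 context-announcement messages travelling on the same communicator.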

  void CServer::contextEventLoop(void)
  {
    bool isFinalized ;
    map<string,CContext*>::iterator it ;

    for(it=contextList.begin();it!=contextList.end();it++)
    {
      isFinalized=it->second->isFinalized();
      if (isFinalized)
      {
        contextList.erase(it) ;
        break ;
      }
      else
        it->second->checkBuffersAndListen();
    }
  }

  //! Get rank of the current process in the intraComm
  int CServer::getRank()
  {
    int rank;
    MPI_Comm_rank(intraComm,&rank);
    return rank;
  }

  vector<int>& CServer::getSecondaryServerGlobalRanks()
  {
    return sndServerGlobalRanks;
  }

  /*!
   * Open a file specified by a suffix and an extension and use it for the given file buffer.
   * The file name will be suffix+rank+extension.
   *
   * \param fileName [in] prototype file name
   * \param ext [in] extension of the file
   * \param fb [in/out] the file buffer
   */
  void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
  {
    StdStringStream fileNameClient;
    int numDigit = 0;
    int size = 0;
    int id;
    MPI_Comm_size(CXios::globalComm, &size);
    while (size)
    {
      size /= 10;
      ++numDigit;
    }
    id = rank_; //getRank();

    fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << id << ext;
    fb->open(fileNameClient.str().c_str(), std::ios::out);
    if (!fb->is_open())
      ERROR("void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the server log(s).");
  }
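
  // Example (file name assumed for illustration): with 128 processes, numDigit
  // becomes 3, so rank 7 opening an info stream with prototype "xios_server"
  // would write to "xios_server_007.out".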

  /*!
   * \brief Open a file stream to write the info logs
   * Open a file stream with a specific file name suffix+rank
   * to write the info logs.
   * \param fileName [in] prototype file name
   */
  void CServer::openInfoStream(const StdString& fileName)
  {
    std::filebuf* fb = m_infoStream.rdbuf();
    openStream(fileName, ".out", fb);

    info.write2File(fb);
    report.write2File(fb);
  }

  //! Write the info logs to standard output
  void CServer::openInfoStream()
  {
    info.write2StdOut();
    report.write2StdOut();
  }

  //! Close the info log file if it is open
  void CServer::closeInfoStream()
  {
    if (m_infoStream.is_open()) m_infoStream.close();
  }

  /*!
   * \brief Open a file stream to write the error log
   * Open a file stream with a specific file name suffix+rank
   * to write the error log.
   * \param fileName [in] prototype file name
   */
  void CServer::openErrorStream(const StdString& fileName)
  {
    std::filebuf* fb = m_errorStream.rdbuf();
    openStream(fileName, ".err", fb);

    error.write2File(fb);
  }

  //! Write the error log to standard error output
  void CServer::openErrorStream()
  {
    error.write2StdErr();
  }

  //! Close the error log file if it is open
  void CServer::closeErrorStream()
  {
    if (m_errorStream.is_open()) m_errorStream.close();
  }
}