#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "server.hpp"
#include "client.hpp"
#include "type.hpp"
#include "context.hpp"
#include "object_template.hpp"
#include "oasis_cinterface.hpp"
#include <boost/functional/hash.hpp>
#include <boost/algorithm/string.hpp>
#include "mpi.hpp"
#include "tracer.hpp"
#include "timer.hpp"
#include "event_scheduler.hpp"

namespace xios
{
  MPI_Comm CServer::intraComm ;
  list<MPI_Comm> CServer::interCommLeft ;
  list<MPI_Comm> CServer::interCommRight ;
  list<MPI_Comm> CServer::interComm ;
  std::list<MPI_Comm> CServer::contextInterComms;
  int CServer::nbSndSrvPools = (CXios::serverLevel == 0) ? 0 : 1;
  int CServer::poolNb = 0;
  bool CServer::isRoot = false ;
  int CServer::rank = INVALID_RANK;
  int CServer::rankSndServers = 0;
  StdOFStream CServer::m_infoStream;
  StdOFStream CServer::m_errorStream;
  map<string,CContext*> CServer::contextList ;
  bool CServer::finished=false ;
  bool CServer::is_MPI_Initialized ;
  CEventScheduler* CServer::eventScheduler = 0;

  //---------------------------------------------------------------
  /*!
   * \fn void CServer::initialize(void)
   * Creates the intra- and inter-communicators for each initialized server pool.
   */
  void CServer::initialize(void)
  {
    int initialized ;
    MPI_Initialized(&initialized) ;
    if (initialized) is_MPI_Initialized=true ;
    else is_MPI_Initialized=false ;


    // Not using OASIS
    if (!CXios::usingOasis)
    {

      if (!is_MPI_Initialized)
      {
        MPI_Init(NULL, NULL);
      }
      CTimer::get("XIOS").resume() ;

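      // Each process hashes the string id of its role (client code, primary
      // server, or secondary server); equal hashes will share a color below,
      // so the hash effectively serves as the MPI_Comm_split key.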
      boost::hash<string> hashString ;
      unsigned long hashServer1 = hashString(CXios::xiosCodeIdPrm);
      unsigned long hashServer2 = hashString(CXios::xiosCodeIdSnd);
      unsigned long hashServer = (CXios::serverLevel < 2) ? hashServer1 : hashServer2;

      unsigned long* hashAll ;
      unsigned long* hashAllServers ;

//    int rank ;
      int size ;
      int myColor ;
      int i,c ;
      MPI_Comm newComm, serversInterComm;

      MPI_Comm_size(CXios::globalComm, &size) ;
      MPI_Comm_rank(CXios::globalComm, &rank);

      hashAll=new unsigned long[size] ;
      MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;

      map<unsigned long, int> colors, colorsServers ;
      map<unsigned long, int> leaders ;
      map<unsigned long, int>::iterator it ;
//    map<unsigned long, int> leadersServers ;
      vector<int> leadersServers;

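      // Assign one color per distinct hash, and record the lowest global rank
      // carrying each hash as the leader of that group.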
      for(i=0,c=0;i<size;i++)
      {
        if (colors.find(hashAll[i])==colors.end())
        {
          colors[hashAll[i]]=c ;
          leaders[hashAll[i]]=i ;
          c++ ;
        }
      }

      nbSndSrvPools = size - leaders[hashServer2]; // one proc per secondary-server pool

      // Taking into account multiple pools on secondary server
      if (nbSndSrvPools > 1)
      {
        if (CXios::serverLevel > 1)
        {
          int nbProcs = size - leaders[hashServer2];
          int remain = nbProcs % nbSndSrvPools;
          int procsPerPool = nbProcs / nbSndSrvPools;
          rankSndServers = rank - leaders[hashServer2];
          StdString strTmp = CXios::xiosCodeIdSnd;

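          // Distribute the secondary-server ranks among the pools; when the
          // process count does not divide evenly, the first `remain` pools
          // are intended to receive procsPerPool+1 processes each.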
          if (remain == 0)
          {
            poolNb = rankSndServers/procsPerPool;
          }
          else
          {
            if (rankSndServers <= (procsPerPool + 1) * remain)
              poolNb = rankSndServers/(procsPerPool+1);
            else
            {
              poolNb = remain + 1;
              rankSndServers -= (procsPerPool + 1) * remain;
              rankSndServers -= procsPerPool;
              poolNb += rankSndServers/procsPerPool;
            }
          }
          strTmp += boost::lexical_cast<string>(poolNb+1); // add 1 to avoid hashing zero
          hashServer = hashString(strTmp);
          hashServer2 = hashString(strTmp);
        }
      }

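      // With several secondary pools, all server processes first share one
      // color (size) so that serversInterComm gathers every server; a second
      // split by hash color below then carves out each pool's intraComm.
      // With a single pool, one split of the world communicator suffices.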
      if (nbSndSrvPools > 1)
      {
        myColor = size;
        MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &serversInterComm) ;
      }
      else
      {
        myColor=colors[hashServer] ;
        MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &intraComm) ;
      }

      if (nbSndSrvPools > 1)
      {
        int sizeServers;
//      int rankServers;
//      MPI_Comm_rank(serversInterComm, &rankServers) ;
        MPI_Comm_size(serversInterComm, &sizeServers) ;
        hashAllServers=new unsigned long[sizeServers] ;
        MPI_Allgather(&hashServer, 1, MPI_LONG, hashAllServers, 1, MPI_LONG, serversInterComm) ;

        for(i=0, c=0; i<sizeServers; i++)
        {
          if (colorsServers.find(hashAllServers[i])==colorsServers.end())
          {
            colorsServers[hashAllServers[i]]=c ;
//          leadersServers[hashAllServers[i]]= leaders[hashServer1] + i ;
            leadersServers.push_back( leaders[hashServer1] + i );
            c++ ;
          }
        }
        myColor=colorsServers[hashServer] ;
        MPI_Comm_split(serversInterComm, myColor, rank, &intraComm) ;
      }


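      // serverLevel == 0: classical single-level mode; create one
      // inter-communicator towards the leader of each client group.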
      if (CXios::serverLevel == 0)
      {
        int clientLeader;
        for(it=leaders.begin();it!=leaders.end();it++)
        {
          if (it->first!=hashServer)
          {
            clientLeader=it->second ;
            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm,&intraCommSize) ;
            MPI_Comm_rank(intraComm,&intraCommRank) ;
            info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
            interCommLeft.push_back(newComm) ;
            interComm.push_back(newComm) ;
          }
        }
      }
      else
      {
        if (CXios::serverLevel == 1)
        {
          // Creating interComm with client (interCommLeft)
          int clientLeader;
          int srvSndLeader;
          for(it=leaders.begin();it!=leaders.end();it++)
          {
            if (it->first != hashServer2)
            {
              if (it->first != hashServer1)
              {
                clientLeader=it->second ;
                int intraCommSize, intraCommRank ;
                MPI_Comm_size(intraComm, &intraCommSize) ;
                MPI_Comm_rank(intraComm, &intraCommRank) ;
                info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
                        <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
                MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
                interCommLeft.push_back(newComm) ;
                interComm.push_back(newComm) ;
              }
            }
            else
            {
              srvSndLeader = it->second;
            }
          }

          // Creating interComm with secondary server pool(s) (interCommRight)
//        if (nbSndSrvPools < 1)
          if (nbSndSrvPools < 2)
          {
            CClient::initializeClientOnServer(rank, intraComm, srvSndLeader);
            interCommRight.push_back(CClient::getInterComm());
            interComm.push_back(CClient::getInterComm());
          }
          else
          {
//          for(it=leadersServers.begin();it!=leadersServers.end();it++)
//          {
//            if (it->first != hashServer)
//            {
//              srvSndLeader = it->second;
//              CClient::initializeClientOnServer(rank, intraComm, srvSndLeader);
//              interCommRight.push_back(CClient::getInterComm());
//              interComm.push_back(CClient::getInterComm());
//            }
//          }

            for(size_t i = 1; i < leadersServers.size(); ++i)
            {
              srvSndLeader = leadersServers[i];
              CClient::initializeClientOnServer(rank, intraComm, srvSndLeader);
              interCommRight.push_back(CClient::getInterComm());
              interComm.push_back(CClient::getInterComm());
            }
          }
        } // primary server

        else // secondary server pool(s)
        {
          int clientLeader;
          if (nbSndSrvPools < 2)
//        if (nbSndSrvPools < 1)
          {
            for(it=leaders.begin();it!=leaders.end();it++)
            {
              if (it->first == hashServer1)
              {
                clientLeader=it->second ;
                int intraCommSize, intraCommRank ;
                MPI_Comm_size(intraComm, &intraCommSize) ;
                MPI_Comm_rank(intraComm, &intraCommRank) ;
                info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
                        <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

                MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
                interCommLeft.push_back(newComm) ;
                interComm.push_back(newComm) ;

                break;
              }
            }
          }
          else
          {
//          for(it=leadersServers.begin();it!=leadersServers.end();it++)
            {
//            if (it->first == hashServer1)
              {
//              clientLeader=it->second ;
                clientLeader = leadersServers[0];
                int intraCommSize, intraCommRank ;
                MPI_Comm_size(intraComm, &intraCommSize) ;
                MPI_Comm_rank(intraComm, &intraCommRank) ;
                info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
                        <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;

                MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;
                interCommLeft.push_back(newComm) ;
                interComm.push_back(newComm) ;
              }
            }
          }
        } // secondary server
      } // CXios::serverLevel != 0

      if (nbSndSrvPools > 1) delete [] hashAllServers;
      delete [] hashAll ;

    }
    // using OASIS
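    // When coupled through OASIS, the local communicator and the
    // inter-communicators with the other codes are obtained from the coupler
    // rather than built by hand.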
    else
    {
//    int rank ,size;
      int size;
      if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId);

      CTimer::get("XIOS").resume() ;
      MPI_Comm localComm;
      oasis_get_localcomm(localComm);
      MPI_Comm_dup(localComm, &intraComm);

      MPI_Comm_rank(intraComm,&rank) ;
      MPI_Comm_size(intraComm,&size) ;
      string codesId=CXios::getin<string>("oasis_codes_id") ;

      vector<string> splitted ;
      boost::split( splitted, codesId, boost::is_any_of(","), boost::token_compress_on ) ;
      vector<string>::iterator it ;

      MPI_Comm newComm ;
      int globalRank ;
      MPI_Comm_rank(CXios::globalComm,&globalRank);

      for(it=splitted.begin();it!=splitted.end();it++)
      {
        oasis_get_intercomm(newComm,*it) ;
        if (rank==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;
        MPI_Comm_remote_size(newComm,&size);
        interComm.push_back(newComm) ;
      }
      oasis_enddef() ;
    }

    MPI_Comm_rank(intraComm, &rank) ;
    if (rank==0) isRoot=true;
    else isRoot=false;

    eventScheduler = new CEventScheduler(intraComm) ;
  }

  void CServer::finalize(void)
  {

    CTimer::get("XIOS").suspend() ;

    delete eventScheduler ;

    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
      MPI_Comm_free(&(*it));

//  for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
//    MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++)
      MPI_Comm_free(&(*it));

    for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)
      MPI_Comm_free(&(*it));

    MPI_Comm_free(&intraComm);

    if (!is_MPI_Initialized)
    {
      if (CXios::usingOasis) oasis_finalize();
      else MPI_Finalize() ;
    }
    report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
    report(0)<<"Performance report : Ratio : "<<CTimer::get("Process events").getCumulatedTime()/CTimer::get("XIOS server").getCumulatedTime()*100.<<"%"<<endl ;
  }

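  // Main server loop: the root of the pool listens for context registrations
  // and finalize requests and relays them to the other processes; the loop
  // stops once a finalize has been received and every context has completed.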
  void CServer::eventLoop(void)
  {
    bool stop=false ;

    CTimer::get("XIOS server").resume() ;
    while(!stop)
    {

      if (isRoot)
      {
        listenContext();
        if (!finished) listenFinalize() ;
      }
      else
      {
        listenRootContext();
        if (!finished) listenRootFinalize() ;
      }

      contextEventLoop() ;
      if (finished && contextList.empty()) stop=true ;
      eventScheduler->checkEvent() ;

    }
    CTimer::get("XIOS server").suspend() ;
  }

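  // Root-only: probe each client-side inter-communicator for a finalize
  // message (tag 0), forward it to the secondary pool(s) and free the
  // communicator; once no client remains, notify the rest of the pool on
  // tag 4.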
  void CServer::listenFinalize(void)
  {
    list<MPI_Comm>::iterator it, itr;
    int msg ;
    int flag ;

    for(it=interCommLeft.begin();it!=interCommLeft.end();it++)
    {
      MPI_Status status ;
      traceOff() ;
      MPI_Iprobe(0,0,*it,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
        info(20)<<" CServer : Receive client finalize"<<endl ;

        // If primary server, send finalize to secondary server pool(s)
        for(itr=interCommRight.begin(); itr!=interCommRight.end(); itr++)
        {
          MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;

//        MPI_Comm_free(&(*itr));
//        interCommRight.erase(itr) ;
        }

        MPI_Comm_free(&(*it));
//      interComm.erase(it) ;
        interCommLeft.erase(it) ;
        break ;
      }
    }

    if (interCommLeft.empty())
//  if (interComm.empty())
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;
      MPI_Waitall(size-1,requests,status) ;

      finished=true ;
      delete [] requests ;
      delete [] status ;
    }
  }


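  // Non-root processes: check for the tag-4 finalize notification sent by
  // the root of the pool.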
  void CServer::listenRootFinalize()
  {
    int flag ;
    MPI_Status status ;
    int msg ;

    traceOff() ;
    MPI_Iprobe(0,4,intraComm, &flag, &status) ;
    traceOn() ;
    if (flag==true)
    {
      MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;
      finished=true ;
    }
  }

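  // Root-only: receive context-registration messages (tag 1 on the global
  // communicator) in two non-blocking steps: probe and post an MPI_Irecv
  // first, then test the pending request on subsequent calls.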
  void CServer::listenContext(void)
  {

    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int rank ;
    int count ;

    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv(buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      traceOff() ;
      MPI_Test(&request,&flag,&status) ;
      traceOn() ;
      if (flag==true)
      {
        rank=status.MPI_SOURCE ;
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        recvContextMessage(buffer,count) ;
        delete [] buffer ;
        recept=false ;
      }
    }
  }

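  // Accumulate one registration message per participating client process for
  // a given context id. The leader ranks are summed, which yields the actual
  // client leader assuming (as the client side is expected to guarantee)
  // that only the leader contributes a non-zero rank. Once all nbMessage
  // messages have arrived, the buffer is forwarded to the other server
  // processes (tag 2) and the context is registered.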
  void CServer::recvContextMessage(void* buff,int count)
  {
    static map<string,contextMessage> recvContextId;

    map<string,contextMessage>::iterator it ;

    CBufferIn buffer(buff,count) ;
    string id ;
    int clientLeader ;
    int nbMessage ;

    buffer>>id>>nbMessage>>clientLeader ;

    it=recvContextId.find(id) ;
    if (it==recvContextId.end())
    {
      contextMessage msg={0,0} ;
      pair<map<string,contextMessage>::iterator,bool> ret ;
      ret=recvContextId.insert(pair<string,contextMessage>(id,msg)) ;
      it=ret.first ;
    }
    it->second.nbRecv+=1 ;
    it->second.leaderRank+=clientLeader ;

    if (it->second.nbRecv==nbMessage)
    {
      int size ;
      MPI_Comm_size(intraComm,&size) ;
      MPI_Request* requests= new MPI_Request[size-1] ;
      MPI_Status* status= new MPI_Status[size-1] ;

      for(int i=1;i<size;i++)
      {
        MPI_Isend(buff,count,MPI_CHAR,i,2,intraComm,&requests[i-1]) ;
      }
      MPI_Waitall(size-1,requests,status) ;
      registerContext(buff,count,it->second.leaderRank) ;

      recvContextId.erase(it) ;
      delete [] requests ;
      delete [] status ;

    }
  }

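  // Non-root processes: receive the registration buffer forwarded by the
  // pool root on tag 2, using the same probe-then-test scheme as
  // listenContext().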
  void CServer::listenRootContext(void)
  {

    MPI_Status status ;
    int flag ;
    static char* buffer ;
    static MPI_Request request ;
    static bool recept=false ;
    int count ;
    const int root=0 ;

    if (recept==false)
    {
      traceOff() ;
      MPI_Iprobe(root,2,intraComm, &flag, &status) ;
      traceOn() ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        buffer=new char[count] ;
        MPI_Irecv(buffer,count,MPI_CHAR,root,2,intraComm,&request) ;
        recept=true ;
      }
    }
    else
    {
      MPI_Test(&request,&flag,&status) ;
      if (flag==true)
      {
        MPI_Get_count(&status,MPI_CHAR,&count) ;
        registerContext(buffer,count) ;

        delete [] buffer ;
        recept=false ;
      }
    }
  }

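  // Create the server-side representation of a context: build an
  // inter-communicator with the client leader (tag 10+leaderRank), merge it
  // so both sides can synchronise on a barrier, then instantiate the
  // CContext. On a level-1 (primary) server the context is additionally
  // registered, as a client, towards the secondary server pool(s).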
  void CServer::registerContext(void* buff, int count, int leaderRank)
  {
    string contextId;
    CBufferIn buffer(buff, count);
    buffer >> contextId;
    CContext* context;

    info(20) << "CServer : Register new Context : " << contextId << endl;

    if (contextList.find(contextId) != contextList.end())
      ERROR("void CServer::registerContext(void* buff, int count, int leaderRank)",
            << "Context '" << contextId << "' has already been registered");

    MPI_Comm contextInterComm;
    MPI_Intercomm_create(intraComm,0,CXios::globalComm,leaderRank,10+leaderRank,&contextInterComm);

    MPI_Comm inter;
    MPI_Intercomm_merge(contextInterComm,1,&inter);
    MPI_Barrier(inter);

    context=CContext::create(contextId);
    contextList[contextId]=context;
    context->initServer(intraComm,contextInterComm);
    contextInterComms.push_back(contextInterComm);

    if (CXios::serverLevel == 1)
    {
//    CClient::registerContext(contextId, intraComm);
      CClient::registerContextOnSrvPools(contextId, intraComm);
    }

    MPI_Comm_free(&inter);

  }

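  // Poll every registered context; a finished context is erased from the
  // list, at most one per call since erasing invalidates the iterator
  // (hence the break).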
  void CServer::contextEventLoop(void)
  {
    bool finished ;
    map<string,CContext*>::iterator it ;

    for(it=contextList.begin();it!=contextList.end();it++)
    {
      finished=it->second->checkBuffersAndListen();
      if (finished)
      {
        contextList.erase(it) ;
        break ;
      }
    }
  }

  //! Get rank of the current process
  int CServer::getRank()
  {
    return rank;
  }

  /*!
   * Open a file specified by a suffix and an extension and use it for the given file buffer.
   * The file name will be suffix+rank+extension.
   *
   * \param fileName [in] prototype file name
   * \param ext [in] extension of the file
   * \param fb [in/out] the file buffer
   */
  void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
  {
    StdStringStream fileNameClient;
    int numDigit = 0;
    int size = 0;
    int mpiRank;
    MPI_Comm_size(CXios::globalComm, &size);
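    // Count the decimal digits of the communicator size so the rank can be
    // zero-padded to a fixed width in the log file name
    // (e.g. size = 1024 gives numDigit = 4, so rank 7 becomes "0007").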
    while (size)
    {
      size /= 10;
      ++numDigit;
    }

    if (nbSndSrvPools < 2)
      mpiRank = getRank();
    else
      mpiRank = rankSndServers;
    fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << mpiRank << ext;
    fb->open(fileNameClient.str().c_str(), std::ios::out);
    if (!fb->is_open())
      ERROR("void CServer::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the server log(s).");
  }

  /*!
   * \brief Open a file stream to write the info logs
   * Open a file stream with a specific file name suffix+rank
   * to write the info logs.
   * \param fileName [in] prototype file name
   */
  void CServer::openInfoStream(const StdString& fileName)
  {
    std::filebuf* fb = m_infoStream.rdbuf();
    openStream(fileName, ".out", fb);

    info.write2File(fb);
    report.write2File(fb);
  }

  //! Write the info logs to standard output
  void CServer::openInfoStream()
  {
    info.write2StdOut();
    report.write2StdOut();
  }

  //! Close the info log file if it is open
  void CServer::closeInfoStream()
  {
    if (m_infoStream.is_open()) m_infoStream.close();
  }

  /*!
   * \brief Open a file stream to write the error log
   * Open a file stream with a specific file name suffix+rank
   * to write the error log.
   * \param fileName [in] prototype file name
   */
  void CServer::openErrorStream(const StdString& fileName)
  {
    std::filebuf* fb = m_errorStream.rdbuf();
    openStream(fileName, ".err", fb);

    error.write2File(fb);
  }

  //! Write the error log to standard error output
  void CServer::openErrorStream()
  {
    error.write2StdErr();
  }

  //! Close the error log file if it is open
  void CServer::closeErrorStream()
  {
    if (m_errorStream.is_open()) m_errorStream.close();
  }
}