Changeset 1134 for XIOS/dev/branch_yushan_merged/src/server.cpp
- Timestamp: 05/16/17 17:54:30 (7 years ago)
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_yushan_merged/src/server.cpp
r1032 r1134 9 9 #include <boost/functional/hash.hpp> 10 10 #include <boost/algorithm/string.hpp> 11 #include "mpi.hpp"12 11 #include "tracer.hpp" 13 12 #include "timer.hpp" … … 26 25 bool CServer::finished=false ; 27 26 bool CServer::is_MPI_Initialized ; 27 28 28 29 CEventScheduler* CServer::eventScheduler = 0; 29 30 30 31 void CServer::initialize(void) 31 32 { 32 int initialized ;33 MPI_Initialized(&initialized) ;34 if (initialized) is_MPI_Initialized=true ;35 else is_MPI_Initialized=false ;36 37 33 // Not using OASIS 38 34 if (!CXios::usingOasis) 39 35 { 40 36 41 if (!is_MPI_Initialized)42 {43 MPI_Init(NULL, NULL);44 }45 37 CTimer::get("XIOS").resume() ; 46 38 … … 50 42 unsigned long* hashAll ; 51 43 52 // int rank ; 44 53 45 int size ; 54 46 int myColor ; … … 77 69 78 70 myColor=colors[hashServer] ; 79 MPI_Comm_split(MPI_COMM_WORLD,myColor,rank,&intraComm) ; 80 71 72 73 MPI_Comm_split(CXios::globalComm,myColor,rank,&intraComm) ; 74 75 81 76 int serverLeader=leaders[hashServer] ; 82 77 int clientLeader; 83 78 84 79 serverLeader=leaders[hashServer] ; 85 for(it=leaders.begin();it!=leaders.end(); it++)80 for(it=leaders.begin();it!=leaders.end();++it) 86 81 { 87 82 if (it->first!=hashServer) … … 104 99 else 105 100 { 106 // int rank ,size;107 101 int size; 108 102 if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId); … … 135 129 } 136 130 137 // int rank;138 131 MPI_Comm_rank(intraComm,&rank) ; 139 132 if (rank==0) isRoot=true; … … 149 142 delete eventScheduler ; 150 143 151 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)144 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); ++it) 152 145 MPI_Comm_free(&(*it)); 153 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)146 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); ++it) 154 147 MPI_Comm_free(&(*it)); 148 155 149 MPI_Comm_free(&intraComm); 156 
150 … … 158 152 { 159 153 if (CXios::usingOasis) oasis_finalize(); 160 else MPI_Finalize() ;154 //else {MPI_Finalize() ;} 161 155 } 156 157 162 158 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ; 163 159 report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ; … … 180 176 { 181 177 listenRootContext(); 182 if (!finished) listenRootFinalize() ; 178 if (!finished) 179 { 180 listenRootFinalize() ; 181 } 183 182 } 184 183 185 184 contextEventLoop() ; 186 185 if (finished && contextList.empty()) stop=true ; 186 187 187 eventScheduler->checkEvent() ; 188 188 } 189 190 189 191 CTimer::get("XIOS server").suspend() ; 190 192 } … … 196 198 int flag ; 197 199 198 for(it=interComm.begin();it!=interComm.end(); it++)200 for(it=interComm.begin();it!=interComm.end();++it) 199 201 { 200 202 MPI_Status status ; … … 206 208 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 207 209 info(20)<<" CServer : Receive client finalize"<<endl ; 210 208 211 MPI_Comm_free(&(*it)); 209 212 interComm.erase(it) ; … … 259 262 { 260 263 traceOff() ; 264 #ifdef _usingEP 265 MPI_Iprobe(-1,1,CXios::globalComm, &flag, &status) ; 266 #else 261 267 MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 268 #endif 262 269 traceOn() ; 270 263 271 if (flag==true) 264 272 { 273 #ifdef _usingMPI 265 274 rank=status.MPI_SOURCE ; 275 #elif _usingEP 276 rank= status.ep_src ; 277 #endif 266 278 MPI_Get_count(&status,MPI_CHAR,&count) ; 267 279 buffer=new char[count] ; … … 277 289 if (flag==true) 278 290 { 291 #ifdef _usingMPI 279 292 rank=status.MPI_SOURCE ; 293 #elif _usingEP 294 rank= status.ep_src ; 295 #endif 280 296 MPI_Get_count(&status,MPI_CHAR,&count) ; 281 297 recvContextMessage((void*)buffer,count) ; … … 399 415 bool finished ; 400 416 map<string,CContext*>::iterator it ; 401 for(it=contextList.begin();it!=contextList.end(); it++)417 
for(it=contextList.begin();it!=contextList.end();++it) 402 418 { 403 419 finished=it->second->checkBuffersAndListen();
Note: See TracChangeset for help on using the changeset viewer.