- Timestamp: 09/26/14 14:52:04 (10 years ago)
- Location: XIOS/trunk
- Files: 1 added, 10 edited
XIOS/trunk/Doxyfile
r300 → r490:

     # an explicit \brief command for a brief description.)

    -QT_AUTOBRIEF           = NO
    +QT_AUTOBRIEF           = YES

     # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
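With QT_AUTOBRIEF set to YES, Doxygen takes the first line of a Qt-style comment (/*! ... */ or //!) as the brief description, so an explicit \brief command is no longer required. A small illustration against one of the methods added below (the comment text itself is illustrative):

    /*!
     * Get the MPI rank of the current process. (first line -> brief description)
     *
     * Everything after the first line, such as this paragraph, becomes the
     * detailed description; no \brief command is needed.
     */
    static int getRank();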
XIOS/trunk/inputs/COMPLETE/iodef.xml
r486 → r490:

     <variable_group id="parameters" >
       <variable id="using_server" type="boolean">false</variable>
    -  <variable id="info_level" type="int">1</variable>
    +  <variable id="info_level" type="int">100</variable>
    +  <variable id="info_output_file" type="boolean">true</variable>
     </variable_group>
     </variable_definition>
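Taken together, the parameters group of the COMPLETE test now raises the verbosity to 100 and switches the new per-process file output on. The resulting block reads as follows (the enclosing <variable_definition> element is inferred from the context lines):

    <variable_definition>
      <variable_group id="parameters" >
        <variable id="using_server" type="boolean">false</variable>
        <variable id="info_level" type="int">100</variable>
        <variable id="info_output_file" type="boolean">true</variable>
      </variable_group>
    </variable_definition>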
XIOS/trunk/src/client.cpp
r400 → r490 (hunks that only strip trailing whitespace are omitted):

    +#include "globalScopeData.hpp"
     #include "xmlioserver_spl.hpp"
     #include "cxios.hpp"
     ...
     int CClient::serverLeader ;
     bool CClient::is_MPI_Initialized ;
    +int CClient::rank = INVALID_RANK;
    +StdOFStream CClient::m_infoStream;

In initialize(), the block-local declarations `int rank ;` (one in the non-OASIS branch, one in the OASIS branch) are removed, so the MPI_Comm_rank() calls now fill the new static member instead of a local variable. registerContext() is unchanged apart from whitespace and still logs "Register new Context : <id>" at info level 10.

Three new methods are appended at the end of the namespace:

    int CClient::getRank()
    {
      return rank;
    }

    /*!
     * \brief Open a file stream to write in
     * Opens a file stream named after the given prototype plus the suffix "_client_<rank>"
     * \param [in] fileName prototype file name
     */
    void CClient::openInfoStream(const StdString& fileName)
    {
      std::filebuf* fb = m_infoStream.rdbuf();
      StdStringStream fileNameClient;
      fileNameClient << fileName << "_client_" << getRank() << ".txt";
      fb->open(fileNameClient.str().c_str(), std::ios::out);
      if (!fb->is_open())
        ERROR("void CClient::openInfoStream(const StdString& fileName)",
              << endl << "Cannot open <" << fileNameClient.str() << "> file to write");

      info.write2File(fb);
    }

    //! Write to standard output
    void CClient::openInfoStream()
    {
      info.write2StdOut();
    }

    //! Close the file if it is open
    void CClient::closeInfoStream()
    {
      if (m_infoStream.is_open()) m_infoStream.close();
    }
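The pattern used here — opening the filebuf of an already-constructed std::ofstream on a rank-specific file name, then handing that streambuf to the logger — can be exercised standalone. A minimal sketch, assuming a hard-coded rank in place of MPI_Comm_rank():

    #include <fstream>
    #include <iostream>
    #include <sstream>

    int main()
    {
      int rank = 3;                  // stands in for MPI_Comm_rank() here
      std::ofstream infoStream;      // constructed closed, like m_infoStream

      // Open the underlying filebuf on a per-rank file name.
      std::filebuf* fb = infoStream.rdbuf();
      std::stringstream name;
      name << "info_output_client_" << rank << ".txt";
      fb->open(name.str().c_str(), std::ios::out);

      // Any ostream can now be redirected onto this buffer, which is
      // what CLog::write2File() does for the global `info` logger.
      std::ostream log(fb);
      log << "-> info : client rank " << rank << " started" << std::endl;

      infoStream.close();            // closeInfoStream() equivalent
      return 0;
    }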
XIOS/trunk/src/client.hpp
r382 → r490:

         static int serverLeader;
         static bool is_MPI_Initialized ;
    +
    +    public:
    +      static int getRank();
    +
    +      static void openInfoStream(const StdString& fileName);
    +
    +      static void openInfoStream();
    +
    +      static void closeInfoStream();
    +
    +    protected:
    +      static int rank;
    +      static StdOFStream m_infoStream;
       } ;
     }
XIOS/trunk/src/cxios.cpp
r401 → r490:

     string CXios::rootFile="./iodef.xml" ;
     string CXios::xiosCodeId="xios.x" ;
    +string CXios::infoFile="./info_output";
     ...
     double CXios::defaultBufferServerFactorSize=2 ;
    +bool CXios::printInfo2File;

In initialize(), the new flag is read from the configuration file:

     info.setLevel(getin<int>("info_level",0)) ;
    +printInfo2File=getin<bool>("info_output_file",false);

In initClientSide(), the info stream is opened right after the client is initialized:

     CClient::initialize(codeId,localComm,returnComm) ;
    +
    +  if (printInfo2File)
    +    CClient::openInfoStream(infoFile);
    +  else
    +    CClient::openInfoStream();

and clientFinalize() closes it again:

     CClient::finalize() ;
    +CClient::closeInfoStream();

initServerSide() now drives the complete server life cycle instead of delegating it to CServer::initialize():

    +  //! Initialize all MPI aspects
    +  CServer::initialize();
    +
    +  if (printInfo2File)
    +    CServer::openInfoStream(infoFile);
    +  else
    +    CServer::openInfoStream();
    +
    +  //! Enter the loop to listen for messages from clients
    +  CServer::eventLoop();
    +
    +  //! Finalize
    +  CServer::finalize();
    +  CServer::closeInfoStream();
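Seen from a model code linked against XIOS, the new plumbing is transparent: the usual init/finalize pair now opens and closes the per-process info stream behind the scenes. A hedged usage sketch ("oceanx" and model_main are illustrative names, not from this changeset):

    #include "cxios.hpp"   // also provides the MPI types used below

    // Hypothetical model entry point.
    void model_main()
    {
      MPI_Comm localComm = MPI_COMM_NULL;
      MPI_Comm xiosComm;

      // Reads iodef.xml; when info_output_file is true, each client rank
      // starts logging to ./info_output_client_<rank>.txt from here on.
      xios::CXios::initClientSide("oceanx", localComm, xiosComm);

      // ... define contexts and send fields as usual ...

      // Flushes and closes the per-rank info file.
      xios::CXios::clientFinalize();
    }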
XIOS/trunk/src/cxios.hpp
r382 → r490:

     static string rootFile ;
     static string xiosCodeId ;
    +static string infoFile;
     ...
     template <typename T>
     static T getin(const string& id) ;
    +
    +// static void openFileInfo(const StdString& fileName);
    +// static void closeFileInfo();

     static bool isClient ;
     static bool isServer ;

     static MPI_Comm globalComm ;
    +
    +static bool printInfo2File;
     static bool usingOasis ;
     static bool usingServer ;
     ...
     static double defaultBufferServerFactorSize ;
    +
    +private:
    +// static StdOFStream infoFileStream_;
     } ;
XIOS/trunk/src/log.hpp
r380 → r490:

    -    CLog(const string& name_) : ostream(cout.rdbuf()),level(0),name(name_) {}
    +    CLog(const string& name_) : ostream(cout.rdbuf()),level(0),name(name_),strBuf_(cout.rdbuf()) {}
         CLog& operator()(int l)
         {
           if (l<=level)
           {
    -        rdbuf(cout.rdbuf()) ;
    +        rdbuf(strBuf_);
             *this<<"-> "<<name<<" : " ;
           }
           ...
           return *this;
         }

New public redirection helpers, and the private buffer pointer they manage:

    +  public:
    +    //! Write info into a file through the given streambuf
    +    void write2File(std::streambuf* sBuff) { changeStreamBuff(sBuff); }
    +
    +    //! Write info to standard output
    +    void write2StdOut() { changeStreamBuff(cout.rdbuf()); }
    +
    +  private:
    +    /*!
    +     * \brief Change the current streambuf (std::cout by default) to a new one
    +     * This function associates a new streambuf with the current log object
    +     * \param [in] sBuff pointer to the new streambuf
    +     */
    +    void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); }

       private :
         int level ;
         string name ;
    +    std::streambuf* strBuf_;
     };
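The whole redirection feature rests on one property of std::ostream: its destination is a swappable streambuf. A self-contained sketch of the CLog mechanism (simplified; names are illustrative, and the NULL-rdbuf suppression mirrors what CLog::isActive() tests):

    #include <fstream>
    #include <iostream>
    #include <string>

    // Simplified CLog: an ostream whose destination buffer can be swapped at runtime.
    class MiniLog : public std::ostream
    {
      public:
        MiniLog(const std::string& name)
          : std::ostream(std::cout.rdbuf()), level_(0), name_(name),
            strBuf_(std::cout.rdbuf()) {}

        // Attach the remembered buffer and emit the prefix, as CLog::operator() does.
        MiniLog& operator()(int l)
        {
          if (l <= level_)
          {
            rdbuf(strBuf_);
            *this << "-> " << name_ << " : ";
          }
          else rdbuf(NULL);   // messages above the level are swallowed
          return *this;
        }

        void setLevel(int l) { level_ = l; }

        // The two entry points added to CLog in this changeset.
        void write2File(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); }
        void write2StdOut() { strBuf_ = std::cout.rdbuf(); rdbuf(std::cout.rdbuf()); }

      private:
        int level_;
        std::string name_;
        std::streambuf* strBuf_;
    };

    int main()
    {
      MiniLog info("info");
      info(0) << "goes to stdout" << std::endl;

      std::ofstream f("info_output.txt");
      info.write2File(f.rdbuf());          // what CClient::openInfoStream() triggers
      info(0) << "now goes to info_output.txt" << std::endl;
      return 0;
    }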
XIOS/trunk/src/server.cpp
r483 → r490 (hunks that only strip trailing whitespace are omitted):

    +#include "globalScopeData.hpp"
     #include "xmlioserver_spl.hpp"
     #include "cxios.hpp"
     ...
     bool CServer::isRoot ;
    -int CServer::rank ;
    +int CServer::rank = INVALID_RANK;
    +StdOFStream CServer::m_infoStream;
     map<string,CContext*> CServer::contextList ;

In initialize(), the block-local `int rank ;` declarations are commented out so the MPI_Comm_rank() calls fill the new static member, and the server no longer starts its own loop:

    -    eventLoop() ;
    -    finalize() ;
    +    // eventLoop() ;
    +    // finalize() ;

Both calls have moved to CXios::initServerSide() (see cxios.cpp above). The bodies of eventLoop(), listenFinalize(), listenRootFinalize(), listenContext(), recvContextMessage(), listenRootContext(), registerContext() and contextEventLoop() are unchanged apart from whitespace.

The same stream helpers as on the client side are appended, with a "_server_" suffix:

    //! Get rank of the current process
    int CServer::getRank()
    {
      return rank;
    }

    /*!
     * \brief Open a file stream to write in
     * Opens a file stream named after the given prototype plus the suffix "_server_<rank>"
     * \param [in] fileName prototype file name
     */
    void CServer::openInfoStream(const StdString& fileName)
    {
      std::filebuf* fb = m_infoStream.rdbuf();
      StdStringStream fileNameServer;
      fileNameServer << fileName << "_server_" << getRank() << ".txt";
      fb->open(fileNameServer.str().c_str(), std::ios::out);
      if (!fb->is_open())
        ERROR("void CServer::openInfoStream(const StdString& fileName)",
              << endl << "Cannot open <" << fileNameServer.str() << "> file to write");

      info.write2File(fb);
    }

    //! Open stream for standard output
    void CServer::openInfoStream()
    {
      info.write2StdOut();
    }

    //! Close the open stream
    void CServer::closeInfoStream()
    {
      if (m_infoStream.is_open()) m_infoStream.close();
    }
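With the event loop and finalization hoisted out of CServer::initialize(), a dedicated server executable reduces to a single call; the commented sequence below is exactly what initServerSide() now performs. A sketch (the main() wrapper is illustrative, not part of this changeset):

    #include "cxios.hpp"

    int main(int argc, char** argv)
    {
      // CXios::initServerSide() now drives the whole life cycle:
      //   CServer::initialize();        // MPI/OASIS setup
      //   CServer::openInfoStream(...); // stdout, or info_output_server_<rank>.txt
      //   CServer::eventLoop();         // listen for client messages
      //   CServer::finalize();
      //   CServer::closeInfoStream();
      xios::CXios::initServerSide();
      return 0;
    }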
XIOS/trunk/src/server.hpp
r382 → r490:

         static bool isRoot ;
    -    static int rank ;
         static map<string,CContext*> contextList ;
         static bool finished ;
         static bool is_MPI_Initialized ;
    +
    +    public:
    +      //! Get rank of the current process
    +      static int getRank();
    +
    +      //! Print information into a file
    +      static void openInfoStream(const StdString& fileName);
    +
    +      //! Print information to standard output
    +      static void openInfoStream();
    +
    +      //! Close the info stream (closing the file)
    +      static void closeInfoStream();
    +
    +    private:
    +      static StdOFStream m_infoStream;
    +      static int rank ;
       } ;
     }
XIOS/trunk/src/xmlioserver_spl.hpp
r335 → r490:

     typedef std::ostringstream StdOStringStream;
     typedef std::istringstream StdIStringStream;
    +typedef std::stringstream StdStringStream;
     typedef std::ofstream StdOFStream;
     typedef std::ifstream StdIFStream;