Changeset 1601 for XIOS/dev/dev_trunk_omp/src/client.cpp
- Timestamp: 11/19/18 15:52:54 (5 years ago)
- File: XIOS/dev/dev_trunk_omp/src/client.cpp (1 edited)
Legend:
- Unmodified
- Added
- Removed
XIOS/dev/dev_trunk_omp/src/client.cpp
r1587 r1601 12 12 #include "buffer_client.hpp" 13 13 #include "string_tools.hpp" 14 using namespace ep_lib; 14 15 15 16 namespace xios … … 18 19 MPI_Comm CClient::intraComm ; 19 20 MPI_Comm CClient::interComm ; 20 std::list<MPI_Comm> CClient::contextInterComms;21 std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0; 21 22 int CClient::serverLeader ; 22 23 bool CClient::is_MPI_Initialized ; … … 24 25 StdOFStream CClient::m_infoStream; 25 26 StdOFStream CClient::m_errorStream; 27 28 StdOFStream CClient::array_infoStream[16]; 29 26 30 MPI_Comm& CClient::getInterComm(void) { return (interComm); } 27 31 28 32 ///--------------------------------------------------------------- 29 33 /*! … … 106 110 MPI_Comm_size(intraComm,&intraCommSize) ; 107 111 MPI_Comm_rank(intraComm,&intraCommRank) ; 108 info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 112 113 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ; 114 #pragma omp critical (_output) 115 { 116 info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 109 117 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ; 110 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ; 111 //rank_ = intraCommRank; 118 } 112 119 } 113 120 else … … 191 198 CContext::setCurrent(id); 192 199 193 contextInterComms.push_back(contextInterComm); 200 if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>; 201 contextInterComms_ptr->push_back(contextInterComm); 194 202 } 195 203 else … … 217 225 218 226 MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ; 227 #pragma omp critical (_output) 219 228 info(10)<<"Register new Context : "<<id<<endl ; 220 229 MPI_Comm inter ; … … 224 233 context->initClient(contextComm,contextInterComm) ; 225 234 226 contextInterComms.push_back(contextInterComm); 235 if(contextInterComms_ptr == NULL) 
contextInterComms_ptr = new std::list<MPI_Comm>; 236 contextInterComms_ptr->push_back(contextInterComm); 237 227 238 MPI_Comm_free(&inter); 228 239 delete [] buff ; … … 277 288 } 278 289 279 for (std::list<MPI_Comm>::iterator it = contextInterComms .begin(); it != contextInterComms.end(); it++)290 for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); it++) 280 291 MPI_Comm_free(&(*it)); 281 292 MPI_Comm_free(&interComm); … … 287 298 if (!is_MPI_Initialized) 288 299 { 289 if (CXios::usingOasis) oasis_finalize(); 290 else MPI_Finalize() ; 291 } 292 300 //if (CXios::usingOasis) oasis_finalize(); 301 //else 302 MPI_Finalize() ; 303 } 304 #pragma omp critical (_output) 293 305 info(20) << "Client side context is finalized"<<endl ; 294 report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 295 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 296 report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 297 report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 298 report(0)<< " Performance report : This ratio must be close to zero. 
Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 306 307 #pragma omp critical (_output) 308 { 309 report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 310 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 311 report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 312 report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 313 report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 299 314 // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ; 300 report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 301 report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 302 report(100)<<CTimer::getAllCumulatedTime()<<endl ; 315 report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 316 report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 317 report(100)<<CTimer::getAllCumulatedTime()<<endl ; 318 } 303 319 } 304 320 … … 355 371 void CClient::openInfoStream(const StdString& fileName) 356 372 { 357 std::filebuf* fb = m_infoStream.rdbuf(); 358 openStream(fileName, ".out", fb); 359 360 info.write2File(fb); 361 report.write2File(fb); 373 info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf(); 374 375 
openStream(fileName, ".out", info_FB[omp_get_thread_num()]); 376 377 info.write2File(info_FB[omp_get_thread_num()]); 378 report.write2File(info_FB[omp_get_thread_num()]); 362 379 } 363 380
Note: See TracChangeset for help on using the changeset viewer.