Changeset 1342
- Timestamp:
- 11/21/17 16:03:00 (7 years ago)
- Location:
- XIOS/dev/branch_openmp/src
- Files:
-
- 10 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/branch_openmp/src/buffer_client.cpp
r1338 r1342 30 30 buffer[1] = new char[bufferSize]; 31 31 retBuffer = new CBufferOut(buffer[current], bufferSize); 32 //info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 32 #pragma omp critical (_output) 33 info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 33 34 } 34 35 -
XIOS/dev/branch_openmp/src/calendar.cpp
r1334 r1342 117 117 const CDate& CCalendar::update(int step) 118 118 { 119 //info(20) << "update step : " << step << " timestep " << this->timestep << std::endl; 119 #pragma omp critical (_output) 120 info(20) << "update step : " << step << " timestep " << this->timestep << std::endl; 120 121 return (this->currentDate = this->getInitDate() + step * this->timestep); 121 122 } -
XIOS/dev/branch_openmp/src/client.cpp
r1338 r1342 26 26 StdOFStream CClient::m_errorStream; 27 27 28 StdOFStream CClient::array_infoStream[16]; 29 28 30 void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm) 29 31 { … … 186 188 187 189 MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ; 188 #pragma omp critical ( std_output)189 { 190 //info(10)<<"Register new Context : "<<id<<endl ;190 #pragma omp critical (_output) 191 { 192 info(10)<<"Register new Context : "<<id<<endl ; 191 193 } 192 194 … … 254 256 MPI_Finalize() ; 255 257 } 258 #pragma omp critical (_output) 259 info(20) << "Client side context is finalized"<<endl ; 256 260 257 //info(20) << "Client side context is finalized"<<endl ; 258 //report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 259 //report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 260 //report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 261 //report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 262 //report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 263 // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ; 264 //report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 265 //report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 266 //report(100)<<CTimer::getAllCumulatedTime()<<endl ; 261 262 /*#pragma omp critical (_output) 263 { 264 report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 265 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 266 report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 267 report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 268 report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 269 report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 270 report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 271 report(100)<<CTimer::getAllCumulatedTime()<<endl ; 272 }*/ 267 273 } 268 274 … … 307 313 void CClient::openInfoStream(const StdString& fileName) 308 314 { 309 std::filebuf* fb = m_infoStream.rdbuf(); 310 openStream(fileName, ".out", fb); 311 312 info.write2File(fb); 313 report.write2File(fb); 315 info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf(); 316 317 openStream(fileName, ".out", info_FB[omp_get_thread_num()]); 318 319 info.write2File(info_FB[omp_get_thread_num()]); 320 report.write2File(info_FB[omp_get_thread_num()]); 314 321 } 315 322 -
XIOS/dev/branch_openmp/src/client.hpp
r1331 r1342 56 56 #pragma omp threadprivate(m_errorStream) 57 57 58 static StdOFStream array_infoStream[16]; 59 58 60 static void openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb); 59 61 }; -
XIOS/dev/branch_openmp/src/context_server.cpp
r1338 r1342 254 254 { 255 255 finished=true; 256 //info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl; 256 #pragma omp critical (_output) 257 info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl; 257 258 std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 258 259 iteMap = mapBufferSize_.end(), itMap; -
XIOS/dev/branch_openmp/src/cxios.cpp
r1331 r1342 124 124 if (printLogs2Files) 125 125 { 126 #pragma omp critical 126 127 CClient::openInfoStream(clientFile); 127 128 CClient::openErrorStream(clientFile); … … 139 140 if (CClient::getRank()==0) 140 141 { 142 #pragma omp critical (_output) 141 143 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 142 144 globalRegistry->toFile("xios_registry.bin") ; -
XIOS/dev/branch_openmp/src/log.cpp
r1328 r1342 1 1 #include "log.hpp" 2 #include <string> 3 #include <iostream> 4 #include <string> 2 5 3 6 namespace xios 4 7 { 8 std::filebuf* info_FB[16]; 9 10 5 11 CLog info("info") ; 6 12 CLog report("report") ; 7 13 CLog error("error", cerr.rdbuf()) ; 14 15 16 CLog& CLog::operator()(int l) 17 { 18 if (l<=level) 19 { 20 omp_set_lock( &mutex ); 21 rdbuf(strBuf_array[omp_get_thread_num()]); 22 *this<<"-> "<<name<<" : " ; 23 omp_unset_lock( &mutex ); 24 } 25 else rdbuf(NULL) ; 26 return *this; 27 } 8 28 } -
XIOS/dev/branch_openmp/src/log.hpp
r1328 r1342 5 5 #include <iostream> 6 6 #include <string> 7 #include <stdio.h> 8 #include <omp.h> 7 9 8 10 namespace xios … … 14 16 public : 15 17 CLog(const string& name_, std::streambuf* sBuff = cout.rdbuf()) 16 : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) {} 17 CLog& operator()(int l) 18 : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) 18 19 { 19 if (l<=level) 20 { 21 rdbuf(strBuf_); 22 *this<<"-> "<<name<<" : " ; 23 } 24 else rdbuf(NULL) ; 25 return *this; 20 omp_init_lock( &mutex ); 21 for(int i=0; i<16; i++) 22 strBuf_array[i] = sBuff; 26 23 } 24 25 ~CLog() 26 { 27 omp_destroy_lock( &mutex ); 28 } 29 30 CLog& operator()(int l); 27 31 void setLevel(int l) {level=l; } 28 32 int getLevel() {return level ;} … … 46 50 * \param [in] pointer to new streambuf 47 51 */ 48 void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); } 52 void changeStreamBuff(std::streambuf* sBuff) 53 { 54 strBuf_ = sBuff; 55 strBuf_array[omp_get_thread_num()] = sBuff; 56 rdbuf(sBuff); 57 } 49 58 50 59 int level ; 51 60 string name ; 52 61 std::streambuf* strBuf_; 62 std::streambuf* strBuf_array[16]; 63 omp_lock_t mutex; 53 64 }; 54 65 … … 56 67 extern CLog report; 57 68 extern CLog error; 69 70 extern std::filebuf* info_FB[16]; 58 71 } 59 72 #endif -
XIOS/dev/branch_openmp/src/node/context.cpp
r1338 r1342 1205 1205 void CContext::updateCalendar(int step) 1206 1206 { 1207 //info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1207 #pragma omp critical (_output) 1208 info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1208 1209 calendar->update(step); 1209 //info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1210 #pragma omp critical (_output) 1211 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1210 1212 #ifdef XIOS_MEMTRACK_LIGHT 1213 #pragma omp critical (_output) 1211 1214 info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 1212 1215 #endif -
XIOS/dev/branch_openmp/src/node/field.cpp
r1341 r1342 316 316 while (currentDate >= lastDataRequestedFromServer) 317 317 { 318 //info(20) << "currentDate : " << currentDate << endl ; 319 //info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 320 //info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 321 //info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 322 318 #pragma omp critical (_output) 319 { 320 info(20) << "currentDate : " << currentDate << endl ; 321 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 322 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 323 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 324 } 323 325 dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); 324 326 }
Note: See TracChangeset for help on using the changeset viewer.