Changeset 1134 for XIOS/dev/branch_yushan_merged/src/client.cpp
- Timestamp: 05/16/17 17:54:30
- File: XIOS/dev/branch_yushan_merged/src/client.cpp (1 edited)
Legend:
- Unmodified: no prefix
- Added: prefixed with +
- Removed: prefixed with -
--- XIOS/dev/branch_yushan_merged/src/client.cpp (r1032)
+++ XIOS/dev/branch_yushan_merged/src/client.cpp (r1134)

@@ -11,11 +11,15 @@
 #include "timer.hpp"
 #include "buffer_client.hpp"
+#include "log.hpp"
+
 
 namespace xios
 {
+  extern int test_omp_rank;
+  #pragma omp threadprivate(test_omp_rank)
 
   MPI_Comm CClient::intraComm ;
   MPI_Comm CClient::interComm ;
-  std::list<MPI_Comm> CClient::contextInterComms;
+  std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0;
   int CClient::serverLeader ;
   bool CClient::is_MPI_Initialized ;
@@ -24,5 +28,7 @@
   StdOFStream CClient::m_errorStream;
 
-  void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)
+  StdOFStream CClient::array_infoStream[10];
+
+  void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
   {
     int initialized ;
@@ -35,9 +41,13 @@
     {
       // localComm doesn't given
+
       if (localComm == MPI_COMM_NULL)
       {
         if (!is_MPI_Initialized)
         {
-          MPI_Init(NULL, NULL);
+          //MPI_Init(NULL, NULL);
+          int return_level;
+          MPI_Init_thread(NULL, NULL, 3, &return_level);
+          assert(return_level == 3);
         }
         CTimer::get("XIOS").resume() ;
@@ -51,8 +61,8 @@
         int myColor ;
         int i,c ;
-        MPI_Comm newComm ;
-
-        MPI_Comm_size(CXios::globalComm,&size) ;
+
+        MPI_Comm_size(CXios::globalComm,&size);
         MPI_Comm_rank(CXios::globalComm,&rank);
+
 
         hashAll=new unsigned long[size] ;
@@ -96,7 +106,16 @@
           MPI_Comm_size(intraComm,&intraCommSize) ;
           MPI_Comm_rank(intraComm,&intraCommRank) ;
-          info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
-                  <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
+
+          #pragma omp critical(_output)
+          {
+            info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
+                    <<" intraCommRank :"<<intraCommRank<<" serverLeader "<< serverLeader
+                    <<" globalComm : "<< &(CXios::globalComm) << endl ;
+          }
+
+
+
           MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
+
         }
         else
@@ -148,4 +167,5 @@
 
     MPI_Comm_dup(intraComm,&returnComm) ;
+
   }
 
@@ -154,5 +174,9 @@
   {
     CContext::setCurrent(id) ;
-    CContext* context=CContext::create(id);
+    CContext* context = CContext::create(id);
+
+    int tmp_rank;
+    MPI_Comm_rank(contextComm,&tmp_rank) ;
+
     StdString idServer(id);
     idServer += "_server";
@@ -161,6 +185,6 @@
     {
       int size,rank,globalRank ;
-      size_t message_size ;
-      int leaderRank ;
+      //size_t message_size ;
+      //int leaderRank ;
       MPI_Comm contextInterComm ;
 
@@ -173,5 +197,5 @@
       CMessage msg ;
       msg<<idServer<<size<<globalRank ;
-      // msg<<id<<size<<globalRank ;
+
 
       int messageSize=msg.size() ;
@@ -184,5 +208,8 @@
 
       MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-      info(10)<<"Register new Context : "<<id<<endl ;
+
+      #pragma omp critical(_output)
+      info(10)<<" RANK "<< tmp_rank<<" Register new Context : "<<id<<endl ;
+
 
       MPI_Comm inter ;
@@ -190,7 +217,11 @@
       MPI_Barrier(inter) ;
 
+
      context->initClient(contextComm,contextInterComm) ;
 
-      contextInterComms.push_back(contextInterComm);
+
+      if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+      contextInterComms_ptr->push_back(contextInterComm);
+
       MPI_Comm_free(&inter);
     }
@@ -209,6 +240,8 @@
       // Finally, we should return current context to context client
       CContext::setCurrent(id);
-
-      contextInterComms.push_back(contextInterComm);
+
+      if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+      contextInterComms_ptr->push_back(contextInterComm);
+
     }
   }
@@ -220,5 +253,5 @@
 
     MPI_Comm_rank(intraComm,&rank) ;
 
     if (!CXios::isServer)
     {
@@ -230,6 +263,7 @@
     }
 
-    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+    for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); ++it)
       MPI_Comm_free(&(*it));
+
     MPI_Comm_free(&interComm);
     MPI_Comm_free(&intraComm);
@@ -241,14 +275,20 @@
     {
       if (CXios::usingOasis) oasis_finalize();
-      else MPI_Finalize() ;
-    }
-
-    info(20) << "Client side context is finalized"<<endl ;
-    report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-    report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-    report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
-    report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
-    // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-    report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-    report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+      else MPI_Finalize();
+    }
+
+    #pragma omp critical (_output)
+    info(20) << "Client "<<rank<<" : Client side context is finalized "<< endl ;
+
+  /*  #pragma omp critical (_output)
+    {
+      report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
+      report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
+      report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
+      report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
+      report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+    }
+  */
   }
@@ -280,8 +320,9 @@
 
     fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;
+
     fb->open(fileNameClient.str().c_str(), std::ios::out);
     if (!fb->is_open())
       ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
            << std::endl << "Can not open <" << fileNameClient << "> file to write the client log(s).");
   }
 
@@ -294,9 +335,13 @@
   void CClient::openInfoStream(const StdString& fileName)
   {
-    std::filebuf* fb = m_infoStream.rdbuf();
-    openStream(fileName, ".out", fb);
-
-    info.write2File(fb);
-    report.write2File(fb);
+    //std::filebuf* fb = m_infoStream.rdbuf();
+
+    info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();
+
+    openStream(fileName, ".out", info_FB[omp_get_thread_num()]);
+
+    info.write2File(info_FB[omp_get_thread_num()]);
+    report.write2File(info_FB[omp_get_thread_num()]);
+
   }
 
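The substantive change in initialize() is the switch from MPI_Init to MPI_Init_thread, requesting thread-support level 3 so that OpenMP threads can make MPI calls concurrently. A minimal sketch of that pattern follows, assuming a standard MPI installation; the changeset passes the literal 3, for which the portable spelling is the named constant MPI_THREAD_MULTIPLE:

{{{
// Sketch: initialize MPI with full thread support (MPI_THREAD_MULTIPLE).
#include <mpi.h>
#include <cassert>

int main(int argc, char** argv)
{
    int provided;
    // Request that any thread may call MPI at any time.
    MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
    // The changeset asserts on the provided level; production code might
    // instead fall back to funneled communication when support is lower.
    assert(provided == MPI_THREAD_MULTIPLE);

    // ... hybrid MPI + OpenMP work ...

    MPI_Finalize();
    return 0;
}
}}}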
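The new test_omp_rank global with #pragma omp threadprivate, together with the #pragma omp critical(_output) blocks around the info()/report() calls, is the standard OpenMP recipe for per-thread state plus serialized logging: each thread gets its own copy of the variable, and the named critical section keeps log lines from different threads from interleaving. A self-contained sketch (only the variable name is taken from the changeset; the rest is illustrative):

{{{
// Sketch: threadprivate global plus a named critical section for output.
#include <omp.h>
#include <iostream>

int test_omp_rank = -1;               // one private copy per OpenMP thread
#pragma omp threadprivate(test_omp_rank)

int main()
{
    #pragma omp parallel
    {
        test_omp_rank = omp_get_thread_num();

        // All criticals sharing the name (_output) exclude one another,
        // so whole lines are written atomically w.r.t. other threads.
        #pragma omp critical(_output)
        std::cout << "thread " << test_omp_rank
                  << ", private copy at " << &test_omp_rank << std::endl;
    }
    return 0;
}
}}}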
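Replacing the static object contextInterComms with the pointer contextInterComms_ptr, allocated on first use ("if(contextInterComms_ptr == NULL) ..."), avoids a single list instance shared by every thread; presumably the pointer is declared threadprivate elsewhere in the branch, which this hunk does not show. A hedged sketch of the lazy per-thread container pattern, with hypothetical names:

{{{
// Sketch: lazily allocated container behind a threadprivate pointer.
#include <list>

static std::list<int>* items_ptr = 0;   // stand-in for contextInterComms_ptr
#pragma omp threadprivate(items_ptr)

void record(int value)
{
    // The first call on each thread allocates that thread's own list.
    if (items_ptr == 0) items_ptr = new std::list<int>;
    items_ptr->push_back(value);
}

int main()
{
    #pragma omp parallel
    record(42);                         // each thread fills a private list
    return 0;
}
}}}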
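Finally, openInfoStream() now writes through a per-thread stream, array_infoStream indexed by omp_get_thread_num(), rather than the shared m_infoStream, so each OpenMP thread logs to its own buffer. The sketch below uses hypothetical names and a fixed array of 10 mirroring the changeset; sizing it from omp_get_max_threads() would be the more general choice:

{{{
// Sketch: one log file stream per OpenMP thread.
#include <omp.h>
#include <fstream>
#include <sstream>
#include <string>

static std::ofstream perThreadLog[10];  // mirrors array_infoStream[10]

void openLogForThread(const std::string& baseName)
{
    int t = omp_get_thread_num();
    std::ostringstream name;
    name << baseName << "_" << t << ".out";      // e.g. client_3.out
    perThreadLog[t].open(name.str().c_str(), std::ios::out);
}
}}}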