Changeset 1642 for XIOS/dev/branch_openmp/src/client.cpp
Timestamp: 01/23/19 10:31:44 (5 years ago)
File: XIOS/dev/branch_openmp/src/client.cpp (1 edited)
--- XIOS/dev/branch_openmp/src/client.cpp (r1556)
+++ XIOS/dev/branch_openmp/src/client.cpp (r1642)
@@ -9,14 +9,15 @@
 #include "oasis_cinterface.hpp"
 #include "mpi.hpp"
+//#include "mpi_wrapper.hpp"
 #include "timer.hpp"
 #include "buffer_client.hpp"
-using namespace ep_lib;
+#include "string_tools.hpp"
 
 namespace xios
 {
 
-    MPI_Comm CClient::intraComm ;
-    MPI_Comm CClient::interComm ;
-    std::list< MPI_Comm> *CClient::contextInterComms_ptr = 0;
+    ep_lib::MPI_Comm CClient::intraComm ;
+    ep_lib::MPI_Comm CClient::interComm ;
+    std::list<ep_lib::MPI_Comm> CClient::contextInterComms;
     int CClient::serverLeader ;
     bool CClient::is_MPI_Initialized ;
@@ -24,9 +25,6 @@
     StdOFStream CClient::m_infoStream;
     StdOFStream CClient::m_errorStream;
-
-    StdOFStream CClient::array_infoStream[16];
-
-    MPI_Comm& CClient::getInterComm(void) { return (interComm); }
-
+    ep_lib::MPI_Comm& CClient::getInterComm(void) { return (interComm); }
+
     ///---------------------------------------------------------------
     /*!
@@ -38,8 +36,8 @@
     */
 
-    void CClient::initialize(const string& codeId, MPI_Comm& localComm,MPI_Comm& returnComm)
+    void CClient::initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm)
     {
       int initialized ;
-      MPI_Initialized(&initialized) ;
+      ep_lib::MPI_Initialized(&initialized) ;
       if (initialized) is_MPI_Initialized=true ;
       else is_MPI_Initialized=false ;
@@ -50,9 +48,9 @@
       {
         // localComm isn't given
-        if (localComm == MPI_COMM_NULL)
+        if (localComm == EP_COMM_NULL)
         {
           if (!is_MPI_Initialized)
           {
-            MPI_Init(NULL, NULL);
+            ep_lib::MPI_Init(NULL, NULL);
           }
           CTimer::get("XIOS").resume() ;
@@ -66,13 +64,14 @@
           int myColor ;
           int i,c ;
-          MPI_Comm newComm ;
-
-          MPI_Comm_size(CXios::globalComm,&size) ;
-          MPI_Comm_rank(CXios::globalComm,&rank_);
+          ep_lib::MPI_Comm newComm ;
+
+          ep_lib::MPI_Comm_size(CXios::globalComm,&size) ;
+
+          ep_lib::MPI_Comm_rank(CXios::globalComm,&rank_);
 
           hashAll=new unsigned long[size] ;
 
-          MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;
-
+          ep_lib::MPI_Allgather(&hashClient,1,EP_LONG,hashAll,1,EP_LONG,CXios::globalComm) ;
+
           map<unsigned long, int> colors ;
           map<unsigned long, int> leaders ;
@@ -100,6 +99,6 @@
 
           myColor=colors[hashClient];
-          MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
-
+          ep_lib::MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
+
           if (CXios::usingServer)
           {
@@ -107,18 +106,14 @@
             serverLeader=leaders[hashServer] ;
             int intraCommSize, intraCommRank ;
-            MPI_Comm_size(intraComm,&intraCommSize) ;
-            MPI_Comm_rank(intraComm,&intraCommRank) ;
-
-            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
-            #pragma omp critical (_output)
-            {
-              info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize
+            ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+            ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ;
+            info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize
                      <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
-            }
-
+            ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
+            //rank_ = intraCommRank;
           }
           else
           {
-            MPI_Comm_dup(intraComm,&interComm) ;
+            ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
           }
           delete [] hashAll ;
@@ -133,6 +128,6 @@
       else
       {
-        MPI_Comm_dup(localComm,&intraComm) ;
-        MPI_Comm_dup(intraComm,&interComm) ;
+        ep_lib::MPI_Comm_dup(localComm,&intraComm) ;
+        ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
       }
     }
@@ -142,10 +137,10 @@
       {
         // localComm isn't given
-        if (localComm == MPI_COMM_NULL)
+        if (localComm == EP_COMM_NULL)
         {
           if (!is_MPI_Initialized) oasis_init(codeId) ;
           oasis_get_localcomm(localComm) ;
         }
-        MPI_Comm_dup(localComm,&intraComm) ;
+        ep_lib::MPI_Comm_dup(localComm,&intraComm) ;
 
         CTimer::get("XIOS").resume() ;
@@ -154,14 +149,14 @@
         if (CXios::usingServer)
         {
-          MPI_Status status ;
-          MPI_Comm_rank(intraComm,&rank_) ;
+          ep_lib::MPI_Status status ;
+          ep_lib::MPI_Comm_rank(intraComm,&rank_) ;
 
           oasis_get_intercomm(interComm,CXios::xiosCodeId) ;
-          if (rank_==0) MPI_Recv(&serverLeader,1, MPI_INT, 0, 0, interComm, &status) ;
-          MPI_Bcast(&serverLeader,1,MPI_INT,0,intraComm) ;
-        }
-        else MPI_Comm_dup(intraComm,&interComm) ;
-      }
-
-      MPI_Comm_dup(intraComm,&returnComm) ;
+          if (rank_==0) ep_lib::MPI_Recv(&serverLeader,1, EP_INT, 0, 0, interComm, &status) ;
+          ep_lib::MPI_Bcast(&serverLeader,1,EP_INT,0,intraComm) ;
+        }
+        else ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
+      }
+
+      ep_lib::MPI_Comm_dup(intraComm,&returnComm) ;
     }
@@ -175,5 +170,5 @@
     * Function is only called by client.
     */
-    void CClient::registerContext(const string& id, MPI_Comm contextComm)
+    void CClient::registerContext(const string& id, ep_lib::MPI_Comm contextComm)
     {
       CContext::setCurrent(id) ;
@@ -185,5 +180,5 @@
       // Attached mode
       {
-        MPI_Comm contextInterComm ;
-        MPI_Comm_dup(contextComm,&contextInterComm) ;
+        ep_lib::MPI_Comm contextInterComm ;
+        ep_lib::MPI_Comm_dup(contextComm,&contextInterComm) ;
         CContext* contextServer = CContext::create(idServer);
@@ -198,6 +193,5 @@
         CContext::setCurrent(id);
 
-        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
-        contextInterComms_ptr->push_back(contextInterComm);
+        contextInterComms.push_back(contextInterComm);
       }
       else
@@ -206,8 +200,8 @@
         size_t message_size ;
         int leaderRank ;
-        MPI_Comm contextInterComm ;
-
-        MPI_Comm_size(contextComm,&size) ;
-        MPI_Comm_rank(contextComm,&rank) ;
-        MPI_Comm_rank(CXios::globalComm,&globalRank) ;
+        ep_lib::MPI_Comm contextInterComm ;
+
+        ep_lib::MPI_Comm_size(contextComm,&size) ;
+        ep_lib::MPI_Comm_rank(contextComm,&rank) ;
+        ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ;
         if (rank!=0) globalRank=0 ;
@@ -222,23 +216,50 @@
         buffer<<msg ;
 
-        MPI_Send((void*)buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;
-
-        MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-        #pragma omp critical (_output)
+        ep_lib::MPI_Send((void*)buff,buffer.count(),EP_CHAR,serverLeader,1,CXios::globalComm) ;
+
+        ep_lib::MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
         info(10)<<"Register new Context : "<<id<<endl ;
-        MPI_Comm inter ;
-        MPI_Intercomm_merge(contextInterComm,0,&inter) ;
-        MPI_Barrier(inter) ;
+        ep_lib::MPI_Comm inter ;
+        ep_lib::MPI_Intercomm_merge(contextInterComm,0,&inter) ;
+        ep_lib::MPI_Barrier(inter) ;
 
         context->initClient(contextComm,contextInterComm) ;
 
-        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
-        contextInterComms_ptr->push_back(contextInterComm);
-
-        MPI_Comm_free(&inter);
+        contextInterComms.push_back(contextInterComm);
+        ep_lib::MPI_Comm_free(&inter);
         delete [] buff ;
 
       }
     }
+
+    /*!
+    * \fn void CClient::callOasisEnddef(void)
+    * \brief Send the order to the servers to call "oasis_enddef". It must be done by each compound of models before calling oasis_enddef on client side
+    * Function is only called by client.
+    */
+    void CClient::callOasisEnddef(void)
+    {
+      bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
+      if (!oasisEnddef) ERROR("void CClient::callOasisEnddef(void)", <<"Function xios_oasis_enddef called but variable <call_oasis_enddef> is set to false."<<endl
+                                                                     <<"Variable <call_oasis_enddef> must be set to true"<<endl) ;
+      if (CXios::isServer)
+      // Attached mode
+      {
+        // nothing to do
+      }
+      else
+      {
+        int rank ;
+        int msg=0 ;
+
+        ep_lib::MPI_Comm_rank(intraComm,&rank) ;
+        if (rank==0)
+        {
+          ep_lib::MPI_Send(&msg,1,EP_INT,0,5,interComm) ; // tags oasis_endded = 5
+        }
+
+      }
+    }
+
 
     void CClient::finalize(void)
@@ -247,19 +268,19 @@
       int msg=0 ;
 
-      MPI_Comm_rank(intraComm,&rank) ;
+      ep_lib::MPI_Comm_rank(intraComm,&rank) ;
 
       if (!CXios::isServer)
      {
-        MPI_Comm_rank(intraComm,&rank) ;
+        ep_lib::MPI_Comm_rank(intraComm,&rank) ;
         if (rank==0)
         {
-          MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;
-        }
-      }
-
-      for (std::list< MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); it++)
-        MPI_Comm_free(&(*it));
-      MPI_Comm_free(&interComm);
-      MPI_Comm_free(&intraComm);
+          ep_lib::MPI_Send(&msg,1,EP_INT,0,0,interComm) ;
+        }
+      }
+
+      for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+        ep_lib::MPI_Comm_free(&(*it));
+      ep_lib::MPI_Comm_free(&interComm);
+      ep_lib::MPI_Comm_free(&intraComm);
 
       CTimer::get("XIOS init/finalize").suspend() ;
@@ -268,22 +289,17 @@
       if (!is_MPI_Initialized)
       {
-        //if (CXios::usingOasis) oasis_finalize();
-        //else
-        MPI_Finalize() ;
-      }
-      #pragma omp critical (_output)
+        if (CXios::usingOasis) oasis_finalize();
+        else ep_lib::MPI_Finalize() ;
+      }
+
       info(20) << "Client side context is finalized"<<endl ;
-
-      #pragma omp critical (_output)
-      {
-        report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
-        report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-        report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-        report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
-        report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
+      report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
+      report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
+      report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
       // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-        report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-        report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
-        report(100)<<CTimer::getAllCumulatedTime()<<endl ;
-      }
+      report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
+      report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+      report(100)<<CTimer::getAllCumulatedTime()<<endl ;
     }
@@ -311,5 +327,5 @@
       int size = 0;
       int rank;
-      MPI_Comm_size(CXios::globalComm, &size);
+      ep_lib::MPI_Comm_size(CXios::globalComm, &size);
       while (size)
       {
@@ -320,5 +336,5 @@
       if (CXios::usingOasis)
       {
-        MPI_Comm_rank(CXios::globalComm,&rank);
+        ep_lib::MPI_Comm_rank(CXios::globalComm,&rank);
         fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;
       }
@@ -341,10 +357,9 @@
     void CClient::openInfoStream(const StdString& fileName)
    {
-      info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();
-
-      openStream(fileName, ".out", info_FB[omp_get_thread_num()]);
-
-      info.write2File(info_FB[omp_get_thread_num()]);
-      report.write2File(info_FB[omp_get_thread_num()]);
+      std::filebuf* fb = m_infoStream.rdbuf();
+      openStream(fileName, ".out", fb);
+
+      info.write2File(fb);
+      report.write2File(fb);
     }
 
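
For orientation, the sketch below shows how the client entry points touched by this changeset chain together after r1642. It is a minimal illustration under stated assumptions, not XIOS code: the code id "my_model" and context id "my_context" are invented, "client.hpp" is assumed to be the header declaring CClient, and real models normally reach these routines through the XIOS Fortran/C interface rather than by calling CClient directly.

// Hypothetical driver sketch; the names flagged below are assumptions.
#include "client.hpp"   // assumed header declaring xios::CClient
#include "mpi.hpp"      // ep_lib wrappers and EP_* constants used in this changeset

int main(int argc, char** argv)
{
  // Passing EP_COMM_NULL takes the "localComm isn't given" branch above, so
  // initialize() calls ep_lib::MPI_Init itself and builds intraComm/interComm.
  ep_lib::MPI_Comm localComm = EP_COMM_NULL;
  ep_lib::MPI_Comm returnComm;
  xios::CClient::initialize("my_model", localComm, returnComm);  // "my_model" is a made-up code id

  // Contexts are registered on the communicator handed back to the model.
  xios::CClient::registerContext("my_context", returnComm);      // "my_context" is made up

  // Under OASIS coupling, rank 0 of intraComm sends the tag-5 message that
  // tells the servers to call oasis_enddef (see callOasisEnddef above).
  xios::CClient::callOasisEnddef();

  // ... model time stepping and field output would happen here ...

  // finalize() notifies the server, frees the context inter-communicators,
  // and calls oasis_finalize or ep_lib::MPI_Finalize as appropriate.
  xios::CClient::finalize();
  return 0;
}

Note the pattern the changeset enforces throughout: every MPI entity goes through the ep_lib:: endpoint wrappers and the EP_* constants, with explicit qualification replacing the file-wide "using namespace ep_lib;" of r1556.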