#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "client.hpp"
#include <boost/functional/hash.hpp>
#include "type.hpp"
#include "context.hpp"
#include "context_client.hpp"
#include "oasis_cinterface.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "buffer_client.hpp"
#include "log.hpp"
#include <cassert>   // assert() used in initialize()
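// CClient gathers the client-side entry points of XIOS: MPI/OASIS
// initialization, context registration with the servers, finalization,
// and the per-rank (and per-thread) info/error log streams.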
namespace xios
{
    extern int test_omp_rank;
    #pragma omp threadprivate(test_omp_rank)

    MPI_Comm CClient::intraComm ;
    MPI_Comm CClient::interComm ;
    std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0;
    int CClient::serverLeader ;
    bool CClient::is_MPI_Initialized ;
    int CClient::rank = INVALID_RANK;
    StdOFStream CClient::m_infoStream;
    StdOFStream CClient::m_errorStream;

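    // One info stream per OpenMP thread (indexed with omp_get_thread_num()
    // in openInfoStream), hence at most 16 threads are supported.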
    StdOFStream CClient::array_infoStream[16];

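    /*!
     * Initialize the client side: build intraComm (communicator of the client
     * processes) and interComm (communicator towards the XIOS servers), either
     * by splitting CXios::globalComm on a hash of codeId or from the OASIS
     * coupler, then hand a duplicate of intraComm back through returnComm.
     * \param codeId [in] identifier of the client code
     * \param localComm [in/out] local communicator; MPI_COMM_NULL if it must be created here
     * \param returnComm [out] duplicate of intraComm returned to the caller
     */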
    void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
    {
      int initialized ;
      MPI_Initialized(&initialized) ;
      if (initialized) is_MPI_Initialized=true ;
      else is_MPI_Initialized=false ;

      // don't use OASIS
      if (!CXios::usingOasis)
      {
        // localComm is not given
        if (localComm == MPI_COMM_NULL)
        {
          if (!is_MPI_Initialized)
          {
            //MPI_Init(NULL, NULL);
            // Request full multithread support: every OpenMP thread may call MPI.
            int return_level;
            MPI_Init_thread(NULL, NULL, MPI_THREAD_MULTIPLE, &return_level);
            assert(return_level == MPI_THREAD_MULTIPLE);
          }
          CTimer::get("XIOS").resume() ;
          CTimer::get("XIOS init/finalize").resume() ;
          boost::hash<string> hashString ;

          unsigned long hashClient=hashString(codeId) ;
          unsigned long hashServer=hashString(CXios::xiosCodeId) ;
          unsigned long* hashAll ;
          int size ;
          int myColor ;
          int i,c ;

          MPI_Comm_size(CXios::globalComm,&size);
          MPI_Comm_rank(CXios::globalComm,&rank);

          hashAll=new unsigned long[size] ;

          MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;

          map<unsigned long, int> colors ;
          map<unsigned long, int> leaders ;

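          // Give each distinct code hash its own split color; the lowest
          // global rank carrying a given hash becomes the leader of that group.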
          for(i=0,c=0;i<size;i++)
          {
            if (colors.find(hashAll[i])==colors.end())
            {
              colors[hashAll[i]] =c ;
              leaders[hashAll[i]]=i ;
              c++ ;
            }
          }

          // Check whether we are in server mode or not
          CXios::setNotUsingServer();
          for (i=0; i < size; ++i)
          {
            if (hashServer == hashAll[i])
            {
              CXios::setUsingServer();
              break;
            }
          }

          myColor=colors[hashClient] ;

          MPI_Comm_split(CXios::globalComm,myColor,rank,&intraComm) ;

          if (CXios::usingServer)
          {
            int clientLeader=leaders[hashClient] ;
            serverLeader=leaders[hashServer] ;

            int intraCommSize, intraCommRank ;
            MPI_Comm_size(intraComm,&intraCommSize) ;
            MPI_Comm_rank(intraComm,&intraCommRank) ;

            #pragma omp critical(_output)
            {
              info(10)<<"intercommCreate::client "<<test_omp_rank<<" "<<&test_omp_rank
                      <<" intraCommSize : "<<intraCommSize
                      <<" intraCommRank : "<<intraCommRank<<" serverLeader "<<serverLeader
                      <<" globalComm : "<<&(CXios::globalComm)<<endl ;
            }

            //test_sendrecv(CXios::globalComm);
            MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
          }
          else
          {
            MPI_Comm_dup(intraComm,&interComm) ;
          }
          delete [] hashAll ;
        }
        // localComm argument is given
        else
        {
          if (CXios::usingServer)
          {
            //ERROR("void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)", << " giving a local communicator is not compatible with using server mode") ;
          }
          else
          {
            MPI_Comm_dup(localComm,&intraComm) ;
            MPI_Comm_dup(intraComm,&interComm) ;
          }
        }
      }
      // using OASIS
      else
      {
        // localComm is not given
        if (localComm == MPI_COMM_NULL)
        {
          if (!is_MPI_Initialized) oasis_init(codeId) ;
          oasis_get_localcomm(localComm) ;
        }
        MPI_Comm_dup(localComm,&intraComm) ;

        CTimer::get("XIOS").resume() ;
        CTimer::get("XIOS init/finalize").resume() ;

        if (CXios::usingServer)
        {
          MPI_Status status ;
          MPI_Comm_rank(intraComm,&rank) ;

          oasis_get_intercomm(interComm,CXios::xiosCodeId) ;
          if (rank==0) MPI_Recv(&serverLeader,1, MPI_INT, 0, 0, interComm, &status) ;
          MPI_Bcast(&serverLeader,1,MPI_INT,0,intraComm) ;
        }
        else MPI_Comm_dup(intraComm,&interComm) ;
      }

      MPI_Comm_dup(intraComm,&returnComm) ;
    }

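    /*!
     * Register a new context: if a dedicated server is used, send the
     * registration to the server leader and create an intercommunicator with
     * it; otherwise create and initialize the matching "<id>_server" context
     * in the same process (attached mode).
     * \param id [in] identifier of the context
     * \param contextComm [in] communicator of the processes sharing this context
     */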
    void CClient::registerContext(const string& id,MPI_Comm contextComm)
    {
      CContext::setCurrent(id) ;
      CContext* context = CContext::create(id);

      int tmp_rank;
      MPI_Comm_rank(contextComm,&tmp_rank) ;

      StdString idServer(id);
      idServer += "_server";

      if (!CXios::isServer)
      {
        int size,rank,globalRank ;
        //size_t message_size ;
        //int leaderRank ;
        MPI_Comm contextInterComm ;

        MPI_Comm_size(contextComm,&size) ;
        MPI_Comm_rank(contextComm,&rank) ;
        MPI_Comm_rank(CXios::globalComm,&globalRank) ;
        if (rank!=0) globalRank=0 ;

        CMessage msg ;
        msg<<idServer<<size<<globalRank ;

        int messageSize=msg.size() ;
        char * buff = new char[messageSize] ;
        CBufferOut buffer((void*)buff,messageSize) ;
        buffer<<msg ;

        MPI_Send((void*)buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;
        delete [] buff ;

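        // Tag 10+globalRank must match the corresponding MPI_Intercomm_create
        // on the server side, which learns globalRank from the message above.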
        MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;

        #pragma omp critical(_output)
        info(10)<<" RANK "<<tmp_rank<<" Register new Context : "<<id<<endl ;

        MPI_Comm inter ;
        MPI_Intercomm_merge(contextInterComm,0,&inter) ;
        MPI_Barrier(inter) ;

        context->initClient(contextComm,contextInterComm) ;

        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
        contextInterComms_ptr->push_back(contextInterComm);

        MPI_Comm_free(&inter);
      }
      else
      {
        MPI_Comm contextInterComm ;
        MPI_Comm_dup(contextComm,&contextInterComm) ;
        CContext* contextServer = CContext::create(idServer);

        // First, initialize the context on the client side
        context->initClient(contextComm,contextInterComm, contextServer);

        // Then, initialize the context on the server side
        contextServer->initServer(contextComm,contextInterComm, context);

        // Finally, set the current context back to the client context
        CContext::setCurrent(id);

        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
        contextInterComms_ptr->push_back(contextInterComm);
      }
    }

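    //! Notify the server that this client is finished, free all communicators,
    //! and finalize MPI (or OASIS) if XIOS initialized it.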
    void CClient::finalize(void)
    {
      int rank ;
      int msg=0 ;

      MPI_Comm_rank(intraComm,&rank) ;

      if (!CXios::isServer)
      {
        // rank 0 notifies the server leader that this client is done
        if (rank==0)
        {
          MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;
        }
      }

      if (contextInterComms_ptr != NULL)  // may be NULL if no context was registered
        for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); ++it)
          MPI_Comm_free(&(*it));

      MPI_Comm_free(&interComm);
      MPI_Comm_free(&intraComm);

      CTimer::get("XIOS init/finalize").suspend() ;
      CTimer::get("XIOS").suspend() ;

      if (!is_MPI_Initialized)
      {
        if (CXios::usingOasis) oasis_finalize();
        else MPI_Finalize();
      }

      #pragma omp critical (_output)
      info(20) << "Client "<<rank<<" : Client side context is finalized "<< endl ;

      /*#pragma omp critical (_output)
      {
        report(0)<<" Performance report : Whole time from XIOS init and finalize: "<<CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
        report(0)<<" Performance report : total time spent for XIOS : "<<CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
        report(0)<<" Performance report : time spent for waiting free buffer : "<<CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
        report(0)<<" Performance report : Ratio : "<<CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
        report(0)<<" Performance report : This ratio must be close to zero. Otherwise it may be useful to increase the buffer size or the number of servers"<<endl ;
        // report(0)<<" Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
        report(0)<<" Memory report : Minimum buffer size required : "<<CClientBuffer::maxRequestSize<<" bytes"<<endl ;
        report(0)<<" Memory report : increasing it by a factor will increase performance, depending on the volume of data written to file at each time step"<<endl ;
        report(100)<<CTimer::getAllCumulatedTime()<<endl ;
      }*/
    }

    int CClient::getRank()
    {
      return rank;
    }

    /*!
     * Open a file specified by a prototype name and an extension and use it
     * for the given file buffer. The file name is built as
     * fileName_rank+ext, with the rank zero-padded.
     *
     * \param fileName [in] prototype file name
     * \param ext [in] extension of the file
     * \param fb [in/out] the file buffer
     */
    void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
    {
      StdStringStream fileNameClient;
      int numDigit = 0;
      int size = 0;
      MPI_Comm_size(CXios::globalComm, &size);
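      // Count the digits of the global communicator size so that ranks in
      // log file names can be zero-padded to a fixed width.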
      while (size)
      {
        size /= 10;
        ++numDigit;
      }

      fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;

      fb->open(fileNameClient.str().c_str(), std::ios::out);
      if (!fb->is_open())
        ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
              << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the client log(s).");
    }

    /*!
     * \brief Open a file stream to write the info logs
     * Open a file stream whose name is built from the given prototype name
     * and the process rank, to write the info logs.
     * \param fileName [in] prototype file name
     */
    void CClient::openInfoStream(const StdString& fileName)
    {
      //std::filebuf* fb = m_infoStream.rdbuf();

      info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();

      openStream(fileName, ".out", info_FB[omp_get_thread_num()]);

      info.write2File(info_FB[omp_get_thread_num()]);
      report.write2File(info_FB[omp_get_thread_num()]);
    }

    //! Write the info logs to standard output
    void CClient::openInfoStream()
    {
      info.write2StdOut();
      report.write2StdOut();
    }

    //! Close the info log file if it is open
    void CClient::closeInfoStream()
    {
      if (m_infoStream.is_open()) m_infoStream.close();
    }

    /*!
     * \brief Open a file stream to write the error log
     * Open a file stream whose name is built from the given prototype name
     * and the process rank, to write the error log.
     * \param fileName [in] prototype file name
     */
    void CClient::openErrorStream(const StdString& fileName)
    {
      std::filebuf* fb = m_errorStream.rdbuf();
      openStream(fileName, ".err", fb);

      error.write2File(fb);
    }

    //! Write the error log to standard error output
    void CClient::openErrorStream()
    {
      error.write2StdErr();
    }

    //! Close the error log file if it is open
    void CClient::closeErrorStream()
    {
      if (m_errorStream.is_open()) m_errorStream.close();
    }
}