#include "globalScopeData.hpp"
#include "xios_spl.hpp"
#include "cxios.hpp"
#include "client.hpp"
#include <boost/functional/hash.hpp>
#include "type.hpp"
#include "context.hpp"
#include "context_client.hpp"
#include "oasis_cinterface.hpp"
#include "mpi.hpp"
#include "timer.hpp"
#include "buffer_client.hpp"
#include "string_tools.hpp"
#include "ressources_manager.hpp"
#include "services_manager.hpp"
#include <functional>
#include <cstdio>
#include "workflow_graph.hpp"
#include "release_static_allocation.hpp"
#include "mem_checker.hpp"

namespace xios
{

    const double serverPublishDefaultTimeout=10;

    MPI_Comm CClient::intraComm_ ;
    MPI_Comm CClient::interComm_ ;
    MPI_Comm CClient::clientsComm_ ;

    std::list<MPI_Comm> CClient::contextInterComms;
    int CClient::serverLeader ;
    bool CClient::is_MPI_Initialized ;
    int CClient::rank_ = INVALID_RANK;
    StdOFStream CClient::m_infoStream;
    StdOFStream CClient::m_errorStream;
    CPoolRessource* CClient::poolRessource_=nullptr ;

    MPI_Comm& CClient::getInterComm(void) { return (interComm_); }

    ///---------------------------------------------------------------
    /*!
     * \fn void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
     * Creates the intracommunicator (CClient::intraComm_) for the client group with id=codeId and the
     * intercommunicator (CClient::interComm_) between the client and server groups.
     * \param [in] codeId identity of the calling code (model).
     * \param [in] localComm local communicator of the model, or MPI_COMM_NULL if it must be created here.
     * \param [out] returnComm (intra)communicator of the client group, to be used by the model.
     */
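    // Typical call pattern (a hedged sketch; in practice this is usually reached through the
    // xios_initialize entry point of the Fortran/C interface rather than called directly):
    //
    //   MPI_Comm localComm  = MPI_COMM_NULL ;   // let XIOS split the global communicator itself
    //   MPI_Comm returnComm ;
    //   CClient::initialize("ocean", localComm, returnComm) ;
    //   // from here on, the "ocean" code uses returnComm as its model intracommunicator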

    void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
    {

      MPI_Comm clientComm ;
      // initialize MPI if not initialized
      int initialized ;
      MPI_Initialized(&initialized) ;
      if (initialized) is_MPI_Initialized=true ;
      else is_MPI_Initialized=false ;

      MPI_Comm globalComm=CXios::getGlobalComm() ;

      /////////////////////////////////////////
      ///////////// PART 1 ////////////////////
      /////////////////////////////////////////


      // localComm isn't given
      if (localComm == MPI_COMM_NULL)
      {

        // don't use OASIS
        if (!CXios::usingOasis)
        {

          if (!is_MPI_Initialized)
          {
            MPI_Init(NULL, NULL);
          }
          CTimer::get("XIOS").resume() ;
          CTimer::get("XIOS init/finalize",false).resume() ;

          // split the global communicator:
          // get the hash of each model's codeId to attribute a unique color (int), then split to get the client communicator;
          // every MPI process of globalComm (MPI_COMM_WORLD) must participate
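          // Worked example (names are illustrative): with two codes "ocean" and "atmos" running side by side,
          // every ocean rank publishes hash("ocean") and every atmos rank publishes hash("atmos").
          // Colors are attributed in order of first appearance in hashAll (i.e. by global rank), so all
          // ocean ranks get one color and all atmos ranks another, and MPI_Comm_split below returns
          // one intracommunicator per code.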

          int commRank, commSize ;
          MPI_Comm_rank(globalComm,&commRank) ;
          MPI_Comm_size(globalComm,&commSize) ;

          std::hash<string> hashString ;
          size_t hashClient=hashString(codeId) ;

          size_t* hashAll = new size_t[commSize] ;
          MPI_Allgather(&hashClient,1,MPI_SIZE_T,hashAll,1,MPI_SIZE_T,globalComm) ;

          int color=0 ;
          map<size_t,int> listHash ;
          for(int i=0 ; i<commSize ; i++)
            if (listHash.count(hashAll[i])==0)
            {
              listHash[hashAll[i]]=color ;
              color=color+1 ;
            }
          color=listHash[hashClient] ;
          delete[] hashAll ;

          MPI_Comm_split(globalComm, color, commRank, &clientComm) ;
          CXios::getMpiGarbageCollector().registerCommunicator(clientComm) ;
        }
        else
        {
          ERROR("void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)", <<"OASIS usage is set. In these conditions, XIOS initialization needs the local_comm created by OASIS."<<endl) ;
        }
      }
      else // localComm is given
      {
        MPI_Comm_dup(localComm,&clientComm) ;
        CXios::getMpiGarbageCollector().registerCommunicator(clientComm) ;
        MPI_Comm_dup(localComm,&intraComm_) ;
        CXios::getMpiGarbageCollector().registerCommunicator(intraComm_) ;

        if (CXios::usingServer)
        {
          MPI_Comm_rank(intraComm_,&rank_) ;
        }

      }


      /////////////////////////////////////////
      ///////////// PART 2 ////////////////////
      /////////////////////////////////////////


      // Create the XIOS communicator for every process related to XIOS,
      // on the client side as well as on the server side

      MPI_Comm xiosGlobalComm ;
      string strIds=CXios::getin<string>("clients_code_id","") ;
      vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;
      if (strIds.empty())
      {
        // no code ids given; assume the XIOS initialisation is global
        int commRank, commGlobalRank, serverLeader, clientLeader,serverRemoteLeader,clientRemoteLeader ;
        MPI_Comm splitComm,interComm ;
        MPI_Comm_rank(globalComm,&commGlobalRank) ;
        MPI_Comm_split(globalComm, 0, commGlobalRank, &splitComm) ;
        int splitCommSize, globalCommSize ;

        MPI_Comm_size(splitComm,&splitCommSize) ;
        MPI_Comm_size(globalComm,&globalCommSize) ;
        if (splitCommSize==globalCommSize) // no server
        {
          MPI_Comm_dup(globalComm,&xiosGlobalComm) ;
          CXios::setXiosComm(xiosGlobalComm) ;
        }
        else
        {
          MPI_Comm_rank(splitComm,&commRank) ;
          if (commRank==0) clientLeader=commGlobalRank ;
          else clientLeader=0 ;
          serverLeader=0 ;
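          // Descriptive note: the two MPI_SUM reductions below act as a broadcast of each group's
          // leader rank, since every process contributes 0 except the group leader, which contributes
          // its global rank (the server side is assumed to set serverLeader symmetrically in server.cpp).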
          MPI_Allreduce(&clientLeader,&clientRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
          MPI_Allreduce(&serverLeader,&serverRemoteLeader,1,MPI_INT,MPI_SUM,globalComm) ;
          MPI_Intercomm_create(splitComm, 0, globalComm, serverRemoteLeader,1341,&interComm) ;
          MPI_Intercomm_merge(interComm,true,&xiosGlobalComm) ;
          CXios::setXiosComm(xiosGlobalComm) ;
        }
      }
      else
      {

        xiosGlobalCommByFileExchange(clientComm, codeId) ;

      }

      int commRank ;
      MPI_Comm_rank(CXios::getXiosComm(), &commRank) ;
      MPI_Comm_split(CXios::getXiosComm(),false,commRank, &clientsComm_) ;
      CXios::getMpiGarbageCollector().registerCommunicator(clientsComm_) ;
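      // All client processes pass the same color (false, i.e. 0) to this split, so clientsComm_
      // gathers only the clients; the server processes are assumed to call the matching split with a
      // different color on their side, which is what makes the size comparison below meaningful.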

      // are we using servers or not ?
      int xiosCommSize, clientsCommSize ;
      MPI_Comm_size(CXios::getXiosComm(), &xiosCommSize) ;
      MPI_Comm_size(clientsComm_, &clientsCommSize) ;
      if (xiosCommSize==clientsCommSize) CXios::setNotUsingServer() ;
      else CXios::setUsingServer() ;

      /////////////////////////////////////////
      ///////////// PART 3 ////////////////////
      /////////////////////////////////////////

      CXios::launchDaemonsManager(false) ;
      shared_ptr<CEventScheduler> eventScheduler ;
      poolRessource_ = new CPoolRessource(clientComm, eventScheduler, codeId, false) ;

      /////////////////////////////////////////
      ///////////// PART 4 ////////////////////
      /////////////////////////////////////////
/*
      MPI_Request req ;
      MPI_Status status ;
      MPI_Ibarrier(CXios::getXiosComm(),&req) ; // make sure that all services are created now; could be removed later for more asynchronicity
      int ok=false ;
      while (!ok)
      {
        CXios::getDaemonsManager()->eventLoop() ;
        MPI_Test(&req,&ok,&status) ;
      }
*/
      returnComm = clientComm ;
    }


    void CClient::xiosGlobalCommByFileExchange(MPI_Comm clientComm, const string& codeId)
    {

      MPI_Comm globalComm=CXios::getGlobalComm() ;
      MPI_Comm xiosGlobalComm ;

      string strIds=CXios::getin<string>("clients_code_id","") ;
      vector<string> clientsCodeId=splitRegex(strIds,"\\s*,\\s*") ;

      int commRank, globalRank, clientRank, serverRank ;
      MPI_Comm_rank(clientComm, &commRank) ;
      MPI_Comm_rank(globalComm, &globalRank) ;
      string clientFileName("__xios_publisher::"+codeId+"__to_remove__") ;
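      // Rendezvous protocol through the file system (descriptive note): the root process of each code
      // writes its global rank into a file named "__xios_publisher::<codeId>__to_remove__", then polls
      // for the file published by the XIOS server (CXios::xiosCodeId) to learn the server leader rank.
      // If that file does not appear before the timeout, the codes connect to each other instead
      // (the "no server detected" branch below). The published files are removed at the end of this routine.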

      int error ;

      if (commRank==0) // the root process publishes the code's name
      {
        std::ofstream ofs (clientFileName, std::ofstream::out);
        ofs<<globalRank ;
        ofs.close();

        // get the server root rank

        std::ifstream ifs ;
        string fileName=("__xios_publisher::"+CXios::xiosCodeId+"__to_remove__") ;

        double timeout = CXios::getin<double>("server_puplish_timeout",serverPublishDefaultTimeout) ;
        double time ;

        do
        {
          CTimer::get("server_publish_timeout").resume() ;
          ifs.clear() ;
          ifs.open(fileName, std::ifstream::in) ;
          CTimer::get("server_publish_timeout").suspend() ;
        } while (ifs.fail() && CTimer::get("server_publish_timeout").getCumulatedTime()<timeout) ;

        if (CTimer::get("server_publish_timeout").getCumulatedTime()>=timeout || ifs.fail())
        {
          ifs.clear() ;
          ifs.close() ;
          ifs.clear() ;
          error=true ;
        }
        else
        {
          ifs>>serverRank ;
          ifs.close() ;
          error=false ;
        }

      }
      MPI_Bcast(&error,1,MPI_INT,0,clientComm) ;

      if (error==false) // a server is present
      {
        MPI_Comm intraComm ;
        MPI_Comm_dup(clientComm,&intraComm) ;
        MPI_Comm interComm ;

        int pos=0 ;
        for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;

        bool high=true ;
        for(int i=pos ; i<clientsCodeId.size(); i++)
        {
          MPI_Intercomm_create(intraComm, 0, globalComm, serverRank, 3141, &interComm);
          CXios::getMpiGarbageCollector().registerCommunicator(interComm) ;
          MPI_Comm_free(&intraComm) ;
          MPI_Intercomm_merge(interComm,high, &intraComm ) ;
          high=false ;
          if (i==pos) {
            interComm_=interComm ;
          }
        }
        xiosGlobalComm=intraComm ;
      }
      else // no server detected
      {
        vector<int> clientsRank(clientsCodeId.size()) ;

        if (commRank==0)
        {
          for(int i=0;i<clientsRank.size();i++)
          {
            std::ifstream ifs ;
            string fileName=("__xios_publisher::"+clientsCodeId[i]+"__to_remove__") ;
            do
            {
              ifs.clear() ;
              ifs.open(fileName, std::ifstream::in) ;
            } while (ifs.fail()) ;
            ifs>>clientsRank[i] ;
            ifs.close() ;
          }
        }

        int client ;
        MPI_Comm intraComm ;
        MPI_Comm_dup(clientComm,&intraComm) ;
        MPI_Comm interComm ;

        int pos=0 ;
        for(int i=0 ; codeId!=clientsCodeId[i]; i++) pos=pos+1 ;

        bool high=true ;
        for(int i=pos+1 ; i<clientsCodeId.size(); i++)
        {
          if (codeId==clientsCodeId[0]) // the first model plays the server role
          {
            MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[i], 3141, &interComm);
            MPI_Intercomm_merge(interComm,false, &intraComm ) ;
          }
          else
          {
            MPI_Intercomm_create(intraComm, 0, globalComm, clientsRank[0], 3141, &interComm);
            MPI_Intercomm_merge(interComm,high, &intraComm ) ;
            high=false ;
          }
          if (i==pos) {
            interComm_=interComm ; // NOT TESTED !
          }
        }
        xiosGlobalComm=intraComm ;
      }

      MPI_Barrier(xiosGlobalComm);
      if (commRank==0) std::remove(clientFileName.c_str()) ;
      MPI_Barrier(xiosGlobalComm);

      CXios::setXiosComm(xiosGlobalComm) ;

    }

    // still to be checked on other architectures
    void CClient::xiosGlobalCommByPublishing(MPI_Comm clientComm, const string& codeId)
    {

      // untested; needs to be developed on a truly MPI-compliant library

/*
      // try to discover other clients/servers
      // is a xios server available ?
      char portName[MPI_MAX_PORT_NAME];
      int ierr ;
      int commRank ;
      MPI_Comm_rank(clientComm,&commRank) ;

      MPI_Barrier(globalComm) ;
      if (commRank==0)
      {

        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
        const char* serviceName=CXios::xiosCodeId.c_str() ;
        ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
      }
      ierr=MPI_SUCCESS ;
      MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;

      if (ierr==MPI_SUCCESS) // a server is available
      {
        MPI_Comm intraComm=clientComm ;
        MPI_Comm interComm ;
        for(int i=0 ; i<clientsCodeId.size(); i++)
        {
          MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
          MPI_Intercomm_merge(interComm, true, &intraComm ) ;
        }
        xiosGlobalComm=intraComm ;
      }
      else // no server is available
      {
        if (codeId==clientsCodeId[0]) // the first code publishes its name
        {

          if (commRank==0) // the root process publishes the name
          {
            MPI_Open_port(MPI_INFO_NULL, portName);
            MPI_Publish_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
          }

          MPI_Comm intraComm=clientComm ;
          MPI_Comm interComm ;
          for(int i=0 ; i<clientsCodeId.size()-1; i++)
          {
            MPI_Comm_accept(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
            MPI_Intercomm_merge(interComm,false, &intraComm ) ;
          }
        }
        else // the other clients connect to the first one
        {
          if (commRank==0)
          {

            MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN );
            ierr=MPI_Lookup_name(CXios::xiosCodeId.c_str(), MPI_INFO_NULL, portName);
            MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL );
          }

          MPI_Bcast(&ierr,1,MPI_INT,0,clientComm) ;

          if (ierr==MPI_SUCCESS) // the connection is possible
          {
            MPI_Comm intraComm=clientComm ;
            MPI_Comm interComm ;
            for(int i=0 ; i<clientsCodeId.size()-1; i++)
            {
              MPI_Comm_connect(portName, MPI_INFO_NULL, 0, intraComm, &interComm);
              MPI_Intercomm_merge(interComm, true, &intraComm ) ;
            }
            xiosGlobalComm=intraComm ;
          }
        }
      }
*/
    }


    ///---------------------------------------------------------------
    /*!
     * \fn void CClient::registerContext(const string& id, MPI_Comm contextComm)
     * \brief Sends a request to the server to create a context. Creates the client/server contexts.
     * \param [in] id id of the context.
     * \param [in] contextComm communicator of the processes defining the context.
     * This function is only called on the client side.
     */
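    // Typical use (a hedged sketch; models normally reach this through xios_context_initialize in the
    // Fortran/C interface rather than calling it directly):
    //
    //   // "atmosphere" must match a context id declared in the XML configuration
    //   CClient::registerContext("atmosphere", modelComm) ;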
    void CClient::registerContext(const string& id, MPI_Comm contextComm)
    {
      int commRank, commSize ;
      MPI_Comm_rank(contextComm,&commRank) ;
      MPI_Comm_size(contextComm,&commSize) ;

      shared_ptr<CEventScheduler> eventScheduler ;

      getPoolRessource()->createService(contextComm, eventScheduler, id, 0, CServicesManager::CLIENT, 1) ;
//      getPoolRessource()->createService(contextComm, eventScheduler, id+"_"+CXios::defaultWriterId, 0, CServicesManager::WRITER, 1) ;
      getPoolRessource()->createNewServiceOnto(id+"_"+CXios::defaultWriterId, CServicesManager::WRITER, id) ;
//      getPoolRessource()->createService(contextComm, eventScheduler, id+"_"+CXios::defaultReaderId, 0, CServicesManager::READER, 1) ;
      getPoolRessource()->createNewServiceOnto(id+"_"+CXios::defaultReaderId, CServicesManager::READER, id) ;

      if (commRank==0) while (!CXios::getServicesManager()->hasService(getPoolRessource()->getId(), id, 0)) { CXios::getDaemonsManager()->eventLoop();}

      if (commRank==0) CXios::getContextsManager()->createServerContext(getPoolRessource()->getId(), id, 0, id) ;
      int type=CServicesManager::CLIENT ;
      string name = CXios::getContextsManager()->getServerContextName(getPoolRessource()->getId(), id, 0, type, id) ;
      double time ;
      double lastTime=0 ;
      double latency=0 ;
      bool out=false ;
      while (!out)
      {
        time=MPI_Wtime() ;
        if (time-lastTime > latency)
        {
          out=CXios::getContextsManager()->hasContext(name, contextComm);
          lastTime=time ;
        }
        if (!out) CXios::getDaemonsManager()->eventLoop() ;
      }

    }


    /*!
     * \fn void CClient::callOasisEnddef(void)
     * \brief Sends the order to the servers to call "oasis_enddef". It must be done by every model component before calling oasis_enddef on the client side.
     * This function is only called on the client side.
     */
    void CClient::callOasisEnddef(void)
    {
      bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
      if (!oasisEnddef) ERROR("void CClient::callOasisEnddef(void)", <<"Function xios_oasis_enddef called but variable <call_oasis_enddef> is set to false."<<endl
                                                                    <<"Variable <call_oasis_enddef> must be set to true"<<endl) ;
      if (!CXios::isClient) // != isServer (changed recently)
      // Attached mode
      {
        // nothing to do
      }
      else
      {
        int rank ;
        int msg=0 ;

        MPI_Comm_rank(intraComm_,&rank) ;
        if (rank==0)
        {
          MPI_Send(&msg,1,MPI_INT,0,5,interComm_) ; // tag oasis_enddef = 5
        }

      }
    }

    void CClient::finalize(void)
    {

      MPI_Barrier(clientsComm_) ;
      int commRank ;
      MPI_Comm_rank(clientsComm_, &commRank) ;
      if (commRank==0) CXios::getRessourcesManager()->finalize() ;

      CTimer::get("XIOS init/finalize",false).suspend() ;
      CTimer::get("XIOS").suspend() ;
      CXios::finalizeDaemonsManager() ;
      finalizePoolRessource() ;
      CContext::removeAllContexts() ; // free the memory of the related contexts
      CXios::getMpiGarbageCollector().release() ; // release the MPI resources that have not been freed yet
      MPI_Comm xiosComm=CXios::getXiosComm() ;
      MPI_Comm_free(&xiosComm) ;
      CCommTrack::dumpComm() ;
      if (!is_MPI_Initialized)
      {
        if (!CXios::usingOasis) MPI_Finalize() ;
      }

      info(20) << "Client side context is finalized"<<endl ;
      report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
      report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
      report(0)<< " Performance report : time spent waiting for a free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
      report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
      report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be useful to increase the buffer size or the number of servers."<<endl ;
//      report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
      report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
      report(0)<< " Memory report : increasing it by some factor will increase performance, depending on the volume of data written to file at each time step."<<endl ;
      report(100)<<CTimer::getAllCumulatedTime()<<endl ;
      if (CXios::reportMemory)
      {
        report(100)<<CMemChecker::getAllCumulatedMem()<<endl ;
      }
      CWorkflowGraph::drawWorkFlowGraph_client();

      xios::releaseStaticAllocation() ;

    }

    void CClient::finalizePoolRessource()
    {
      delete poolRessource_ ; poolRessource_=nullptr ;
    }

    /*!
     * Returns the global rank when OASIS is not used, and the current rank in the model intraComm when OASIS is used.
     */
    int CClient::getRank()
    {
      return rank_;
    }

    /*!
     * Open a file specified by a suffix and an extension and use it for the given file buffer.
     * The file name will be fileName+"_"+rank+ext, with the rank zero-padded.
     *
     * \param fileName [in] prototype file name
     * \param ext [in] extension of the file
     * \param fb [in/out] the file buffer
     */
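    // Example (values are illustrative): with 120 processes in the global communicator, numDigit is 3,
    // so rank 7 calling openStream("xios_client", ".out", fb) opens the file "xios_client_007.out".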
    void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)
    {
      StdStringStream fileNameClient;
      int numDigit = 0;
      int size = 0;
      int rank;
      MPI_Comm_size(CXios::getGlobalComm(), &size);
      MPI_Comm_rank(CXios::getGlobalComm(),&rank);
      while (size)
      {
        size /= 10;
        ++numDigit;
      }

      fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;

      fb->open(fileNameClient.str().c_str(), std::ios::out);
      if (!fb->is_open())
        ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
              << std::endl << "Cannot open <" << fileNameClient.str() << "> file to write the client log(s).");
    }

    /*!
     * \brief Open a file stream to write the info logs
     * Open a file stream with a specific file name suffix+rank
     * to write the info logs.
     * \param fileName [in] prototype file name
     */
    void CClient::openInfoStream(const StdString& fileName)
    {
      std::filebuf* fb = m_infoStream.rdbuf();
      openStream(fileName, ".out", fb);

      info.write2File(fb);
      report.write2File(fb);
    }

    //! Write the info logs to standard output
    void CClient::openInfoStream()
    {
      info.write2StdOut();
      report.write2StdOut();
    }

    //! Close the info log file if it is open
    void CClient::closeInfoStream()
    {
      if (m_infoStream.is_open()) m_infoStream.close();
    }

    /*!
     * \brief Open a file stream to write the error log
     * Open a file stream with a specific file name suffix+rank
     * to write the error log.
     * \param fileName [in] prototype file name
     */
    void CClient::openErrorStream(const StdString& fileName)
    {
      std::filebuf* fb = m_errorStream.rdbuf();
      openStream(fileName, ".err", fb);

      error.write2File(fb);
    }

    //! Write the error log to standard error output
    void CClient::openErrorStream()
    {
      error.write2StdErr();
    }

    //! Close the error log file if it is open
    void CClient::closeErrorStream()
    {
      if (m_errorStream.is_open()) m_errorStream.close();
    }
}