Changeset 1009
- Timestamp: 12/05/16 17:47:54
- Location: XIOS/dev/dev_olga
- Files: 27 edited
XIOS/dev/dev_olga/arch/arch-X64_CURIE.fcm
(diff r983 → r1009)

  %BASE_CFLAGS  -diag-disable 1125 -diag-disable 279
  %PROD_CFLAGS  -O3 -D BOOST_DISABLE_ASSERTS
- %DEV_CFLAGS   -g -traceback
+ #%DEV_CFLAGS  -g -traceback
+ %DEV_CFLAGS   -g
  %DEBUG_CFLAGS -DBZ_DEBUG -g -traceback -fno-inline

  %BASE_FFLAGS  -D__NONE__
  %PROD_FFLAGS  -O3
- %DEV_FFLAGS   -g -traceback
+ #%DEV_FFLAGS  -g -traceback
+ %DEV_FFLAGS   -g
  %DEBUG_FFLAGS -g -traceback
XIOS/dev/dev_olga/bld.cfg
(diff r983 → r1009)

  bld::target xios_server.exe
  bld::target xios_server1.exe xios_server2.exe
+ bld::target test_regular.exe
  #bld::target test_new_features.exe test_unstruct_complete.exe
  #bld::target test_client.exe test_complete.exe
- bld::target test_client.exe
+ #bld::target test_client.exe
  bld::exe_dep
XIOS/dev/dev_olga/src/attribute_map.cpp
(diff r966 → r1009)

  //---------------------------------------------------------------
-
- bool CAttributeMap::hasAttribute(const StdString& key) const
- {
-   return (this->find(key) != this->end());
- }

  void CAttributeMap::clearAttribute(const StdString& key)
XIOS/dev/dev_olga/src/attribute_map.hpp
(diff r623 → r1009)

      /// Tests ///
-     inline bool hasAttribute(const StdString & key) const;
+     bool hasAttribute(const StdString & key) const;

      /// Accesseurs ///
…
  }; // class CAttributeMap

+ inline bool CAttributeMap::hasAttribute(const StdString& key) const
+ {
+   return (this->find(key) != this->end());
+ }
+
  } // namespace xios
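Note: the definition moves out of attribute_map.cpp so calls can be inlined across translation units. A minimal sketch of the pattern, with CMyMap as a hypothetical stand-in for CAttributeMap (which derives from a map-like base):

#include <map>
#include <string>

class CMyMap : public std::map<std::string, int>
{
  public:
    bool hasAttribute(const std::string& key) const;  // declared without 'inline'
};

// Out-of-class definition kept in the same header: 'inline' here avoids
// multiple-definition errors when the header is included in several TUs.
inline bool CMyMap::hasAttribute(const std::string& key) const
{
  return (this->find(key) != this->end());
}

int main()
{
  CMyMap m;
  m["units"] = 1;
  return m.hasAttribute("units") ? 0 : 1;
}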
XIOS/dev/dev_olga/src/client.cpp
(diff r992 → r1009)

  MPI_Comm CClient::interComm ;
  std::list<MPI_Comm> CClient::contextInterComms;
- int CClient::serverLeader;
+ vector <int> CClient::serverLeader;
  bool CClient::is_MPI_Initialized ;
  int CClient::rank = INVALID_RANK;
…
  {
    int clientLeader=leaders[hashClient] ;
-   serverLeader=leaders[hashServer] ;
+   // serverLeader=leaders[hashServer] ;
+   serverLeader.push_back(leaders[hashServer]) ;
    int intraCommSize, intraCommRank ;
    MPI_Comm_size(intraComm,&intraCommSize) ;
    MPI_Comm_rank(intraComm,&intraCommRank) ;
    info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
-           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
-   MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
+           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader.back()<<endl ;
+   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader.back(), 0, &interComm) ;
+   // info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
+   //         <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
+   // MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
  }
  else
…
  }

- void CClient::initializeClientOnServer(const int rank, MPI_Comm& intraCommPrmSrv, const int srvSndLeader)
+ void CClient::initializeClientOnServer(const int rank, const MPI_Comm& intraCommPrmSrv, const int srvSndLeader)
  {
    MPI_Comm_dup(intraCommPrmSrv, &intraComm) ;
-   serverLeader = srvSndLeader;
+   serverLeader.push_back(srvSndLeader);
    int intraCommSize, intraCommRank ;
    MPI_Comm_size(intraComm,&intraCommSize) ;
    MPI_Comm_rank(intraComm,&intraCommRank) ;
    info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
-           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
-   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
+           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ;
+   MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &interComm) ;
  }
…
    buffer<<msg ;

-   MPI_Send(buff, buffer.count(), MPI_CHAR, serverLeader, 1, CXios::globalComm) ;
+   for (int i = 0; i < serverLeader.size(); ++i)
+   {
+     MPI_Send(buff, buffer.count(), MPI_CHAR, serverLeader[i], 1, CXios::globalComm) ;
+     MPI_Intercomm_create(contextComm, 0, CXios::globalComm, serverLeader[i], 10+globalRank, &contextInterComm) ;
+     info(10)<<"Register new Context : "<<id<<endl ;
+     MPI_Comm inter ;
+     MPI_Intercomm_merge(contextInterComm,0,&inter) ;
+     MPI_Barrier(inter) ;
+
+     context->initClient(contextComm,contextInterComm) ;
+
+     contextInterComms.push_back(contextInterComm);
+     MPI_Comm_free(&inter);
+   }
    delete [] buff ;
-
-   MPI_Intercomm_create(contextComm, 0, CXios::globalComm, serverLeader, 10+globalRank, &contextInterComm) ;
-   info(10)<<"Register new Context : "<<id<<endl ;
-
-   MPI_Comm inter ;
-   MPI_Intercomm_merge(contextInterComm,0,&inter) ;
-   MPI_Barrier(inter) ;
-
-   context->initClient(contextComm,contextInterComm) ;
-
-   contextInterComms.push_back(contextInterComm);
-   MPI_Comm_free(&inter);
  }
  }

+ ///---------------------------------------------------------------
+ /*!
+  * \fn void CClient::registerContext(const string& id, const int poolNb, MPI_Comm contextComm)
+  * Function creates intraComm (CClient::intraComm) for client group with id=codeId and interComm (CClient::interComm) between client and server groups.
+  * \param [in] id id of context.
+  * \param [in] contextComm.
+  */
+ void CClient::registerContextOnSrvPools(const string& id, MPI_Comm contextComm)
+ {
+   CContext::setCurrent(id) ;
+   CContext* context=CContext::create(id);
+   StdString idServer(id);
+   idServer += "_server_";
+
+   int size,rank,globalRank ;
+   size_t message_size ;
+   int leaderRank ;
+   MPI_Comm contextInterComm ;
+
+   MPI_Comm_size(contextComm,&size) ;
+   MPI_Comm_rank(contextComm,&rank) ;
+   MPI_Comm_rank(CXios::globalComm,&globalRank) ;
+   if (rank!=0) globalRank=0 ;
+
+   CMessage msg ;
+
+   int messageSize ;
+   void * buff ;
+
+   for (int i = 0; i < serverLeader.size(); ++i)
+   {
+     StdString str = idServer + boost::lexical_cast<string>(i);
+     msg<<str<<size<<globalRank ;
+     messageSize = msg.size() ;
+     buff = new char[messageSize] ;
+     CBufferOut buffer(buff,messageSize) ;
+     buffer<<msg ;
+
+     MPI_Send(buff, buffer.count(), MPI_CHAR, serverLeader[i], 1, CXios::globalComm) ;
+     MPI_Intercomm_create(contextComm, 0, CXios::globalComm, serverLeader[i], 10+globalRank, &contextInterComm) ;
+     info(10)<<"Register new Context : "<<id<<endl ;
+     MPI_Comm inter ;
+     MPI_Intercomm_merge(contextInterComm,0,&inter) ;
+     MPI_Barrier(inter) ;
+
+     context->initClient(contextComm,contextInterComm) ;
+
+     contextInterComms.push_back(contextInterComm);
+     MPI_Comm_free(&inter);
+     delete [] buff ;
+   }
+ }

  void CClient::finalize(void)
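Note: the core change in this file is one intercommunicator per server-pool leader instead of a single one. A hedged, self-contained sketch of that pattern; poolLeaders stands in for CClient::serverLeader, and the real code uses CXios::globalComm rather than MPI_COMM_WORLD plus XIOS-specific tags:

#include <mpi.h>
#include <cstddef>
#include <vector>

// Build one client/server intercommunicator per pool, given the global rank
// of each pool's leader process.
std::vector<MPI_Comm> connectToPools(MPI_Comm intraComm,
                                     const std::vector<int>& poolLeaders)
{
  std::vector<MPI_Comm> interComms;
  for (std::size_t i = 0; i < poolLeaders.size(); ++i)
  {
    MPI_Comm inter;
    // Local leader is rank 0 of intraComm; the remote leader is the pool
    // leader's rank in the peer communicator.
    MPI_Intercomm_create(intraComm, 0, MPI_COMM_WORLD, poolLeaders[i],
                         /*tag=*/0, &inter);
    interComms.push_back(inter);
  }
  return interComms;
}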
XIOS/dev/dev_olga/src/client.hpp
(diff r992 → r1009)

    public:
      static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm);
-     static void initializeClientOnServer(const int rank, MPI_Comm& localComm, const int srvSndLeader);
+     static void initializeClientOnServer(const int rank, const MPI_Comm& localComm, const int srvSndLeader);
+
      static void finalize(void);
      static void registerContext(const string& id, MPI_Comm contextComm);
+     static void registerContextOnSrvPools(const string& id, MPI_Comm contextComm);

      static MPI_Comm intraComm;
      static MPI_Comm interComm;
      static std::list<MPI_Comm> contextInterComms;
-     static int serverLeader;
+     static vector<int> serverLeader;
      static bool is_MPI_Initialized ;
XIOS/dev/dev_olga/src/context_server.cpp
(diff r992 → r1009)

  boost::hash<string> hashString;
  StdString contextId = context->getId();
- contextId += "_prim"; // just to distinguish between server and serverPrimServer on server1
  hashId=hashString(contextId);
XIOS/dev/dev_olga/src/declare_ref_func.hpp
(diff r962 → r1009)

      SuperClassAttribute::setAttributes(refer_ptr, apply); \
    } \
+   if (this->hasAttribute("name") && this->name.isEmpty()) \
+     this->name.setValue(this->get##type##OutputName()); \
  } \
    \
…
    const C##type* refer_ptr = this, *tmp_ptr; \
    StdString nameRef = this->name_##_ref; \
-   std::set<const C##type*> tmpRefObjects;
+   std::set<const C##type*> tmpRefObjects; \
    while (refer_ptr->hasAutoGeneratedId() && \
           (C##type::has(nameRef))) \
…
      tmp_ptr = refer_ptr; \
      refer_ptr = tmp_ptr->getDirect##type##Reference(); \
-     if (refer_ptr->hasDirect##type##Reference()) \
-       nameRef = refer_ptr->name_##_ref; \
+     if (refer_ptr->hasAutoGeneratedId() && \
+         refer_ptr->hasDirect##type##Reference()) \
+       nameRef = refer_ptr->name_##_ref; \
+     else { \
+       nameRef = refer_ptr->getId(); break; \
+     } \
      if (tmpRefObjects.end() != tmpRefObjects.find(refer_ptr)) \
      { \
        ERROR("const StdString& C" #type "::get" #type "OutputName(void) const ", \
              << "Circular dependency stopped for " #name_ " object " \
              << "with id = \"" << refer_ptr->getId() << "\"."); \
      } \
    } \
    return nameRef; \
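Note: the macro now stops at the first object that is not auto-named (falling back to its id) and keeps the cycle guard. A rough standalone rendering of that logic, with Node and outputName as illustrative names rather than XIOS API, and an assumed "__" prefix convention for generated ids:

#include <set>
#include <stdexcept>
#include <string>

struct Node
{
  std::string id;
  const Node* ref;   // direct *_ref target, null when the chain ends
  // Hypothetical convention: auto-generated ids start with "__".
  bool hasAutoGeneratedId() const { return id.compare(0, 2, "__") == 0; }
};

// Follow references from auto-named objects until an explicitly named one is
// found, guarding against circular *_ref chains much as the macro does.
std::string outputName(const Node* node)
{
  std::set<const Node*> visited;
  const Node* cur = node;
  while (cur->hasAutoGeneratedId() && cur->ref)
  {
    cur = cur->ref;
    if (!visited.insert(cur).second)  // already seen: circular reference
      throw std::runtime_error("Circular dependency stopped for object " + cur->id);
  }
  return cur->id;
}

int main()
{
  Node named = { "temperature", 0 };
  Node alias = { "__field_0", &named };  // generated id referring to 'named'
  return outputName(&alias) == "temperature" ? 0 : 1;
}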
XIOS/dev/dev_olga/src/group_template.hpp
(diff r591 → r1009)

      static bool dispatchEvent(CEventServer& event) ;
      void sendCreateChild(const string& id="") ;
+     void sendCreateChild(const string& id, const int srvPool) ;
      void sendCreateChildGroup(const string& id="") ;
      static void recvCreateChild(CEventServer& event) ;
XIOS/dev/dev_olga/src/group_template_impl.hpp
(diff r987 → r1009)

  {
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client;
+   // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client;

      CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD) ;
      if (contextClientTmp->isServerLeader())
      {
        CMessage msg ;
        msg<<this->getId() ;
        msg<<id ;
        const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
        for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
          event.push(*itRank,1,msg) ;
        contextClientTmp->sendEvent(event) ;
      }
      else contextClientTmp->sendEvent(event) ;
+   }
  }
…
  template <class U, class V, class W>
+ void CGroupTemplate<U, V, W>::sendCreateChild(const string& id, const int srvPool)
+ {
+   CContext* context=CContext::getCurrent() ;
+   CContextClient* contextClientTmp = context->clientPrimServer[srvPool];
+
+   CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD) ;
+   if (contextClientTmp->isServerLeader())
+   {
+     CMessage msg ;
+     msg<<this->getId() ;
+     msg<<id ;
+     const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
+     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+       event.push(*itRank,1,msg) ;
+     contextClientTmp->sendEvent(event) ;
+   }
+   else contextClientTmp->sendEvent(event) ;
+ }
+
+ template <class U, class V, class W>
  void CGroupTemplate<U, V, W>::sendCreateChildGroup(const string& id)
…
    {
      // Use correct context client to send message
-     CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client;
+     // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client;
+     int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+     for (int i = 0; i < nbSrvPools; ++i)
+     {
+       CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client;
        CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD_GROUP) ;
        if (contextClientTmp->isServerLeader())
        {
          CMessage msg ;
          msg<<this->getId() ;
          msg<<id ;
          const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
          for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
            event.push(*itRank,1,msg) ;
          contextClientTmp->sendEvent(event) ;
        }
        else contextClientTmp->sendEvent(event) ;
+     }
  }
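Note: this changeset applies the same transformation over and over: instead of talking to a single clientPrimServer, each send loops over one context client per secondary-server pool and pushes the message to that pool's leader ranks. A self-contained analogue of the loop, where Pool and send are illustrative stand-ins rather than XIOS types:

#include <cstddef>
#include <iostream>
#include <list>
#include <string>
#include <vector>

struct Pool
{
  std::list<int> leaderRanks;  // ranks acting as server leaders for this pool
  bool isLeader;               // whether this process is a leader itself
  void send(const std::string& msg, int rank)
  {
    std::cout << "to rank " << rank << ": " << msg << "\n";
  }
};

// Send one message per pool: a leader pushes to every leader rank of the
// pool; non-leaders would only take part in the collective send.
void sendToAllPools(std::vector<Pool>& pools, const std::string& msg)
{
  for (std::size_t i = 0; i < pools.size(); ++i)
    if (pools[i].isLeader)
      for (std::list<int>::const_iterator it = pools[i].leaderRanks.begin();
           it != pools[i].leaderRanks.end(); ++it)
        pools[i].send(msg, *it);
}

int main()
{
  Pool a = { std::list<int>(1, 0), true };
  Pool b = { std::list<int>(1, 3), true };
  std::vector<Pool> pools; pools.push_back(a); pools.push_back(b);
  sendToAllPools(pools, "create child 'field_A'");
  return 0;
}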
XIOS/dev/dev_olga/src/node/axis.cpp
(diff r987 → r1009)

  {
    CContext* context = CContext::getCurrent();
-
-   CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer
-                                                                       : context->client;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i]
+                                                             : context->client;
      int nbServer = contextClientTmp->serverSize;

      CServerDistributionDescription serverDescription(globalDim, nbServer);
      serverDescription.computeServerDistribution();

      std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin();
      std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes();

      globalDimGrid.resize(globalDim.size());
      for (int idx = 0; idx < globalDim.size(); ++idx) globalDimGrid(idx) = globalDim[idx];

      CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT);
      if (contextClientTmp->isServerLeader())
      {
        std::list<CMessage> msgs;

        const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
        for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
        {
          // Use const int to ensure CMessage holds a copy of the value instead of just a reference
          const int begin = serverIndexBegin[*itRank][orderPositionInGrid];
          const int ni = serverDimensionSizes[*itRank][orderPositionInGrid];
          const int end = begin + ni - 1;

          msgs.push_back(CMessage());
          CMessage& msg = msgs.back();
          msg << this->getId();
          msg << ni << begin << end;
          msg << global_zoom_begin.getValue() << global_zoom_n.getValue();
          msg << isCompressible_;
          msg << orderPositionInGrid;
          msg << globalDimGrid;

          event.push(*itRank,1,msg);
        }
        contextClientTmp->sendEvent(event);
      }
      else contextClientTmp->sendEvent(event);
+   }
  }
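Note: what is sent per leader rank is the (begin, ni, end) triple computed by CServerDistributionDescription. For a 1-D dimension the arithmetic is presumably the usual block split; an illustrative version, not the XIOS implementation itself:

#include <iostream>

int main()
{
  const int nGlob = 10, nbServer = 3;  // invented sizes for illustration
  for (int rank = 0; rank < nbServer; ++rank)
  {
    // Spread the remainder over the first ranks so sizes differ by at most 1.
    const int base = nGlob / nbServer, rest = nGlob % nbServer;
    const int ni    = base + (rank < rest ? 1 : 0);
    const int begin = rank * base + (rank < rest ? rank : rest);
    const int end   = begin + ni - 1;
    std::cout << "rank " << rank << ": begin=" << begin
              << " ni=" << ni << " end=" << end << "\n";
  }
  return 0;
}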
XIOS/dev/dev_olga/src/node/context.cpp
(diff r992 → r1009)

    , calendar(), hasClient(false), hasServer(false)
    , isPostProcessed(false), finalized(false)
-   , idServer_(), client(0), server(0), clientPrimServer(0), serverPrimServer(0)
+   , idServer_(), client(0), server(0)
+   // , clientPrimServer(0), serverPrimServer(0)
    { /* Ne rien faire de plus */ }
…
    , calendar(), hasClient(false), hasServer(false)
    , isPostProcessed(false), finalized(false)
-   , idServer_(), client(0), server(0), clientPrimServer(0), serverPrimServer(0)
+   , idServer_(), client(0), server(0)
+   // , clientPrimServer(0), serverPrimServer(0)
    { /* Ne rien faire de plus */ }
…
    delete client;
    delete server;
-   delete clientPrimServer;
-   delete serverPrimServer;
+   // delete clientPrimServer;
+   // delete serverPrimServer;
  }
…
    else // initClient is called by primary server pool
    {
-     clientPrimServer = new CContextClient(this, intraComm, interComm);
-     serverPrimServer = new CContextServer(this, 1, intraComm, interComm); // just some int parameter to distinguish server from serverPrimServer on server1
+     // clientPrimServer = new CContextClient(this, intraComm, interComm);
+     // serverPrimServer = new CContextServer(this, 1, intraComm, interComm); // just some int parameter to distinguish server from serverPrimServer on server1
+     clientPrimServer.push_back(new CContextClient(this, intraComm, interComm));
+     serverPrimServer.push_back(new CContextServer(this, intraComm, interComm));
    }
…
    if (hasClient && hasServer)
    {
-     if (clientPrimServer->isServerLeader())
-     {
-       const std::list<int>& ranks = clientPrimServer->getRanksServerLeader();
-       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-         if (!bufferSize.count(*itRank)) bufferSize[*itRank] = maxEventSize[*itRank] = minBufferSize;
-     }
-     clientPrimServer->setBufferSize(bufferSize, maxEventSize);
+     for (int i = 0; i < clientPrimServer.size(); ++i)
+     {
+       if (clientPrimServer[i]->isServerLeader())
+       {
+         const std::list<int>& ranks = clientPrimServer[i]->getRanksServerLeader();
+         for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+           if (!bufferSize.count(*itRank)) bufferSize[*itRank] = maxEventSize[*itRank] = minBufferSize;
+       }
+       clientPrimServer[i]->setBufferSize(bufferSize, maxEventSize);
+     }
    }
  }
…
    {
      bool serverFinished = server->eventLoop();
-     bool serverPrimFinished = serverPrimServer->eventLoop();
+     bool serverPrimFinished = true;
+     for (int i = 0; i < serverPrimServer.size(); ++i)
+     {
+       serverPrimFinished *= serverPrimServer[i]->eventLoop();
+     }
      return ( serverFinished && serverPrimFinished);
    }
…
    {
      client->checkBuffers();
-     clientPrimServer->checkBuffers();
+     for (int i = 0; i < clientPrimServer.size(); ++i)
+       clientPrimServer[i]->checkBuffers();
      bool serverFinished = server->eventLoop();
-     bool serverPrimFinished = serverPrimServer->eventLoop();
+     bool serverPrimFinished = true;
+     for (int i = 0; i < serverPrimServer.size(); ++i)
+     {
+       serverPrimFinished *= serverPrimServer[i]->eventLoop();
+     }
      return ( serverFinished && serverPrimFinished);
    }
…
  void CContext::finalize(void)
  {
    if (!finalized)
    {
      finalized = true;
      // if (hasClient) sendRegistry() ;

-     /* if (CXios::serverLevel == 0)
-        {
-          client->finalize();
-          while (!server->hasFinished())
-          {
-            server->eventLoop();
-          }
-        }
-        else if (CXios::serverLevel == 1)
-        {
-          clientPrimServer->finalize();
-          while (!serverPrimServer->hasFinished())
-          {
-            serverPrimServer->eventLoop();
-          }
-          client->finalize();
-          while (!server->hasFinished())
-          {
-            server->eventLoop();
-          }
-        }
-        else if (CXios::serverLevel == 2)
-        {
-          client->finalize();
-          while (!server->hasFinished())
-          {
-            server->eventLoop();
-          }
-        }*/
-
      if ((hasClient) && (hasServer))
      {
-       clientPrimServer->finalize();
-       while (!serverPrimServer->hasFinished())
-       {
-         serverPrimServer->eventLoop();
-         CServer::eventScheduler->checkEvent() ;
-       }
+       for (int i = 0; i < clientPrimServer.size(); ++i)
+         clientPrimServer[i]->finalize();
+
+       for (int i = 0; i < serverPrimServer.size(); ++i)
+       {
+         while (!serverPrimServer[i]->hasFinished())
+         {
+           serverPrimServer[i]->eventLoop();
+           CServer::eventScheduler->checkEvent() ;
+         }
+       }
      }

      client->finalize();
      while (!server->hasFinished())
      {
        server->eventLoop();
      }

      if (hasServer)
      {
        closeAllFile();
        registryOut->hierarchicalGatherRegistry() ;
        if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ;
      }

+     for (std::vector<CContextClient*>::iterator it = clientPrimServer.begin(); it != clientPrimServer.end(); it++)
+       delete *it;
+
+     for (std::vector<CContextServer*>::iterator it = serverPrimServer.begin(); it != serverPrimServer.end(); it++)
+       delete *it;
+
      for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
        MPI_Comm_free(&(*it));
      comms.clear();
    }
  }
…
    // We have enough information to send to server
-   // First of all, send all enabled files
-   sendEnabledFiles();
-
-   // Then, send all enabled fields
-   sendEnabledFields();
+   if (!hasServer)
+   {
+     // First of all, send all enabled files
+     sendEnabledFiles();
+     // Then, send all enabled fields
+     sendEnabledFields();
+   }
+   else
+   {
+     sendEnabledFiles(clientPrimServer.size());
+     sendEnabledFields(clientPrimServer.size());
+   }

    // At last, we have all info of domain and axis, then send them
    sendRefDomainsAxis();
-
    // After that, send all grid (if any)
    sendRefGrid();
-
    // We have a xml tree on the server side and now, it should be also processed
    sendPostProcessing();
…
  {
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
-   CEventClient event(getType(),EVENT_ID_CLOSE_DEFINITION);
-
-   if (contextClientTmp->isServerLeader())
-   {
-     CMessage msg;
-     msg<<this->getIdServer();
-     const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
-     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       event.push(*itRank,1,msg);
-     contextClientTmp->sendEvent(event);
-   }
-   else contextClientTmp->sendEvent(event);
-
-   // if (!hasServer)
-   // {
-   //   if (client->isServerLeader())
-   //   {
-   //     CMessage msg;
-   //     msg<<this->getIdServer();
-   //     const std::list<int>& ranks = client->getRanksServerLeader();
-   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-   //       event.push(*itRank,1,msg);
-   //     client->sendEvent(event);
-   //   }
-   //   else client->sendEvent(event);
-   // }
-   // else
-   // {
-   //   if (clientPrimServer->isServerLeader())
-   //   {
-   //     CMessage msg;
-   //     msg<<this->getIdServer();
-   //     const std::list<int>& ranks = clientPrimServer->getRanksServerLeader();
-   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-   //       event.push(*itRank,1,msg);
-   //     clientPrimServer->sendEvent(event);
-   //   }
-   //   else clientPrimServer->sendEvent(event);
-   // }
+   // CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[0] : client;
+   int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client;
+     CEventClient event(getType(),EVENT_ID_CLOSE_DEFINITION);
+     if (contextClientTmp->isServerLeader())
+     {
+       CMessage msg;
+       if (hasServer)
+         msg<<this->getIdServer(i);
+       else
+         msg<<this->getIdServer();
+       const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
+       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         event.push(*itRank,1,msg);
+       contextClientTmp->sendEvent(event);
+     }
+     else contextClientTmp->sendEvent(event);
+   }
  }
…
  {
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
-   CEventClient event(getType(),EVENT_ID_UPDATE_CALENDAR);
-
-   if (contextClientTmp->isServerLeader())
-   {
-     CMessage msg;
-     msg<<this->getIdServer()<<step;
-     const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
-     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       event.push(*itRank,1,msg);
-     contextClientTmp->sendEvent(event);
-   }
-   else contextClientTmp->sendEvent(event);
+   // CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
+   int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client;
+     CEventClient event(getType(),EVENT_ID_UPDATE_CALENDAR);
+
+     if (contextClientTmp->isServerLeader())
+     {
+       CMessage msg;
+       if (hasServer)
+         msg<<this->getIdServer(i)<<step;
+       else
+         msg<<this->getIdServer()<<step;
+       const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
+       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         event.push(*itRank,1,msg);
+       contextClientTmp->sendEvent(event);
+     }
+     else contextClientTmp->sendEvent(event);
+   }

    // if (!hasServer)
…
  {
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
-   CEventClient event(getType(),EVENT_ID_CREATE_FILE_HEADER);
-
-   if (contextClientTmp->isServerLeader())
-   {
-     CMessage msg;
-     msg<<this->getIdServer();
-     const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
-     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       event.push(*itRank,1,msg) ;
-     contextClientTmp->sendEvent(event);
-   }
-   else contextClientTmp->sendEvent(event);
+   // CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
+   int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client;
+     CEventClient event(getType(),EVENT_ID_CREATE_FILE_HEADER);
+
+     if (contextClientTmp->isServerLeader())
+     {
+       CMessage msg;
+       if (hasServer)
+         msg<<this->getIdServer(i);
+       else
+         msg<<this->getIdServer();
+       const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
+       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         event.push(*itRank,1,msg) ;
+       contextClientTmp->sendEvent(event);
+     }
+     else contextClientTmp->sendEvent(event);
+   }

    // if (!hasServer)
…
  {
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
-   CEventClient event(getType(),EVENT_ID_POST_PROCESS);
-
-   if (contextClientTmp->isServerLeader())
-   {
-     CMessage msg;
-     msg<<this->getIdServer();
-     const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
-     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       event.push(*itRank,1,msg);
-     contextClientTmp->sendEvent(event);
-   }
-   else contextClientTmp->sendEvent(event);
+   // CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
+   int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client;
+     CEventClient event(getType(),EVENT_ID_POST_PROCESS);
+     if (contextClientTmp->isServerLeader())
+     {
+       CMessage msg;
+       if (hasServer)
+         msg<<this->getIdServer(i);
+       else
+         msg<<this->getIdServer();
+       const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
+       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         event.push(*itRank,1,msg);
+       contextClientTmp->sendEvent(event);
+     }
+     else contextClientTmp->sendEvent(event);
+   }

    // if (hasClient)
…
  }

+ const StdString& CContext::getIdServer(const int i)
+ {
+   idServer_ = this->getId();
+   idServer_ += "_server_";
+   idServer_ += boost::lexical_cast<string>(i);
+   return idServer_;
+ }
+
  /*!
  \brief Do some simple post processings after parsing xml file
…
    StdString fileDefRoot("file_definition");
    CFileGroup* cfgrpPtr = CFileGroup::get(fileDefRoot);
-
    for (int i = 0; i < size; ++i)
    {
…
  }

+ //! Client side: Send infomation of active files (files are enabled to write out)
+ void CContext::sendEnabledFiles(const int nbPools)
+ {
+   int size = this->enabledFiles.size();
+
+   // In a context, each type has a root definition, e.g: axis, domain, field.
+   // Every object must be a child of one of these root definition. In this case
+   // all new file objects created on server must be children of the root "file_definition"
+   StdString fileDefRoot("file_definition");
+   CFileGroup* cfgrpPtr = CFileGroup::get(fileDefRoot);
+
+   {
+     for (int i = 0; i < size; ++i)
+     {
+       int srvId = i % nbPools;
+       cfgrpPtr->sendCreateChild(this->enabledFiles[i]->getId(), srvId);
+       this->enabledFiles[i]->sendAllAttributesToServer(srvId);
+       this->enabledFiles[i]->sendAddAllVariables(srvId);
+     }
+   }
+ }
+
  //! Client side: Send information of active fields (ones are written onto files)
  void CContext::sendEnabledFields()
…
    {
      this->enabledFiles[i]->sendEnabledFields();
+   }
+ }
+
+ void CContext::sendEnabledFields(const int nbPools)
+ {
+   int size = this->enabledFiles.size();
+   for (int i = 0; i < size; ++i)
+   {
+     int srvId = i % nbPools;
+     this->enabledFiles[i]->sendEnabledFields(srvId);
    }
  }
…
  //! Client side: Send information of reference domain and axis of active fields
  void CContext::sendRefDomainsAxis()
+ {
+   std::set<StdString> domainIds, axisIds, scalarIds;
+
+   // Find all reference domain and axis of all active fields
+   int numEnabledFiles = this->enabledFiles.size();
+   for (int i = 0; i < numEnabledFiles; ++i)
+   {
+     std::vector<CField*> enabledFields = this->enabledFiles[i]->getEnabledFields();
+     int numEnabledFields = enabledFields.size();
+     for (int j = 0; j < numEnabledFields; ++j)
+     {
+       const std::vector<StdString>& prDomAxisScalarId = enabledFields[j]->getRefDomainAxisIds();
+       if ("" != prDomAxisScalarId[0]) domainIds.insert(prDomAxisScalarId[0]);
+       if ("" != prDomAxisScalarId[1]) axisIds.insert(prDomAxisScalarId[1]);
+       if ("" != prDomAxisScalarId[2]) scalarIds.insert(prDomAxisScalarId[2]);
+     }
+   }
+
+   // Create all reference axis on server side
+   std::set<StdString>::iterator itDom, itAxis, itScalar;
+   std::set<StdString>::const_iterator itE;
+
+   StdString scalarDefRoot("scalar_definition");
+   CScalarGroup* scalarPtr = CScalarGroup::get(scalarDefRoot);
+   itE = scalarIds.end();
+   for (itScalar = scalarIds.begin(); itScalar != itE; ++itScalar)
+   {
+     if (!itScalar->empty())
+     {
+       scalarPtr->sendCreateChild(*itScalar);
+       CScalar::get(*itScalar)->sendAllAttributesToServer();
+     }
+   }
+
+   StdString axiDefRoot("axis_definition");
+   CAxisGroup* axisPtr = CAxisGroup::get(axiDefRoot);
+   itE = axisIds.end();
+   for (itAxis = axisIds.begin(); itAxis != itE; ++itAxis)
+   {
+     if (!itAxis->empty())
+     {
+       axisPtr->sendCreateChild(*itAxis);
+       CAxis::get(*itAxis)->sendAllAttributesToServer();
+     }
+   }
+
+   // Create all reference domains on server side
+   StdString domDefRoot("domain_definition");
+   CDomainGroup* domPtr = CDomainGroup::get(domDefRoot);
+   itE = domainIds.end();
+   for (itDom = domainIds.begin(); itDom != itE; ++itDom)
+   {
+     if (!itDom->empty()) {
+       domPtr->sendCreateChild(*itDom);
+       CDomain::get(*itDom)->sendAllAttributesToServer();
+     }
+   }
+ }
+
+ //! Client side: Send information of reference domain and axis of active fields
+ void CContext::sendRefDomainsAxis(const int nbPools)
  {
    std::set<StdString> domainIds, axisIds, scalarIds;
…
    registryOut->hierarchicalGatherRegistry() ;

    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
-   CEventClient event(CContext::GetType(), CContext::EVENT_ID_SEND_REGISTRY);
-   if (contextClientTmp->isServerLeader())
-   {
-     CMessage msg ;
-     msg<<this->getIdServer();
-     if (contextClientTmp->clientRank==0) msg<<*registryOut ;
-     const std::list<int>& ranks = client->getRanksServerLeader();
-     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
-       event.push(*itRank,1,msg);
-     contextClientTmp->sendEvent(event);
-   }
-   else contextClientTmp->sendEvent(event);
+   // CContextClient* contextClientTmp = (0 != clientPrimServer) ? clientPrimServer : client;
+   int nbSrvPools = (hasServer) ? clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (hasServer) ? clientPrimServer[i] : client;
+     CEventClient event(CContext::GetType(), CContext::EVENT_ID_SEND_REGISTRY);
+     if (contextClientTmp->isServerLeader())
+     {
+       CMessage msg ;
+       if (hasServer)
+         msg<<this->getIdServer(i);
+       else
+         msg<<this->getIdServer();
+       if (contextClientTmp->clientRank==0) msg<<*registryOut ;
+       const std::list<int>& ranks = client->getRanksServerLeader();
+       for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+         event.push(*itRank,1,msg);
+       contextClientTmp->sendEvent(event);
+     }
+     else contextClientTmp->sendEvent(event);
+   }

    // if (!hasServer)
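Note: the new sendEnabledFiles(nbPools)/sendEnabledFields(nbPools) overloads assign file i to pool i % nbPools, i.e. plain round-robin over the secondary-server pools. A tiny standalone illustration; the file names are invented:

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main()
{
  std::vector<std::string> files;
  files.push_back("histmth.nc"); files.push_back("histday.nc");
  files.push_back("histhf.nc");  files.push_back("restart.nc");
  const int nbPools = 2;  // e.g. two secondary server pools
  for (std::size_t i = 0; i < files.size(); ++i)
    std::cout << files[i] << " -> pool " << (i % nbPools) << "\n";
  return 0;
}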
XIOS/dev/dev_olga/src/node/context.hpp
(diff r983 → r1009)

      void sendCreateFileHeader(void);
      void sendEnabledFiles();
+     void sendEnabledFiles(const int nbPools);
      void sendEnabledFields();
+     void sendEnabledFields(const int nbPools);
      void sendRefDomainsAxis();
+     void sendRefDomainsAxis(const int nbPools);
      void sendRefGrid();
      void sendPostProcessing();
…
      const StdString& getIdServer();
+     const StdString& getIdServer(const int srvPoolNb);

      // Client side: Receive and process messages
…
      // Client-server pair in case of secondary server pool
-     CContextClient* clientPrimServer;
-     CContextServer* serverPrimServer;
+     // CContextClient* clientPrimServer;
+     // CContextServer* serverPrimServer;
+     std::vector<CContextClient*> clientPrimServer;
+     std::vector<CContextServer*> serverPrimServer;

    private:
XIOS/dev/dev_olga/src/node/domain.cpp
(diff r987 → r1009)

    }
  }
-
-
-
-
-
-
  void CDomain::checkEligibilityForCompressedOutput(void)
…
    CContext* context = CContext::getCurrent();
    // Use correct context client to send message
-   CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer
-                                                                       : context->client;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i]
+                                                             : context->client;
      // CContextClient* client = context->client;
      int nbServer = contextClientTmp->serverSize;

      CServerDistributionDescription serverDescription(nGlobDomain_, nbServer);
      if (isUnstructed_) serverDescription.computeServerDistribution(false, 0);
      else serverDescription.computeServerDistribution(false, 1);

      std::vector<std::vector<int> > serverIndexBegin = serverDescription.getServerIndexBegin();
      std::vector<std::vector<int> > serverDimensionSizes = serverDescription.getServerDimensionSizes();

      CEventClient event(getType(),EVENT_ID_SERVER_ATTRIBUT);
      if (contextClientTmp->isServerLeader())
      {
        std::list<CMessage> msgs;

        const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();
        for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
        {
          // Use const int to ensure CMessage holds a copy of the value instead of just a reference
          const int ibegin_srv = serverIndexBegin[*itRank][0];
          const int jbegin_srv = serverIndexBegin[*itRank][1];
          const int ni_srv = serverDimensionSizes[*itRank][0];
          const int nj_srv = serverDimensionSizes[*itRank][1];
          const int iend_srv = ibegin_srv + ni_srv - 1;
          const int jend_srv = jbegin_srv + nj_srv - 1;

          msgs.push_back(CMessage());
          CMessage& msg = msgs.back();
          msg << this->getId() ;
          msg << ni_srv << ibegin_srv << iend_srv << nj_srv << jbegin_srv << jend_srv;
          msg << global_zoom_ni.getValue() << global_zoom_ibegin.getValue() << global_zoom_nj.getValue() << global_zoom_jbegin.getValue();
          msg << isCompressible_;

          event.push(*itRank,1,msg);
        }
        contextClientTmp->sendEvent(event);
      }
      else contextClientTmp->sendEvent(event);
+   }
  }
XIOS/dev/dev_olga/src/node/field.cpp
(diff r987 → r1009)

    CContext* context = CContext::getCurrent();
    // CContextClient* client = context->client;
-   CContextClient* client = (!context->hasServer) ? context->client : context->clientPrimServer;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* client = (!context->hasServer) ? context->client : context->clientPrimServer[i];
+
+     CEventClient event(getType(), EVENT_ID_UPDATE_DATA);
+
+     map<int, CArray<int,1> >::iterator it;
+     list<CMessage> list_msg;
+     list<CArray<double,1> > list_data;
+
+     if (!grid->doGridHaveDataDistributed())
+     {
+       if (client->isServerLeader())
+       {
+         for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++)
+         {
+           int rank = it->first;
+           CArray<int,1>& index = it->second;
+
+           list_msg.push_back(CMessage());
+           list_data.push_back(CArray<double,1>(index.numElements()));
+
+           CArray<double,1>& data_tmp = list_data.back();
+           for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n));
+
+           list_msg.back() << getId() << data_tmp;
+           event.push(rank, 1, list_msg.back());
+         }
+         client->sendEvent(event);
+       }
+       else client->sendEvent(event);
+     }
+     else
+     {
+       for (it = grid->storeIndex_toSrv.begin(); it != grid->storeIndex_toSrv.end(); it++)
+       {
+         int rank = it->first;
+         CArray<int,1>& index = it->second;
+
+         list_msg.push_back(CMessage());
+         list_data.push_back(CArray<double,1>(index.numElements()));
+
+         CArray<double,1>& data_tmp = list_data.back();
+         for (int n = 0; n < data_tmp.numElements(); n++) data_tmp(n) = data(index(n));
+
+         list_msg.back() << getId() << data_tmp;
+         event.push(rank, grid->nbSenders[rank], list_msg.back());
+       }
+       client->sendEvent(event);
+     }
+   }
+
+   CTimer::get("XIOS Send Data").suspend();
+ }
+
+ void CField::sendUpdateData(const CArray<double,1>& data, const int srvPool)
+ {
+   CTimer::get("XIOS Send Data").resume();
+
+   CContext* context = CContext::getCurrent();
+   CContextClient* client = context->clientPrimServer[srvPool];

    CEventClient event(getType(), EVENT_ID_UPDATE_DATA);
…
    // else
      this->outputField(fieldData);
-   sendUpdateData(fieldData);
+   // sendUpdateData(fieldData);
+   // Redirecting data to the correct secondary server
+   int fileIdx = std::find(context->enabledFiles.begin(), context->enabledFiles.end(), this->file) - context->enabledFiles.begin();
+   int srvId = fileIdx % context->clientPrimServer.size();
+   sendUpdateData(fieldData, srvId);
  }
  if (!context->hasClient && context->hasServer)
  {
+   // size_t writtenSize;
+   // if (this->getUseCompressedOutput())
+   //   writtenSize = grid->getNumberWrittenIndexes();
+   // else
+   //   writtenSize = grid->getWrittenDataSize();
+   //
+   // CArray<double,1> fieldData(writtenSize);
+
+   // if (this->getUseCompressedOutput())
+   //   this->outputCompressedField(fieldData);
+   // else
+   //   this->outputField(fieldData);
    writeField();
  }
…
  }

+ void CField::sendAddAllVariables(const int srvPool)
+ {
+   std::vector<CVariable*> allVar = getAllVariables();
+   std::vector<CVariable*>::const_iterator it = allVar.begin();
+   std::vector<CVariable*>::const_iterator itE = allVar.end();
+
+   for (; it != itE; ++it)
+   {
+     this->sendAddVariable((*it)->getId());
+     (*it)->sendAllAttributesToServer(srvPool);
+     (*it)->sendValue(srvPool);
+   }
+ }
+
  void CField::sendAddVariable(const string& id)
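Note: the client side must reproduce the round-robin assignment when redirecting field data, which is what the std::find line in updateData does: locate the field's file in context->enabledFiles and map its position to a pool id. A simplified, self-contained version of the lookup, with void* standing in for CFile*:

#include <algorithm>
#include <cassert>
#include <vector>

int poolForFile(const std::vector<const void*>& enabledFiles,
                const void* file, int nbPools)
{
  // Position of the file in the enabled-file list...
  const int fileIdx = std::find(enabledFiles.begin(), enabledFiles.end(), file)
                    - enabledFiles.begin();
  // ...must match the assignment done in sendEnabledFiles(nbPools).
  return fileIdx % nbPools;
}

int main()
{
  int a, b, c;  // dummy file objects
  std::vector<const void*> files;
  files.push_back(&a); files.push_back(&b); files.push_back(&c);
  assert(poolForFile(files, &c, 2) == 0);  // index 2, 2 % 2 == 0
  return 0;
}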
XIOS/dev/dev_olga/src/node/field.hpp
(diff r957 → r1009)

      static bool dispatchEvent(CEventServer& event);
      void sendUpdateData(const CArray<double,1>& data);
+     void sendUpdateData(const CArray<double,1>& data, const int srvPool);
      static void recvUpdateData(CEventServer& event);
      void recvUpdateData(vector<int>& ranks, vector<CBufferIn*>& buffers);
…
      CVariableGroup* addVariableGroup(const string& id = "");
      void sendAddVariable(const string& id = "");
+     void sendAddVariable(const string& id, const int srvPool);
      void sendAddVariableGroup(const string& id = "");
      static void recvAddVariable(CEventServer& event);
…
      void recvAddVariableGroup(CBufferIn& buffer);
      void sendAddAllVariables();
+     void sendAddAllVariables(const int srvPool);
XIOS/dev/dev_olga/src/node/file.cpp
(diff r987 → r1009)

    const int recordOffset = record_offset.isEmpty() ? 0 : record_offset;

-   set<CAxis*> setAxis;
-   set<CDomain*> setDomains;
+   // set<CAxis*> setAxis;
+   // set<CDomain*> setDomains;
+   set<StdString> setAxis;
+   set<StdString> setDomains;

    std::vector<CField*>::iterator it, end = this->enabledFields.end();
…
      std::vector<CAxis*> vecAxis = field->grid->getAxis();
      for (size_t i = 0; i < vecAxis.size(); ++i)
-       setAxis.insert(vecAxis[i]);
+       setAxis.insert(vecAxis[i]->getAxisOutputName());
+       // setAxis.insert(vecAxis[i]);
      std::vector<CDomain*> vecDomains = field->grid->getDomains();
      for (size_t i = 0; i < vecDomains.size(); ++i)
-       setDomains.insert(vecDomains[i]);
+       setDomains.insert(vecDomains[i]->getDomainOutputName());
+       // setDomains.insert(vecDomains[i]);

      field->resetNStep(recordOffset);
…
  }

+ void CFile::sendAddField(const string& id, const int srvPool)
+ {
+   sendAddItem(id, EVENT_ID_ADD_FIELD, srvPool);
+   // CContext* context = CContext::getCurrent();
+
+   // if (! context->hasServer )
+   // {
+   //   CContextClient* client = context->client;
+
+   //   CEventClient event(this->getType(),EVENT_ID_ADD_FIELD);
+   //   if (client->isServerLeader())
+   //   {
+   //     CMessage msg;
+   //     msg << this->getId();
+   //     msg << id;
+   //     const std::list<int>& ranks = client->getRanksServerLeader();
+   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+   //       event.push(*itRank,1,msg);
+   //     client->sendEvent(event);
+   //   }
+   //   else client->sendEvent(event);
+   // }
+
+ }
+
  /*!
  \brief Send a message to create a field group on server side
…
  }

+ void CFile::sendAddAllVariables(const int srvPool)
+ {
+   std::vector<CVariable*> allVar = getAllVariables();
+   std::vector<CVariable*>::const_iterator it = allVar.begin();
+   std::vector<CVariable*>::const_iterator itE = allVar.end();
+
+   for (; it != itE; ++it)
+   {
+     this->sendAddVariable((*it)->getId(), srvPool);
+     (*it)->sendAllAttributesToServer(srvPool);
+     (*it)->sendValue(srvPool);
+   }
+ }
+
+ /*!
+ \brief Send a message to create a variable group on server side
+ \param[in] id String identity of variable group that will be created on server
+ */
+ void CFile::sendAddVariableGroup(const string& id)
+ {
+   sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE_GROUP);
+   // CContext* context = CContext::getCurrent();
+   // if (! context->hasServer )
+   // {
+   //   CContextClient* client = context->client;
+
+   //   CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE_GROUP);
+   //   if (client->isServerLeader())
+   //   {
+   //     CMessage msg;
+   //     msg << this->getId();
+   //     msg << id;
+   //     const std::list<int>& ranks = client->getRanksServerLeader();
+   //     for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
+   //       event.push(*itRank,1,msg);
+   //     client->sendEvent(event);
+   //   }
+   //   else client->sendEvent(event);
+   // }
+
+ }
+
  /*!
  \brief Send a message to create a variable on server side
…
  }

- /*!
- \brief Send a message to create a variable group on server side
- \param[in] id String identity of variable group that will be created on server
- */
- void CFile::sendAddVariableGroup(const string& id)
- {
-   sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE_GROUP);
+ void CFile::sendAddVariable(const string& id, const int srvPool)
+ {
+   sendAddItem(id, (int)EVENT_ID_ADD_VARIABLE, srvPool);
    // CContext* context = CContext::getCurrent();
+
    // if (! context->hasServer )
    // {
    //   CContextClient* client = context->client;

-   //   CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE_GROUP);
+   //   CEventClient event(this->getType(),EVENT_ID_ADD_VARIABLE);
    //   if (client->isServerLeader())
    //   {
…
  }

+ void CFile::sendEnabledFields(const int srvPool)
+ {
+   size_t size = this->enabledFields.size();
+   for (size_t i = 0; i < size; ++i)
+   {
+     CField* field = this->enabledFields[i];
+     this->sendAddField(field->getId(), srvPool);
+     field->sendAllAttributesToServer(srvPool);
+     field->sendAddAllVariables(srvPool);
+   }
+ }
+
  /*!
  \brief Dispatch event received from client
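Note: switching setAxis/setDomains from object pointers to output names changes the deduplication key; two distinct objects that resolve to the same output name (e.g. via axis_ref) now count once, which is what the file writer needs. A small demonstration of the difference, with Axis as a stand-in for CAxis:

#include <cassert>
#include <set>
#include <string>

struct Axis
{
  std::string name;
  const std::string& getAxisOutputName() const { return name; }
};

int main()
{
  Axis a1 = { "lev" };
  Axis a2 = { "lev" };            // different object, same output name

  std::set<const Axis*> byPointer;
  byPointer.insert(&a1); byPointer.insert(&a2);
  assert(byPointer.size() == 2);  // pointer set keeps both: a duplicate axis

  std::set<std::string> byName;
  byName.insert(a1.getAxisOutputName()); byName.insert(a2.getAxisOutputName());
  assert(byName.size() == 1);     // name set collapses them, as intended
  return 0;
}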
XIOS/dev/dev_olga/src/node/file.hpp
(diff r992 → r1009)

      // Send info to server
      void sendEnabledFields();
+     void sendEnabledFields(const int srvPool);
      void sendAddField(const string& id = "");
+     void sendAddField(const string& id, const int srvPool);
      void sendAddFieldGroup(const string& id = "");
      void sendAddAllVariables();
+     void sendAddAllVariables(const int srvPool);
      void sendAddVariable(const string& id = "");
+     void sendAddVariable(const string& id, const int srvPool);
      void sendAddVariableGroup(const string& id = "");
XIOS/dev/dev_olga/src/node/grid.cpp
(diff r987 → r1009)

  {
    CContext* context = CContext::getCurrent();
-   CContextClient* client= context->hasServer ? context->clientPrimServer : context->client;
-
-   if (isScalarGrid())
-   {
-     if (context->hasClient && !context->hasServer)
-     //if (context->hasClient)
-       if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndexScalarGrid(); this->isIndexSent = true; }
-
-     if (this->isChecked) return;
-     if (context->hasClient && !context->hasServer)
-     //if (context->hasClient)
-     {
-       this->computeIndexScalarGrid();
-     }
-
-     this->isChecked = true;
-     return;
-   }
-
-   if (context->hasClient && !context->hasServer)
-   // if (context->hasClient)
-     if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndex(); this->isIndexSent = true; }
-
-   if (this->isChecked) return;
-
-   if (context->hasClient && !context->hasServer)
-   // if (context->hasClient)
-   {
-     this->checkAttributesAfterTransformation();
-     this->checkMask();
-     this->computeIndex();
-   }
-   this->isChecked = true;
+   CContextClient* client;
+
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     if (context->hasClient)
+     {
+       if (context->hasServer)
+         client = context->clientPrimServer[i];
+       else
+         client = context->client;
+     }
+
+     if (isScalarGrid())
+     {
+       if (context->hasClient && !context->hasServer)
+       // if (context->hasClient)
+         if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndexScalarGrid(); this->isIndexSent = true; }
+
+       if (this->isChecked) return;
+       if (context->hasClient && !context->hasServer)
+       // if (context->hasClient)
+       {
+         this->computeIndexScalarGrid();
+       }
+
+       this->isChecked = true;
+       return;
+     }
+
+     if (context->hasClient && !context->hasServer)
+     // if (context->hasClient)
+       if (this->isChecked && doSendingIndex && !isIndexSent) { sendIndex(); this->isIndexSent = true; }
+
+     if (this->isChecked) return;
+
+     if (context->hasClient && !context->hasServer)
+     // if (context->hasClient)
+     {
+       this->checkAttributesAfterTransformation();
+       this->checkMask();
+       this->computeIndex();
+     }
+     this->isChecked = true;
+   }
  }
…
    CContext* context = CContext::getCurrent();
    // CContextClient* client = context->client;
-   CContextClient* client = (context->hasServer) ? context->clientPrimServer : context->client;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* client = (context->hasServer) ? context->clientPrimServer[i] : context->client;

      // First of all, compute distribution on client side
      if (0 != serverDistribution_)
        clientDistribution_ = new CDistributionClient(client->clientRank, serverDistribution_->getGlobalLocalIndex());
      else
        clientDistribution_ = new CDistributionClient(client->clientRank, this);

      // Get local data index on client
      int tmp = clientDistribution_->getLocalDataIndexOnClient().size();
      storeIndex_client.resize(clientDistribution_->getLocalDataIndexOnClient().size());
      int nbStoreIndex = storeIndex_client.numElements();
      for (int idx = 0; idx < nbStoreIndex; ++idx) storeIndex_client(idx) = (clientDistribution_->getLocalDataIndexOnClient())[idx];
      isDataDistributed_= clientDistribution_->isDataDistributed();

      connectedServerRank_.clear();

      if (!doGridHaveDataDistributed())
      {
        if (client->isServerLeader())
        {
          size_t ssize = clientDistribution_->getLocalDataIndexOnClient().size();
          const std::list<int>& ranks = client->getRanksServerLeader();
          for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
          {
            connectedServerRank_.push_back(*itRank);
            connectedDataSize_[*itRank] = ssize;
          }
        }
        return;
      }

      // Compute mapping between client and server
      std::vector<boost::unordered_map<size_t,std::vector<int> > > indexServerOnElement;
      CServerDistributionDescription serverDistributionDescription(globalDim_, client->serverSize);
      serverDistributionDescription.computeServerGlobalByElement(indexServerOnElement,
                                                                 client->clientRank,
                                                                 client->clientSize,
                                                                 axis_domain_order,
                                                                 positionDimensionDistributed_);
      computeIndexByElement(indexServerOnElement, globalIndexOnServer_);

      const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer();
      CDistributionClient::GlobalLocalDataMap::const_iterator iteGlobalLocalIndexMap = globalLocalIndexSendToServer.end(), itGlobalLocalIndexMap;
      CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itbGlobalMap, itGlobalMap;
      itGlobalMap = itbGlobalMap = globalIndexOnServer_.begin();
      iteGlobalMap = globalIndexOnServer_.end();

      for (; itGlobalMap != iteGlobalMap; ++itGlobalMap)
      {
        int serverRank = itGlobalMap->first;
        int indexSize = itGlobalMap->second.size();
        const std::vector<size_t>& indexVec = itGlobalMap->second;
        for (int idx = 0; idx < indexSize; ++idx)
        {
          itGlobalLocalIndexMap = globalLocalIndexSendToServer.find(indexVec[idx]);
          if (iteGlobalLocalIndexMap != itGlobalLocalIndexMap)
          {
            if (connectedDataSize_.end() == connectedDataSize_.find(serverRank))
              connectedDataSize_[serverRank] = 1;
            else
              ++connectedDataSize_[serverRank];
          }
        }
      }

      for (itGlobalMap = itbGlobalMap; itGlobalMap != iteGlobalMap; ++itGlobalMap) {
        connectedServerRank_.push_back(itGlobalMap->first);
      }

      nbSenders = clientServerMap_->computeConnectedClients(client->serverSize, client->clientSize, client->intraComm, connectedServerRank_);
+   }
  }
…
  {
    CContext* context = CContext::getCurrent();
-   CContextClient* client = context->hasServer ? context->clientPrimServer : context->client;
+   int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1;
+   for (int i = 0; i < nbSrvPools; ++i)
+   {
+     CContextClient* client = context->hasServer ? context->clientPrimServer[i] : context->client;
      int serverSize = client->serverSize;
      std::vector<CDomain*> domList = getDomains();
      std::vector<CAxis*> axisList = getAxis();

      // Some pre-calculations of global index on each element of current grid.
      int nbElement = axis_domain_order.numElements();
      std::vector<CArray<size_t,1> > globalIndexElement(nbElement);
      int domainIdx = 0, axisIdx = 0, scalarIdx = 0;
      std::vector<size_t> elementNGlobal(nbElement);
      elementNGlobal[0] = 1;
      size_t globalSize = 1;
      for (int idx = 0; idx < nbElement; ++idx)
      {
        elementNGlobal[idx] = globalSize;
        size_t elementSize;
        size_t elementGlobalSize = 1;
        if (2 == axis_domain_order(idx)) // This is domain
        {
          elementSize = domList[domainIdx]->i_index.numElements();
          globalIndexElement[idx].resize(elementSize);
          for (int jdx = 0; jdx < elementSize; ++jdx)
          {
            globalIndexElement[idx](jdx) = (domList[domainIdx]->i_index)(jdx) + domList[domainIdx]->ni_glo * (domList[domainIdx]->j_index)(jdx);
          }
          elementGlobalSize = domList[domainIdx]->ni_glo.getValue() * domList[domainIdx]->nj_glo.getValue();
          ++domainIdx;
        }
        else if (1 == axis_domain_order(idx)) // This is axis
        {
          elementSize = axisList[axisIdx]->index.numElements();
          globalIndexElement[idx].resize(elementSize);
          for (int jdx = 0; jdx < elementSize; ++jdx)
          {
            globalIndexElement[idx](jdx) = (axisList[axisIdx]->index)(jdx);
          }
          elementGlobalSize = axisList[axisIdx]->n_glo.getValue();
          ++axisIdx;
        }
        else // Of course, this is scalar
        {
          globalIndexElement[idx].resize(1);
          globalIndexElement[idx](0) = 0;
          elementGlobalSize = 1;
        }
        globalSize *= elementGlobalSize;
      }

      std::vector<std::vector<bool> > elementOnServer(nbElement, std::vector<bool>(serverSize, false));
      std::vector<boost::unordered_map<int,std::vector<size_t> > > globalElementIndexOnServer(nbElement);
      CArray<int,1> nbIndexOnServer(serverSize); // Number of distributed global index held by each client for each server
      // Number of temporary distributed global index held by each client for each server
      // We have this variable for the case of non-distributed element (often axis) to check the duplicate server rank
      CArray<int,1> nbIndexOnServerTmp(serverSize);
      for (int idx = 0; idx < nbElement; ++idx)
      {
        nbIndexOnServer = 0;
        const boost::unordered_map<size_t,std::vector<int> >& indexServerElement = indexServerOnElement[idx];
        const CArray<size_t,1>& globalIndexElementOnClient = globalIndexElement[idx];
        CClientClientDHTInt clientClientDHT(indexServerElement, client->intraComm);
        clientClientDHT.computeIndexInfoMapping(globalIndexElementOnClient);
        const CClientClientDHTInt::Index2VectorInfoTypeMap& globalIndexElementOnServerMap = clientClientDHT.getInfoIndexMap();
        CClientClientDHTInt::Index2VectorInfoTypeMap::const_iterator itb = globalIndexElementOnServerMap.begin(),
                                                                     ite = globalIndexElementOnServerMap.end(), it;
        for (it = itb; it != ite; ++it)
        {
          const
std::vector<int>& tmp = it->second; 676 nbIndexOnServerTmp = 0; 677 for (int i = 0; i < tmp.size(); ++i) 678 { 679 if (0 == nbIndexOnServerTmp(tmp[i])) ++nbIndexOnServerTmp(tmp[i]); 680 } 681 nbIndexOnServer += nbIndexOnServerTmp; 625 682 } 626 elementGlobalSize = axisList[axisIdx]->n_glo.getValue(); 627 ++axisIdx; 628 } 629 else // Of course, this is scalar 630 { 631 globalIndexElement[idx].resize(1); 632 globalIndexElement[idx](0) = 0; 633 elementGlobalSize = 1; 634 } 635 globalSize *= elementGlobalSize; 636 } 637 638 std::vector<std::vector<bool> > elementOnServer(nbElement, std::vector<bool>(serverSize, false)); 639 std::vector<boost::unordered_map<int,std::vector<size_t> > > globalElementIndexOnServer(nbElement); 640 CArray<int,1> nbIndexOnServer(serverSize); // Number of distributed global index held by each client for each server 641 // Number of temporary distributed global index held by each client for each server 642 // We have this variable for the case of non-distributed element (often axis) to check the duplicate server rank 643 CArray<int,1> nbIndexOnServerTmp(serverSize); 644 for (int idx = 0; idx < nbElement; ++idx) 645 { 646 nbIndexOnServer = 0; 647 const boost::unordered_map<size_t,std::vector<int> >& indexServerElement = indexServerOnElement[idx]; 648 const CArray<size_t,1>& globalIndexElementOnClient = globalIndexElement[idx]; 649 CClientClientDHTInt clientClientDHT(indexServerElement, client->intraComm); 650 clientClientDHT.computeIndexInfoMapping(globalIndexElementOnClient); 651 const CClientClientDHTInt::Index2VectorInfoTypeMap& globalIndexElementOnServerMap = clientClientDHT.getInfoIndexMap(); 652 CClientClientDHTInt::Index2VectorInfoTypeMap::const_iterator itb = globalIndexElementOnServerMap.begin(), 653 ite = globalIndexElementOnServerMap.end(), it; 654 for (it = itb; it != ite; ++it) 655 { 656 const std::vector<int>& tmp = it->second; 657 nbIndexOnServerTmp = 0; 658 for (int i = 0; i < tmp.size(); ++i) 683 684 for (int i = 0; i < serverSize; ++i) 659 685 { 660 if (0 == nbIndexOnServerTmp(tmp[i])) ++nbIndexOnServerTmp(tmp[i]); 686 if (0 != nbIndexOnServer(i)) 687 { 688 globalElementIndexOnServer[idx][i].resize(nbIndexOnServer(i)); 689 elementOnServer[idx][i] = true; 690 } 661 691 } 662 nbIndexOnServer += nbIndexOnServerTmp; 663 } 664 665 for (int i = 0; i < serverSize; ++i) 666 { 667 if (0 != nbIndexOnServer(i)) 692 693 nbIndexOnServer = 0; 694 for (it = itb; it != ite; ++it) 668 695 { 669 globalElementIndexOnServer[idx][i].resize(nbIndexOnServer(i)); 670 elementOnServer[idx][i] = true; 696 const std::vector<int>& tmp = it->second; 697 nbIndexOnServerTmp = 0; 698 for (int i = 0; i < tmp.size(); ++i) 699 { 700 if (0 == nbIndexOnServerTmp(tmp[i])) 701 { 702 globalElementIndexOnServer[idx][tmp[i]][nbIndexOnServer(tmp[i])] = it->first; 703 ++nbIndexOnServerTmp(tmp[i]); 704 } 705 } 706 nbIndexOnServer += nbIndexOnServerTmp; 671 707 } 672 708 } 673 709 674 nbIndexOnServer = 0; 675 for (it = itb; it != ite; ++it) 676 { 677 const std::vector<int>& tmp = it->second; 678 nbIndexOnServerTmp = 0; 679 for (int i = 0; i < tmp.size(); ++i) 680 { 681 if (0 == nbIndexOnServerTmp(tmp[i])) 682 { 683 globalElementIndexOnServer[idx][tmp[i]][nbIndexOnServer(tmp[i])] = it->first; 684 ++nbIndexOnServerTmp(tmp[i]); 685 } 686 } 687 nbIndexOnServer += nbIndexOnServerTmp; 688 } 710 // Determine server which contain global source index 711 std::vector<bool> intersectedProc(serverSize, true); 712 for (int idx = 0; idx < nbElement; ++idx) 713 { 714 std::transform(elementOnServer[idx].begin(), 
elementOnServer[idx].end(), 715 intersectedProc.begin(), intersectedProc.begin(), 716 std::logical_and<bool>()); 717 } 718 719 std::vector<int> srcRank; 720 for (int idx = 0; idx < serverSize; ++idx) 721 { 722 if (intersectedProc[idx]) srcRank.push_back(idx); 723 } 724 725 // Compute the global index of grid from global index of each element. 726 for (int i = 0; i < srcRank.size(); ++i) 727 { 728 size_t ssize = 1; 729 int rankSrc = srcRank[i]; 730 std::vector<std::vector<size_t>* > globalIndexOfElementTmp(nbElement); 731 std::vector<size_t> currentIndex(nbElement,0); 732 for (int idx = 0; idx < nbElement; ++idx) 733 { 734 ssize *= (globalElementIndexOnServer[idx][rankSrc]).size(); 735 globalIndexOfElementTmp[idx] = &(globalElementIndexOnServer[idx][rankSrc]); 736 } 737 globalIndexOnServer[rankSrc].resize(ssize); 738 739 std::vector<int> idxLoop(nbElement,0); 740 int innnerLoopSize = (globalIndexOfElementTmp[0])->size(); 741 size_t idx = 0; 742 while (idx < ssize) 743 { 744 for (int ind = 0; ind < nbElement; ++ind) 745 { 746 if (idxLoop[ind] == (globalIndexOfElementTmp[ind])->size()) 747 { 748 idxLoop[ind] = 0; 749 ++idxLoop[ind+1]; 750 } 751 752 currentIndex[ind] = (*(globalIndexOfElementTmp[ind]))[idxLoop[ind]]; 753 } 754 755 for (int ind = 0; ind < innnerLoopSize; ++ind) 756 { 757 currentIndex[0] = (*globalIndexOfElementTmp[0])[ind]; 758 size_t globalSrcIndex = 0; 759 for (int idxElement = 0; idxElement < nbElement; ++idxElement) 760 { 761 globalSrcIndex += currentIndex[idxElement] * elementNGlobal[idxElement]; 762 } 763 globalIndexOnServer[rankSrc][idx] = globalSrcIndex; 764 ++idx; 765 ++idxLoop[0]; 766 } 767 } 768 } 689 769 } 690 691 // Determine server which contain global source index692 std::vector<bool> intersectedProc(serverSize, true);693 for (int idx = 0; idx < nbElement; ++idx)694 {695 std::transform(elementOnServer[idx].begin(), elementOnServer[idx].end(),696 intersectedProc.begin(), intersectedProc.begin(),697 std::logical_and<bool>());698 }699 700 std::vector<int> srcRank;701 for (int idx = 0; idx < serverSize; ++idx)702 {703 if (intersectedProc[idx]) srcRank.push_back(idx);704 }705 706 // Compute the global index of grid from global index of each element.707 for (int i = 0; i < srcRank.size(); ++i)708 {709 size_t ssize = 1;710 int rankSrc = srcRank[i];711 std::vector<std::vector<size_t>* > globalIndexOfElementTmp(nbElement);712 std::vector<size_t> currentIndex(nbElement,0);713 for (int idx = 0; idx < nbElement; ++idx)714 {715 ssize *= (globalElementIndexOnServer[idx][rankSrc]).size();716 globalIndexOfElementTmp[idx] = &(globalElementIndexOnServer[idx][rankSrc]);717 }718 globalIndexOnServer[rankSrc].resize(ssize);719 720 std::vector<int> idxLoop(nbElement,0);721 int innnerLoopSize = (globalIndexOfElementTmp[0])->size();722 size_t idx = 0;723 while (idx < ssize)724 {725 for (int ind = 0; ind < nbElement; ++ind)726 {727 if (idxLoop[ind] == (globalIndexOfElementTmp[ind])->size())728 {729 idxLoop[ind] = 0;730 ++idxLoop[ind+1];731 }732 733 currentIndex[ind] = (*(globalIndexOfElementTmp[ind]))[idxLoop[ind]];734 }735 736 for (int ind = 0; ind < innnerLoopSize; ++ind)737 {738 currentIndex[0] = (*globalIndexOfElementTmp[0])[ind];739 size_t globalSrcIndex = 0;740 for (int idxElement = 0; idxElement < nbElement; ++idxElement)741 {742 globalSrcIndex += currentIndex[idxElement] * elementNGlobal[idxElement];743 }744 globalIndexOnServer[rankSrc][idx] = globalSrcIndex;745 ++idx;746 ++idxLoop[0];747 }748 }749 }750 770 } 751 771 
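The computeIndexByElement block above builds the flat grid index from per-element indices: elementNGlobal[e] holds the product of the global sizes of all preceding elements (the stride of element e), and the final index is the sum of index times stride over all elements. A minimal, self-contained illustration of that arithmetic (all names here are hypothetical, not XIOS code):

{{{
#!cpp
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // Hypothetical grid: a 4x3 domain flattened to 12 cells and an axis of 5 levels.
  std::vector<std::size_t> elementGlobalSize;
  elementGlobalSize.push_back(12);
  elementGlobalSize.push_back(5);

  // elementNGlobal[e] = product of the global sizes of elements 0..e-1,
  // i.e. the stride of element e in the flattened grid index.
  std::vector<std::size_t> elementNGlobal(elementGlobalSize.size());
  std::size_t stride = 1;
  for (std::size_t e = 0; e < elementGlobalSize.size(); ++e)
  {
    elementNGlobal[e] = stride;
    stride *= elementGlobalSize[e];
  }

  // Per-element index lists (one server rank's share, in the diff's terms).
  std::size_t domIdx[]  = {1, 7};
  std::size_t axisIdx[] = {0, 4};

  // Cartesian product: each combination maps to one flat grid index.
  for (int a = 0; a < 2; ++a)
    for (int d = 0; d < 2; ++d)
    {
      std::size_t globalSrcIndex = domIdx[d]  * elementNGlobal[0]
                                 + axisIdx[a] * elementNGlobal[1];
      std::cout << globalSrcIndex << std::endl;   // prints 1, 7, 49, 55
    }
  return 0;
}
}}}

The odometer-style idxLoop in the diff enumerates the same Cartesian product without nesting one loop per element, which is why it works for any number of grid elements.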
//---------------------------------------------------------------- … … 986 1006 { 987 1007 CContext* context = CContext::getCurrent(); 988 CContextClient* client = context->hasServer ? context->clientPrimServer : context->client; 989 990 storeIndex_client.resize(1); 991 storeIndex_client(0) = 0; 992 993 connectedServerRank_.clear(); 994 995 if (0 == client->clientRank) 996 { 997 for (int rank = 0; rank < client->serverSize; ++rank) 998 { 999 connectedServerRank_.push_back(rank); 1000 connectedDataSize_[rank] = 1; 1001 nbSenders[rank] = 1; 1002 } 1003 } 1004 isDataDistributed_ = false; 1008 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1009 for (int i = 0; i < nbSrvPools; ++i) 1010 { 1011 CContextClient* client = context->hasServer ? context->clientPrimServer[i] : context->client; 1012 1013 storeIndex_client.resize(1); 1014 storeIndex_client(0) = 0; 1015 1016 connectedServerRank_.clear(); 1017 1018 if (0 == client->clientRank) 1019 { 1020 for (int rank = 0; rank < client->serverSize; ++rank) 1021 { 1022 connectedServerRank_.push_back(rank); 1023 connectedDataSize_[rank] = 1; 1024 nbSenders[rank] = 1; 1025 } 1026 } 1027 isDataDistributed_ = false; 1028 } 1005 1029 } 1006 1030 … … 1042 1066 { 1043 1067 CContext* context = CContext::getCurrent(); 1044 CContextClient* client = context->hasServer ? context->clientPrimServer : context->client; 1045 1046 CEventClient event(getType(), EVENT_ID_INDEX); 1047 list<CMessage> listMsg; 1048 list<CArray<size_t,1> > listOutIndex; 1049 1050 if (client->isServerLeader()) 1051 { 1052 const std::list<int>& ranks = client->getRanksServerLeader(); 1053 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1054 { 1055 int rank = *itRank; 1056 int nb = 1; 1057 storeIndex_toSrv.insert(std::make_pair(rank, CArray<int,1>(nb))); 1058 listOutIndex.push_back(CArray<size_t,1>(nb)); 1059 1060 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank]; 1061 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1062 1063 for (int k = 0; k < nb; ++k) 1064 { 1065 outGlobalIndexOnServer(k) = 0; 1066 outLocalIndexToServer(k) = 0; 1067 } 1068 1069 listMsg.push_back(CMessage()); 1070 listMsg.back() << getId( )<< isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1071 1072 event.push(rank, 1, listMsg.back()); 1073 } 1074 client->sendEvent(event); 1075 } 1076 else 1077 client->sendEvent(event); 1078 } 1079 1080 void CGrid::sendIndex(void) 1081 { 1082 CContext* context = CContext::getCurrent(); 1083 // CContextClient* client = context->client; 1084 CContextClient* client = context->hasServer ? context->clientPrimServer : context->client ; 1085 1086 CEventClient event(getType(), EVENT_ID_INDEX); 1087 int rank; 1088 list<CMessage> listMsg; 1089 list<CArray<size_t,1> > listOutIndex; 1090 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 1091 CDistributionClient::GlobalLocalDataMap::const_iterator itIndex = globalLocalIndexSendToServer.begin(), 1092 iteIndex = globalLocalIndexSendToServer.end(); 1093 1094 if (!doGridHaveDataDistributed()) 1095 { 1068 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1069 for (int i = 0; i < nbSrvPools; ++i) 1070 { 1071 CContextClient* client = context->hasServer ? 
context->clientPrimServer[i] : context->client; 1072 1073 CEventClient event(getType(), EVENT_ID_INDEX); 1074 list<CMessage> listMsg; 1075 list<CArray<size_t,1> > listOutIndex; 1076 1096 1077 if (client->isServerLeader()) 1097 1078 { 1098 int indexSize = globalLocalIndexSendToServer.size();1099 CArray<size_t,1> outGlobalIndexOnServer(indexSize);1100 CArray<int,1> outLocalIndexToServer(indexSize);1101 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx)1102 {1103 outGlobalIndexOnServer(idx) = itIndex->first;1104 outLocalIndexToServer(idx) = itIndex->second;1105 }1106 1107 //int nbClient = client->clientSize; // This stupid variable signals the servers the number of client connect to them1108 1079 const std::list<int>& ranks = client->getRanksServerLeader(); 1109 1080 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1110 1081 { 1111 storeIndex_toSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1112 listOutIndex.push_back(CArray<size_t,1>(outGlobalIndexOnServer)); 1082 int rank = *itRank; 1083 int nb = 1; 1084 storeIndex_toSrv.insert(std::make_pair(rank, CArray<int,1>(nb))); 1085 listOutIndex.push_back(CArray<size_t,1>(nb)); 1086 1087 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank]; 1088 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1089 1090 for (int k = 0; k < nb; ++k) 1091 { 1092 outGlobalIndexOnServer(k) = 0; 1093 outLocalIndexToServer(k) = 0; 1094 } 1095 1096 listMsg.push_back(CMessage()); 1097 listMsg.back() << getId( )<< isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1098 1099 event.push(rank, 1, listMsg.back()); 1100 } 1101 client->sendEvent(event); 1102 } 1103 else 1104 client->sendEvent(event); 1105 } 1106 } 1107 1108 void CGrid::sendIndex(void) 1109 { 1110 CContext* context = CContext::getCurrent(); 1111 // CContextClient* client = context->client; 1112 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 1113 for (int i = 0; i < nbSrvPools; ++i) 1114 { 1115 CContextClient* client = context->hasServer ? 
context->clientPrimServer[i] : context->client ; 1116 1117 CEventClient event(getType(), EVENT_ID_INDEX); 1118 int rank; 1119 list<CMessage> listMsg; 1120 list<CArray<size_t,1> > listOutIndex; 1121 const CDistributionClient::GlobalLocalDataMap& globalLocalIndexSendToServer = clientDistribution_->getGlobalLocalDataSendToServer(); 1122 CDistributionClient::GlobalLocalDataMap::const_iterator itIndex = globalLocalIndexSendToServer.begin(), 1123 iteIndex = globalLocalIndexSendToServer.end(); 1124 1125 if (!doGridHaveDataDistributed()) 1126 { 1127 if (client->isServerLeader()) 1128 { 1129 int indexSize = globalLocalIndexSendToServer.size(); 1130 CArray<size_t,1> outGlobalIndexOnServer(indexSize); 1131 CArray<int,1> outLocalIndexToServer(indexSize); 1132 for (int idx = 0; itIndex != iteIndex; ++itIndex, ++idx) 1133 { 1134 outGlobalIndexOnServer(idx) = itIndex->first; 1135 outLocalIndexToServer(idx) = itIndex->second; 1136 } 1137 1138 //int nbClient = client->clientSize; // This stupid variable signals the servers the number of client connect to them 1139 const std::list<int>& ranks = client->getRanksServerLeader(); 1140 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 1141 { 1142 storeIndex_toSrv.insert(std::make_pair(*itRank, CArray<int,1>(outLocalIndexToServer))); 1143 listOutIndex.push_back(CArray<size_t,1>(outGlobalIndexOnServer)); 1144 1145 listMsg.push_back(CMessage()); 1146 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1147 1148 event.push(*itRank, 1, listMsg.back()); 1149 } 1150 client->sendEvent(event); 1151 } 1152 else 1153 client->sendEvent(event); 1154 } 1155 else 1156 { 1157 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itGlobalMap; 1158 itGlobalMap = globalIndexOnServer_.begin(); 1159 iteGlobalMap = globalIndexOnServer_.end(); 1160 1161 std::map<int,std::vector<int> >localIndexTmp; 1162 std::map<int,std::vector<size_t> > globalIndexTmp; 1163 for (; itGlobalMap != iteGlobalMap; ++itGlobalMap) 1164 { 1165 int serverRank = itGlobalMap->first; 1166 int indexSize = itGlobalMap->second.size(); 1167 const std::vector<size_t>& indexVec = itGlobalMap->second; 1168 for (int idx = 0; idx < indexSize; ++idx) 1169 { 1170 itIndex = globalLocalIndexSendToServer.find(indexVec[idx]); 1171 if (iteIndex != itIndex) 1172 { 1173 globalIndexTmp[serverRank].push_back(itIndex->first); 1174 localIndexTmp[serverRank].push_back(itIndex->second); 1175 } 1176 } 1177 } 1178 1179 // for (int ns = 0; ns < connectedServerRank_.size(); ++ns) 1180 for (int ns = 0; ns < client->serverSize; ++ns) 1181 { 1182 rank = connectedServerRank_[ns]; 1183 int nb = 0; 1184 if (globalIndexTmp.end() != globalIndexTmp.find(rank)) 1185 nb = globalIndexTmp[rank].size(); 1186 1187 storeIndex_toSrv.insert(make_pair(rank, CArray<int,1>(nb))); 1188 listOutIndex.push_back(CArray<size_t,1>(nb)); 1189 1190 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank]; 1191 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back(); 1192 1193 for (int k = 0; k < nb; ++k) 1194 { 1195 outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k); 1196 outLocalIndexToServer(k) = localIndexTmp[rank].at(k); 1197 } 1113 1198 1114 1199 listMsg.push_back(CMessage()); 1115 1200 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back(); 1116 1201 1117 event.push(*itRank, 1, listMsg.back()); 1118 } 1202 event.push(rank, nbSenders[rank], listMsg.back()); 1203 } 1204 1119 1205 
client->sendEvent(event); 1120 1206 } 1121 else1122 client->sendEvent(event);1123 }1124 else1125 {1126 CClientServerMapping::GlobalIndexMap::const_iterator iteGlobalMap, itGlobalMap;1127 itGlobalMap = globalIndexOnServer_.begin();1128 iteGlobalMap = globalIndexOnServer_.end();1129 1130 std::map<int,std::vector<int> >localIndexTmp;1131 std::map<int,std::vector<size_t> > globalIndexTmp;1132 for (; itGlobalMap != iteGlobalMap; ++itGlobalMap)1133 {1134 int serverRank = itGlobalMap->first;1135 int indexSize = itGlobalMap->second.size();1136 const std::vector<size_t>& indexVec = itGlobalMap->second;1137 for (int idx = 0; idx < indexSize; ++idx)1138 {1139 itIndex = globalLocalIndexSendToServer.find(indexVec[idx]);1140 if (iteIndex != itIndex)1141 {1142 globalIndexTmp[serverRank].push_back(itIndex->first);1143 localIndexTmp[serverRank].push_back(itIndex->second);1144 }1145 }1146 }1147 1148 for (int ns = 0; ns < connectedServerRank_.size(); ++ns)1149 {1150 rank = connectedServerRank_[ns];1151 int nb = 0;1152 if (globalIndexTmp.end() != globalIndexTmp.find(rank))1153 nb = globalIndexTmp[rank].size();1154 1155 storeIndex_toSrv.insert(make_pair(rank, CArray<int,1>(nb)));1156 listOutIndex.push_back(CArray<size_t,1>(nb));1157 1158 CArray<int, 1>& outLocalIndexToServer = storeIndex_toSrv[rank];1159 CArray<size_t, 1>& outGlobalIndexOnServer = listOutIndex.back();1160 1161 for (int k = 0; k < nb; ++k)1162 {1163 outGlobalIndexOnServer(k) = globalIndexTmp[rank].at(k);1164 outLocalIndexToServer(k) = localIndexTmp[rank].at(k);1165 }1166 1167 listMsg.push_back(CMessage());1168 listMsg.back() << getId() << isDataDistributed_ << isCompressible_ << listOutIndex.back();1169 1170 event.push(rank, nbSenders[rank], listMsg.back());1171 }1172 1173 client->sendEvent(event);1174 1207 } 1175 1208 } … … 1202 1235 { 1203 1236 CContext* context = CContext::getCurrent(); 1204 CContextServer* server = (context->hasServer) ? context->server : context->serverPrimServer; 1205 CContextClient* client = (context->hasServer) ? context->client : context->clientPrimServer; 1237 // CContextServer* server = (context->hasServer) ? context->server : context->serverPrimServer; 1238 // CContextClient* client = (context->hasServer) ? context->client : context->clientPrimServer; 1239 CContextServer* server = context->server; 1240 CContextClient* client = context->client; 1206 1241 numberWrittenIndexes_ = totalNumberWrittenIndexes_ = offsetWrittenIndexes_ = 0; 1207 1242 connectedServerRank_ = ranks; -
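The grid.cpp changes above all repeat one pattern: work that was formerly done once against a single CContextClient is now redone once per secondary-server pool, with the ordinary client used exactly once when the context has no server role. A reduced sketch of that loop, assuming stripped-down stand-ins for CContext and CContextClient and a hypothetical doWork placeholder for the grid methods (computeIndex, sendIndex, ...):

{{{
#!cpp
#include <vector>

struct CContextClient {};          // stand-in for the XIOS class

struct CContext                    // reduced to the fields used by the pattern
{
  bool hasServer;
  CContextClient* client;                        // ordinary client->server link
  std::vector<CContextClient*> clientPrimServer; // one entry per secondary pool
};

// doWork stands for any of the grid methods shown in the diff above.
void doWork(CContextClient* /*client*/) {}

void runOnEveryPool(CContext* context)
{
  int nbSrvPools = context->hasServer ? (int)context->clientPrimServer.size() : 1;
  for (int i = 0; i < nbSrvPools; ++i)
  {
    CContextClient* client = context->hasServer ? context->clientPrimServer[i]
                                                : context->client;
    doWork(client);
  }
}

int main()
{
  CContextClient pool0;
  CContext ctx;
  ctx.hasServer = true;
  ctx.client = 0;
  ctx.clientPrimServer.push_back(&pool0);
  runOnEveryPool(&ctx);            // doWork runs once, against pool 0
  return 0;
}
}}}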
XIOS/dev/dev_olga/src/node/variable.cpp
r987 r1009 85 85 if (context->hasClient) 86 86 { 87 // Use correct context client to send message 88 CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer 89 : context->client; 90 91 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ; 92 if (contextClientTmp->isServerLeader()) 87 // Use correct context client to send message 88 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 89 for (int i = 0; i < nbSrvPools; ++i) 93 90 { 94 CMessage msg ; 95 msg<<this->getId() ; 96 msg<<content ; 97 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 98 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 99 event.push(*itRank,1,msg); 100 contextClientTmp->sendEvent(event) ; 101 } 102 else contextClientTmp->sendEvent(event) ; 103 } 104 91 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer 92 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] 93 : context->client; 94 95 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ; 96 if (contextClientTmp->isServerLeader()) 97 { 98 CMessage msg ; 99 msg<<this->getId() ; 100 msg<<content ; 101 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 102 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 103 event.push(*itRank,1,msg); 104 contextClientTmp->sendEvent(event) ; 105 } 106 else contextClientTmp->sendEvent(event) ; 107 } 108 } 105 109 106 110 // if (!context->hasServer) … … 123 127 } 124 128 129 void CVariable::sendValue(const int srvPool) 130 { 131 CContext* context=CContext::getCurrent() ; 132 CContextClient* contextClientTmp = context->clientPrimServer[srvPool]; 133 CEventClient event(this->getType(),EVENT_ID_VARIABLE_VALUE) ; 134 if (contextClientTmp->isServerLeader()) 135 { 136 CMessage msg ; 137 msg<<this->getId() ; 138 msg<<content ; 139 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 140 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 141 event.push(*itRank,1,msg); 142 contextClientTmp->sendEvent(event) ; 143 } 144 else contextClientTmp->sendEvent(event) ; 145 } 146 125 147 /* 126 148 *\brief Receive value of a variable with its id from client to server -
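variable.cpp applies the same per-pool loop and additionally introduces a sendValue(const int srvPool) overload that targets one pool directly. The send itself uses the leader-push pattern visible in the diff; a reduced sketch, with stand-in types and a hypothetical sendValueToPool in place of the committed overload:

{{{
#!cpp
#include <list>

struct CMessage {};                 // carries the variable id and content in XIOS

struct CEventClient                 // reduced stand-in
{
  void push(int /*rank*/, int /*nbSender*/, CMessage& /*msg*/) {}
};

struct CContextClient               // reduced stand-in
{
  bool leader;
  std::list<int> leaderRanks;

  bool isServerLeader() const { return leader; }
  const std::list<int>& getRanksServerLeader() const { return leaderRanks; }
  void sendEvent(CEventClient& /*event*/) {}   // collective over the intra-comm
};

// One pool's worth of the sendValue logic: only the server leader fills the
// event with one message per leader rank, but every process calls sendEvent
// so the collective call still matches on all ranks.
void sendValueToPool(CContextClient* client, CMessage& msg)
{
  CEventClient event;
  if (client->isServerLeader())
  {
    const std::list<int>& ranks = client->getRanksServerLeader();
    for (std::list<int>::const_iterator it = ranks.begin(); it != ranks.end(); ++it)
      event.push(*it, 1, msg);
  }
  client->sendEvent(event);
}
}}}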
XIOS/dev/dev_olga/src/node/variable.hpp
r987 r1009 79 79 //! Sending a request to set up variable data 80 80 void sendValue(); 81 void sendValue(const int srvPool); 81 82 82 83 static void recvValue(CEventServer& event) ; -
XIOS/dev/dev_olga/src/object_template.hpp
r987 r1009 56 56 std::map<int, size_t> getMinimumBufferSizeForAttributes(); 57 57 void sendAttributToServer(const string& id); 58 void sendAttributToServer(const string& id, const int srvPool); 58 59 void sendAttributToServer(CAttribute& attr) ; 60 void sendAttributToServer(CAttribute& attr, const int srvPool) ; 59 61 void sendAllAttributesToServer(); 62 void sendAllAttributesToServer(const int srvPool); 60 63 void sendAddItem(const string& id, int itemType); 64 void sendAddItem(const string& id, int itemType, const int srvPool); 61 65 static void recvAttributFromClient(CEventServer& event) ; 62 66 static bool dispatchEvent(CEventServer& event) ; -
XIOS/dev/dev_olga/src/object_template_impl.hpp
r987 r1009 168 168 169 169 // Use correct context client to send message 170 CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client; 171 172 std::map<int, size_t> minimumSizes; 173 174 if (contextClientTmp->isServerLeader()) 170 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client; 171 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 172 for (int i = 0; i < nbSrvPools; ++i) 175 173 { 176 size_t minimumSize = 0; 177 CAttributeMap& attrMap = *this; 178 CAttributeMap::const_iterator it = attrMap.begin(), itE = attrMap.end(); 179 for (; it != itE; ++it) 174 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client; 175 176 std::map<int, size_t> minimumSizes; 177 178 if (contextClientTmp->isServerLeader()) 180 179 { 181 if (!it->second->isEmpty()) 180 size_t minimumSize = 0; 181 CAttributeMap& attrMap = *this; 182 CAttributeMap::const_iterator it = attrMap.begin(), itE = attrMap.end(); 183 for (; it != itE; ++it) 182 184 { 183 size_t size = it->second->getName().size() + sizeof(size_t) + it->second->size(); 184 if (size > minimumSize) 185 minimumSize = size; 185 if (!it->second->isEmpty()) 186 { 187 size_t size = it->second->getName().size() + sizeof(size_t) + it->second->size(); 188 if (size > minimumSize) 189 minimumSize = size; 190 } 191 } 192 193 if (minimumSize) 194 { 195 // Account for extra header info 196 minimumSize += CEventClient::headerSize + getIdServer().size() + sizeof(size_t); 197 198 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 199 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 200 minimumSizes.insert(std::make_pair(*itRank, minimumSize)); 186 201 } 187 202 } 188 189 if (minimumSize)190 {191 // Account for extra header info192 minimumSize += CEventClient::headerSize + getIdServer().size() + sizeof(size_t);193 194 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader();195 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)196 minimumSizes.insert(std::make_pair(*itRank, minimumSize));197 }198 }199 203 200 204 // if (client->isServerLeader()) … … 224 228 // } 225 229 226 return minimumSizes; 230 return minimumSizes; 231 } 227 232 } 228 233 … … 238 243 } 239 244 245 template<typename T> 246 void CObjectTemplate<T>::sendAllAttributesToServer(const int srvPool) 247 { 248 CAttributeMap& attrMap = *this; 249 CAttributeMap::const_iterator it = attrMap.begin(), itE = attrMap.end(); 250 for (; it != itE; ++it) 251 { 252 if (!(it->second)->isEmpty()) sendAttributToServer(*(it->second), srvPool); 253 } 254 } 255 240 256 template <class T> 241 257 void CObjectTemplate<T>::sendAttributToServer(const string& id) … … 244 260 CAttribute* attr=attrMap[id]; 245 261 sendAttributToServer(*attr); 262 } 263 264 template <class T> 265 void CObjectTemplate<T>::sendAttributToServer(const string& id, const int srvPool) 266 { 267 CAttributeMap & attrMap = *this; 268 CAttribute* attr=attrMap[id]; 269 sendAttributToServer(*attr, srvPool); 246 270 } 247 271 … … 252 276 253 277 // Use correct context client to send message 254 CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? 
context->clientPrimServer : context->client; 255 256 // if (!context->hasServer) 278 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client; 257 279 if (context->hasClient) 258 280 { 259 // CContextClient* contextClientTmp=context->contextClientTmp; 260 261 CEventClient event(getType(),EVENT_ID_SEND_ATTRIBUTE); 262 if (contextClientTmp->isServerLeader()) 263 { 264 CMessage msg; 265 // msg << this->getId(); 266 msg << this->getIdServer(); 267 msg << attr.getName(); 268 msg << attr; 269 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 270 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 271 event.push(*itRank,1,msg); 272 contextClientTmp->sendEvent(event); 273 } 274 else contextClientTmp->sendEvent(event); 281 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 282 for (int i = 0; i < nbSrvPools; ++i) 283 { 284 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client; 285 // CContextClient* contextClientTmp=context->contextClientTmp; 286 287 CEventClient event(getType(),EVENT_ID_SEND_ATTRIBUTE); 288 if (contextClientTmp->isServerLeader()) 289 { 290 CMessage msg; 291 // if (context->hasServer) 292 // { 293 // StdString tmp = this->getIdServer() + "_" +boost::lexical_cast<string>(i); 294 // msg<<tmp; 295 // } 296 // else 297 msg<<this->getIdServer(); 298 299 msg << attr.getName(); 300 msg << attr; 301 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 302 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 303 event.push(*itRank,1,msg); 304 contextClientTmp->sendEvent(event); 305 } 306 else contextClientTmp->sendEvent(event); 307 } 275 308 } 276 277 309 // // if (!context->hasServer) 278 310 // if (context->hasClient) … … 298 330 } 299 331 332 template <class T> 333 void CObjectTemplate<T>::sendAttributToServer(CAttribute& attr, const int srvPool) 334 { 335 CContext* context=CContext::getCurrent(); 336 CContextClient* contextClientTmp = context->clientPrimServer[srvPool]; 337 CEventClient event(getType(),EVENT_ID_SEND_ATTRIBUTE); 338 if (contextClientTmp->isServerLeader()) 339 { 340 CMessage msg; 341 msg<<this->getIdServer(); 342 msg << attr.getName(); 343 msg << attr; 344 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 345 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 346 event.push(*itRank,1,msg); 347 contextClientTmp->sendEvent(event); 348 } 349 else contextClientTmp->sendEvent(event); 350 } 351 352 300 353 /*! 301 354 This generic funtion only provides instance for sending, for receving, each … … 312 365 { 313 366 // Use correct context client to send message 314 CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? 
context->clientPrimServer : context->client; 315 CEventClient event(this->getType(),ItemType(itemType)); 316 if (contextClientTmp->isServerLeader()) 317 { 318 CMessage msg; 319 msg << this->getId(); 320 msg << id; 321 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 322 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 323 event.push(*itRank,1,msg); 324 contextClientTmp->sendEvent(event); 325 } 326 else contextClientTmp->sendEvent(event); 367 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client; 368 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 369 for (int i = 0; i < nbSrvPools; ++i) 370 { 371 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client; 372 CEventClient event(this->getType(),ItemType(itemType)); 373 if (contextClientTmp->isServerLeader()) 374 { 375 CMessage msg; 376 msg << this->getId(); 377 msg << id; 378 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 379 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 380 event.push(*itRank,1,msg); 381 contextClientTmp->sendEvent(event); 382 } 383 else contextClientTmp->sendEvent(event); 384 } 327 385 } 386 } 387 388 template<class T> 389 void CObjectTemplate<T>::sendAddItem(const StdString& id, int itemType, const int srvPool) 390 { 391 CContext* context = CContext::getCurrent(); 392 typedef typename T::EEventId ItemType; 393 CContextClient* contextClientTmp = context->clientPrimServer[srvPool]; 394 CEventClient event(this->getType(),ItemType(itemType)); 395 if (contextClientTmp->isServerLeader()) 396 { 397 CMessage msg; 398 msg << this->getId(); 399 msg << id; 400 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 401 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 402 event.push(*itRank,1,msg); 403 contextClientTmp->sendEvent(event); 404 } 405 else contextClientTmp->sendEvent(event); 328 406 } 329 407 -
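object_template gains the same pairing for attribute traffic: each sender now exists in a broadcast form that covers every secondary-server pool and a (const int srvPool) form that targets a single pool, so callers that already know the pool index can skip the loop. A sketch of that pairing under simplified assumptions (Pool, Obj, and sendAttribut are illustrative names; the committed broadcast forms also fall back to context->client when the context has no server):

{{{
#!cpp
#include <cstddef>
#include <vector>

struct Pool {};                       // stands in for one clientPrimServer entry

struct Obj                            // stands in for CObjectTemplate<T>
{
  std::vector<Pool*> pools;           // context->clientPrimServer in the diff

  // Broadcast form: loops over every secondary-server pool internally.
  void sendAttribut()
  {
    for (std::size_t i = 0; i < pools.size(); ++i)
      sendAttribut((int)i);
  }

  // Pool-targeted form: build the event against exactly one pool.
  void sendAttribut(int srvPool)
  {
    Pool* target = pools[srvPool];
    (void)target;                     // in XIOS: fill a CEventClient and sendEvent
  }
};
}}}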
XIOS/dev/dev_olga/src/server.cpp
r992 r1009 22 22 list<MPI_Comm> CServer::interComm ; 23 23 std::list<MPI_Comm> CServer::contextInterComms; 24 int CServer::nbSndSrvPools = (CXios::serverLevel == 0) ? 0 : 1; 25 int CServer::poolNb = 0; 24 26 bool CServer::isRoot = false ; 25 27 int CServer::rank = INVALID_RANK; 28 int CServer::rankSndServers = 0; 26 29 StdOFStream CServer::m_infoStream; 27 30 StdOFStream CServer::m_errorStream; … … 43 46 else is_MPI_Initialized=false ; 44 47 48 45 49 // Not using OASIS 46 50 if (!CXios::usingOasis) … … 52 56 } 53 57 CTimer::get("XIOS").resume() ; 54 55 int nbSrvLevels = 2;56 58 57 59 boost::hash<string> hashString ; … … 61 63 62 64 unsigned long* hashAll ; 65 unsigned long* hashAllServers ; 63 66 64 67 // int rank ; … … 66 69 int myColor ; 67 70 int i,c ; 68 MPI_Comm newComm 71 MPI_Comm newComm, serversInterComm; 69 72 70 73 MPI_Comm_size(CXios::globalComm, &size) ; 71 74 MPI_Comm_rank(CXios::globalComm, &rank); 75 72 76 hashAll=new unsigned long[size] ; 73 77 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ; 74 78 75 map<unsigned long, int> colors ;79 map<unsigned long, int> colors, colorsServers ; 76 80 map<unsigned long, int> leaders ; 77 81 map<unsigned long, int>::iterator it ; 82 // map<unsigned long, int> leadersServers ; 83 vector<int> leadersServers; 78 84 79 85 for(i=0,c=0;i<size;i++) … … 87 93 } 88 94 89 myColor=colors[hashServer] ; 90 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &intraComm) ; 95 nbSndSrvPools = size - leaders[hashServer2]; // one proc per secondary-server pool 96 97 // Taking into account multiple pools on secondary server 98 if (nbSndSrvPools > 1) 99 { 100 if (CXios::serverLevel > 1) 101 { 102 int nbProcs = size - leaders[hashServer2]; 103 int remain = nbProcs % nbSndSrvPools; 104 int procsPerPool = nbProcs / nbSndSrvPools; 105 rankSndServers = rank - leaders[hashServer2]; 106 StdString strTmp = CXios::xiosCodeIdSnd; 107 108 if (remain == 0) 109 { 110 poolNb = rankSndServers/procsPerPool; 111 } 112 else 113 { 114 if (rankSndServers <= (procsPerPool + 1) * remain) 115 poolNb = rankSndServers/(procsPerPool+1); 116 else 117 { 118 poolNb = remain + 1; 119 rankSndServers -= (procsPerPool + 1) * remain; 120 rankSndServers -= procsPerPool; 121 poolNb += rankSndServers/procsPerPool; 122 } 123 } 124 strTmp += boost::lexical_cast<string>(poolNb+1); // add 1 to avoid hashing zero 125 hashServer = hashString(strTmp); 126 hashServer2 = hashString(strTmp); 127 } 128 } 129 130 if (nbSndSrvPools > 1) 131 { 132 myColor = size; 133 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &serversInterComm) ; 134 } 135 else 136 { 137 myColor=colors[hashServer] ; 138 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &intraComm) ; 139 } 140 141 if (nbSndSrvPools > 1) 142 { 143 int sizeServers; 144 // int rankServers; 145 // MPI_Comm_rank(serversInterComm, &rankServers) ; 146 MPI_Comm_size(serversInterComm, &sizeServers) ; 147 hashAllServers=new unsigned long[sizeServers] ; 148 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAllServers, 1, MPI_LONG, serversInterComm) ; 149 150 for(i=0, c=0; i<sizeServers; i++) 151 { 152 if (colorsServers.find(hashAllServers[i])==colorsServers.end()) 153 { 154 colorsServers[hashAllServers[i]]=c ; 155 // leadersServers[hashAllServers[i]]= leaders[hashServer1] + i ; 156 leadersServers.push_back( leaders[hashServer1] + i ); 157 c++ ; 158 } 159 } 160 myColor=colorsServers[hashServer] ; 161 MPI_Comm_split(serversInterComm, myColor, rank, &intraComm) ; 162 } 163 91 164 92 165 if (CXios::serverLevel == 0) … … 104 177 <<" intraCommRank 
:"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 105 178 106 179 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 107 180 interCommLeft.push_back(newComm) ; 108 181 interComm.push_back(newComm) ; … … 110 183 } 111 184 } 112 113 else if ((CXios::serverLevel == 1)) 114 { 115 int clientLeader; 116 int srvSndLeader; 117 for(it=leaders.begin();it!=leaders.end();it++) 185 else 186 { 187 if ((CXios::serverLevel == 1)) 118 188 { 119 if (it->first != hashServer2) 120 { 121 if (it->first != hashServer1) 189 // Creating interComm with client (interCommLeft) 190 int clientLeader; 191 int srvSndLeader; 192 for(it=leaders.begin();it!=leaders.end();it++) 193 { 194 if (it->first != hashServer2) 122 195 { 123 clientLeader=it->second ; 124 int intraCommSize, intraCommRank ; 125 MPI_Comm_size(intraComm,&intraCommSize) ; 126 MPI_Comm_rank(intraComm,&intraCommRank) ; 127 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 128 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 129 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 130 interCommLeft.push_back(newComm) ; 131 interComm.push_back(newComm) ; 196 if (it->first != hashServer1) 197 { 198 clientLeader=it->second ; 199 int intraCommSize, intraCommRank ; 200 MPI_Comm_size(intraComm, &intraCommSize) ; 201 MPI_Comm_rank(intraComm, &intraCommRank) ; 202 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 203 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 204 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 205 interCommLeft.push_back(newComm) ; 206 interComm.push_back(newComm) ; 207 } 132 208 } 209 else 210 { 211 srvSndLeader = it->second; 212 } 213 } 214 215 // Creating interComm with secondary server pool(s) (interCommRight) 216 // if (nbSndSrvPools < 1) 217 if (nbSndSrvPools < 2) 218 { 219 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 220 interCommRight.push_back(CClient::getInterComm()); 221 interComm.push_back(CClient::getInterComm()); 133 222 } 134 223 else 135 224 { 136 srvSndLeader = it->second; 137 } 138 } 139 140 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 141 interCommRight.push_back(CClient::getInterComm()); 142 interComm.push_back(CClient::getInterComm()); 143 144 } 145 146 else // secondary server pool 147 { 148 int clientLeader; 149 for(it=leaders.begin();it!=leaders.end();it++) 225 // for(it=leadersServers.begin();it!=leadersServers.end();it++) 226 // { 227 // if (it->first != hashServer) 228 // { 229 // srvSndLeader = it->second; 230 // CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 231 // interCommRight.push_back(CClient::getInterComm()); 232 // interComm.push_back(CClient::getInterComm()); 233 // } 234 // } 235 236 for(int i = 1; i < leadersServers.size(); ++i) 237 { 238 srvSndLeader = leadersServers[i]; 239 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 240 interCommRight.push_back(CClient::getInterComm()); 241 interComm.push_back(CClient::getInterComm()); 242 } 243 } 244 } // primary server 245 246 else // secondary server pool(s) 150 247 { 151 if (it->first == hashServer1) 152 { 153 clientLeader=it->second ; 154 int intraCommSize, intraCommRank ; 155 MPI_Comm_size(intraComm,&intraCommSize) ; 156 MPI_Comm_rank(intraComm,&intraCommRank) ; 157 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 158 <<" intraCommRank 
:"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 159 160 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 161 interCommLeft.push_back(newComm) ; 162 interComm.push_back(newComm) ; 163 164 break; 165 } 166 } 167 } 168 248 int clientLeader; 249 if (nbSndSrvPools < 2) 250 // if (nbSndSrvPools < 1) 251 { 252 for(it=leaders.begin();it!=leaders.end();it++) 253 { 254 if (it->first == hashServer1) 255 { 256 clientLeader=it->second ; 257 int intraCommSize, intraCommRank ; 258 MPI_Comm_size(intraComm, &intraCommSize) ; 259 MPI_Comm_rank(intraComm, &intraCommRank) ; 260 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 261 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 262 263 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 264 interCommLeft.push_back(newComm) ; 265 interComm.push_back(newComm) ; 266 267 break; 268 } 269 } 270 } 271 else 272 { 273 // for(it=leadersServers.begin();it!=leadersServers.end();it++) 274 { 275 // if (it->first == hashServer1) 276 { 277 // clientLeader=it->second ; 278 clientLeader = leadersServers[0]; 279 int intraCommSize, intraCommRank ; 280 MPI_Comm_size(intraComm, &intraCommSize) ; 281 MPI_Comm_rank(intraComm, &intraCommRank) ; 282 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 283 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 284 285 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 286 interCommLeft.push_back(newComm) ; 287 interComm.push_back(newComm) ; 288 } 289 } 290 } 291 } // secondary server 292 } // CXios::serverLevel != 0 293 294 if (nbSndSrvPools > 1) delete [] hashAllServers; 169 295 delete [] hashAll ; 170 296 … … 204 330 } 205 331 206 // int rank; 207 MPI_Comm_rank(intraComm,&rank) ; 332 MPI_Comm_rank(intraComm, &rank) ; 208 333 if (rank==0) isRoot=true; 209 334 else isRoot=false; … … 489 614 if (CXios::serverLevel == 1) 490 615 { 491 CClient::registerContext(contextId, intraComm); 616 // CClient::registerContext(contextId, intraComm); 617 CClient::registerContextOnSrvPools(contextId, intraComm); 492 618 } 493 619 … … 531 657 int numDigit = 0; 532 658 int size = 0; 659 int mpiRank; 533 660 MPI_Comm_size(CXios::globalComm, &size); 534 661 while (size) … … 538 665 } 539 666 540 fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext; 667 if (nbSndSrvPools < 2) 668 mpiRank = getRank(); 669 else 670 mpiRank = rankSndServers; 671 fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << mpiRank << ext; 541 672 fb->open(fileNameClient.str().c_str(), std::ios::out); 542 673 if (!fb->is_open()) -
XIOS/dev/dev_olga/src/server.hpp
r992 r1009 27 27 // Communicators for the primary group of servers 28 28 static MPI_Comm intraComm; 29 static list<MPI_Comm> interCommLeft; // interComm between server and its client (client or primary server)29 static list<MPI_Comm> interCommLeft; // interComm between server (primary or secondary) and its client (client or primary server) 30 30 static list<MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool) 31 31 static list<MPI_Comm> interComm; // interCommLeft + interCommRight 32 32 static std::list<MPI_Comm> contextInterComms; 33 33 static CEventScheduler* eventScheduler; 34 35 static int nbSndSrvPools; // number of secondary server pools 36 static int poolNb; // for secondary servers; stores the pool number 34 37 35 38 struct contextMessage … … 65 68 private: 66 69 static int rank; 70 static int rankSndServers; 67 71 static StdOFStream m_infoStream; 68 72 static StdOFStream m_errorStream; -
XIOS/dev/dev_olga/src/test/test_regular.f90
r924 r1009 20 20 INTEGER :: size, rank 21 21 22 INTEGER :: nlon = 10023 INTEGER :: nlat = 10022 INTEGER :: nlon = 5 !100 23 INTEGER :: nlat = 5 !100 24 24 INTEGER :: ncell 25 25 INTEGER :: ilat, ilon, ind … … 37 37 DOUBLE PRECISION,ALLOCATABLE :: bounds_lat(:,:) 38 38 DOUBLE PRECISION,ALLOCATABLE :: field_temp(:,:) 39 DOUBLE PRECISION,ALLOCATABLE :: field_temp1(:,:) 40 DOUBLE PRECISION,ALLOCATABLE :: field_temp2(:,:) 39 41 40 42 !!! MPI Initialization … … 118 120 ALLOCATE(bounds_lat(4,ni)) 119 121 ALLOCATE(field_temp(ni,ntime)) 120 lon = lon_glo(1+ibegin:1+ibegin+ni) 121 lat = lat_glo(1+ibegin:1+ibegin+ni) 122 bounds_lon(:,:) = bounds_lon_glo(:,1+ibegin:1+ibegin+ni) 123 bounds_lat(:,:) = bounds_lat_glo(:,1+ibegin:1+ibegin+ni) 122 ALLOCATE(field_temp1(ni,ntime)) 123 ALLOCATE(field_temp2(ni,ntime)) 124 lon = lon_glo(ibegin:1+ibegin+ni) 125 lat = lat_glo(ibegin:1+ibegin+ni) 126 bounds_lon(:,:) = bounds_lon_glo(:,ibegin:1+ibegin+ni) 127 bounds_lat(:,:) = bounds_lat_glo(:,ibegin:1+ibegin+ni) 128 124 129 field_temp(:,:) = rank 130 field_temp1(:,:) = rank 131 field_temp2(:,:) = rank + 10 125 132 126 133 … … 135 142 time_origin=xios_date(1999, 01, 01, 15, 00, 00)) 136 143 137 CALL xios_set_domain_attr("face", ni_glo=ncell, ibegin=ibegin, ni=ni, type='unstructured') 138 CALL xios_set_domain_attr("face", lonvalue_1d=lon, latvalue_1d=lat) 139 CALL xios_set_domain_attr("face", bounds_lon_1d=bounds_lon, bounds_lat_1d=bounds_lat) 140 144 CALL xios_set_domain_attr("face1", ni_glo=ncell, ibegin=ibegin, ni=ni, type='unstructured') 145 CALL xios_set_domain_attr("face1", lonvalue_1d=lon, latvalue_1d=lat) 146 CALL xios_set_domain_attr("face1", bounds_lon_1d=bounds_lon, bounds_lat_1d=bounds_lat) 147 148 CALL xios_set_domain_attr("face2", ni_glo=ncell, ibegin=ibegin, ni=ni, type='unstructured') 149 CALL xios_set_domain_attr("face2", lonvalue_1d=lon, latvalue_1d=lat) 150 CALL xios_set_domain_attr("face2", bounds_lon_1d=bounds_lon, bounds_lat_1d=bounds_lat) 141 151 142 152 !!! Definition du timestep … … 160 170 161 171 !!! On donne la valeur du champ atm 162 CALL xios_send_field("temp",field_temp(:,1)) 172 CALL xios_send_field("temp1",field_temp1(:,1)) 173 CALL xios_send_field("temp2",field_temp2(:,1)) 163 174 164 175 ENDDO … … 178 189 DEALLOCATE(bounds_lon, bounds_lat) 179 190 DEALLOCATE(field_temp) 191 DEALLOCATE(field_temp1, field_temp2) 180 192 181 193 !!! Fin de XIOS -
XIOS/dev/dev_olga/src/type/message.cpp
r680 r1009 27 27 size_t retSize=0 ; 28 28 29 for(it=typeList.begin();it!=typeList.end();it++) retSize+=(*it)->size() ; 29 for(it=typeList.begin();it!=typeList.end();it++) 30 retSize+=(*it)->size() ; 30 31 return retSize ; 31 32 }