Changeset 1077 for XIOS/dev/dev_olga/src
- Timestamp:
- 03/17/17 15:05:36 (7 years ago)
- Location:
- XIOS/dev/dev_olga/src
- Files:
-
- 8 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_olga/src/buffer_client.cpp
r1071 r1077 80 80 { 81 81 traceOff(); 82 MPI_Test(&request, &flag, &status); 82 MPI_Errhandler_set(interComm,MPI_ERRORS_RETURN); 83 error=MPI_Test(&request, &flag, &status); 84 if (error != MPI_SUCCESS) 85 { 86 MPI_Error_class(error, &errclass); 87 MPI_Error_string(error, errstring, &len); 88 ERROR("MPI error class: ", <<errclass<<" MPI error "<<errstring ); 89 } 83 90 traceOn(); 84 91 if (flag == true) pending = false; -
XIOS/dev/dev_olga/src/context_client.cpp
r1071 r1077 264 264 { 265 265 map<int,CClientBuffer*>::iterator itBuff; 266 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second; 267 // buffersReleased_ = true; 266 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 267 delete itBuff->second; 268 buffers.clear(); 268 269 } 269 270 -
XIOS/dev/dev_olga/src/context_client.hpp
r1071 r1077 79 79 StdSize maxBufferedEvents; 80 80 81 // bool buffersReleased_;82 83 81 struct { 84 82 std::list<int> ranks, sizes; -
XIOS/dev/dev_olga/src/cxios.hpp
r1054 r1077 29 29 static string rootFile ; //!< Configuration filename 30 30 static string xiosCodeId ; //!< Identity for XIOS 31 // static string xiosCodeIdPrm ; //!< Identity for XIOS primary server32 // static string xiosCodeIdSnd ; //!< Identity for XIOS secondary server33 31 static string clientFile; //!< Filename template for client 34 32 static string serverFile; //!< Filename template for server -
XIOS/dev/dev_olga/src/node/context.cpp
r1071 r1077 460 460 if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 461 461 } 462 463 // for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)464 // MPI_Comm_free(&(*it));465 // comms.clear();466 467 462 } 468 463 } 464 469 465 //! Free internally allocated communicators 470 466 void CContext::freeComms(void) … … 562 558 void CContext::closeDefinition(void) 563 559 { 564 // There is nothing client need to send to server565 // if (hasClient)566 // if (hasClient && !hasServer)567 // if (hasClient)568 // {569 // // After xml is parsed, there are some more works with post processing570 // postProcessing();571 // }572 573 // setClientServerBuffer();574 575 // // if (hasClient && !hasServer)576 // if (hasClient)577 // {578 // // Send all attributes of current context to server579 // this->sendAllAttributesToServer();580 581 // // Send all attributes of current calendar582 // CCalendarWrapper::get(CCalendarWrapper::GetDefName())->sendAllAttributesToServer();583 584 // // We have enough information to send to server585 // // First of all, send all enabled files586 // sendEnabledFiles();587 588 // // Then, send all enabled fields589 // sendEnabledFields();590 591 // // At last, we have all info of domain and axis, then send them592 // sendRefDomainsAxis();593 594 // // After that, send all grid (if any)595 // sendRefGrid();596 597 // // // We have a xml tree on the server side and now, it should be also processed598 // sendPostProcessing();599 // }600 601 602 // Now tell server that it can process all messages from client603 // if (hasClient) this->sendCloseDefinition();604 560 postProcessingGlobalAttributes(); 605 561 … … 611 567 if (hasClient && !hasServer) 612 568 { 613 // this->buildFilterGraphOfEnabledFields(); // references are resolved here (access xml file)614 569 buildFilterGraphOfFieldsWithReadAccess(); 615 // this->solveAllRefOfEnabledFields(true);616 570 } 617 571 … … 619 573 
this->processGridEnabledFields(); 620 574 if (hasClient) this->sendProcessingGridOfEnabledFields(); 621 // if (hasClient) // We have a xml tree on the server side and now, it should be also processed 622 // sendPostProcessing(); 623 // // Now tell server that it can process all messages from client 624 //// if (hasClient && !hasServer) this->sendCloseDefinition(); 625 if (hasClient) this->sendCloseDefinition(); 575 if (hasClient) this->sendCloseDefinition(); 626 576 627 577 // Nettoyage de l'arborescence 628 // if (hasClient && !hasServer) CleanTree(); // Only on client side??629 578 if (hasClient) CleanTree(); // Only on client side?? 630 579 -
XIOS/dev/dev_olga/src/node/grid.cpp
r1054 r1077 1949 1949 { 1950 1950 CDomain* pDom = CDomain::get(*it); 1951 //if (context->hasClient && !context->hasServer)1952 if (context->hasClient)1951 if (context->hasClient && !context->hasServer) 1952 //if (context->hasClient) 1953 1953 { 1954 1954 pDom->solveRefInheritance(apply); … … 1962 1962 { 1963 1963 CAxis* pAxis = CAxis::get(*it); 1964 //if (context->hasClient && !context->hasServer)1965 if (context->hasClient)1964 if (context->hasClient && !context->hasServer) 1965 // if (context->hasClient) 1966 1966 { 1967 1967 pAxis->solveRefInheritance(apply); … … 1975 1975 { 1976 1976 CScalar* pScalar = CScalar::get(*it); 1977 //if (context->hasClient && !context->hasServer)1978 if (context->hasClient)1977 if (context->hasClient && !context->hasServer) 1978 // if (context->hasClient) 1979 1979 { 1980 1980 pScalar->solveRefInheritance(apply); -
XIOS/dev/dev_olga/src/server.cpp
r1071 r1077 30 30 int CServer::nbContexts_ = 0; 31 31 bool CServer::isRoot = false ; 32 int CServer::rank = INVALID_RANK;32 int CServer::rank_ = INVALID_RANK; 33 33 StdOFStream CServer::m_infoStream; 34 34 StdOFStream CServer::m_errorStream; … … 81 81 82 82 MPI_Comm_size(CXios::globalComm, &size) ; 83 MPI_Comm_rank(CXios::globalComm, &rank );83 MPI_Comm_rank(CXios::globalComm, &rank_); 84 84 85 85 hashAll=new unsigned long[size] ; … … 104 104 if (CXios::usingServer2) 105 105 { 106 int serverRank = rank - leaders[hashServer]; // server proc rank starting 0106 int serverRank = rank_ - leaders[hashServer]; // server proc rank starting 0 107 107 serverSize_ = size - leaders[hashServer]; 108 108 nbPools = serverSize_ * CXios::ratioServer2 / 100; … … 115 115 serverLevel = 2; 116 116 poolId = serverRank - serverSize_ + nbPools; 117 myColor = rank ;117 myColor = rank_; 118 118 } 119 119 } 120 120 121 MPI_Comm_split(CXios::globalComm, myColor, rank , &intraComm) ;121 MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ; 122 122 123 123 if (serverLevel == 0) … … 132 132 MPI_Comm_size(intraComm,&intraCommSize) ; 133 133 MPI_Comm_rank(intraComm,&intraCommRank) ; 134 info(50)<<"intercommCreate::server "<<rank <<" intraCommSize : "<<intraCommSize134 info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<intraCommSize 135 135 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 136 136 … … 152 152 MPI_Comm_size(intraComm, &intraCommSize) ; 153 153 MPI_Comm_rank(intraComm, &intraCommRank) ; 154 info(50)<<"intercommCreate::server "<<rank <<" intraCommSize : "<<intraCommSize154 info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<intraCommSize 155 155 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 156 156 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; … … 167 167 MPI_Comm_size(intraComm, &intraCommSize) ; 168 168 MPI_Comm_rank(intraComm, &intraCommRank) ; 169 
info(50)<<"intercommCreate::client "<<rank <<" intraCommSize : "<<intraCommSize169 info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 170 170 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ; 171 171 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ; … … 180 180 MPI_Comm_size(intraComm, &intraCommSize) ; 181 181 MPI_Comm_rank(intraComm, &intraCommRank) ; 182 info(50)<<"intercommCreate::server "<<rank <<" intraCommSize : "<<intraCommSize182 info(50)<<"intercommCreate::server "<<rank_<<" intraCommSize : "<<intraCommSize 183 183 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 184 184 … … 202 202 MPI_Comm_dup(localComm, &intraComm); 203 203 204 MPI_Comm_rank(intraComm,&rank ) ;204 MPI_Comm_rank(intraComm,&rank_) ; 205 205 MPI_Comm_size(intraComm,&size) ; 206 206 string codesId=CXios::getin<string>("oasis_codes_id") ; … … 217 217 { 218 218 oasis_get_intercomm(newComm,*it) ; 219 if (rank ==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;219 if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ; 220 220 MPI_Comm_remote_size(newComm,&size); 221 221 // interComm.push_back(newComm) ; … … 225 225 } 226 226 227 int rankServer; 228 MPI_Comm_rank(intraComm, &rankServer) ; 229 if (rankServer==0) isRoot=true; 227 MPI_Comm_rank(intraComm, &rank_) ; 228 if (rank_==0) isRoot=true; 230 229 else isRoot=false; 231 230 … … 249 248 // MPI_Comm_free(&(*it)); 250 249 251 for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++)252 MPI_Comm_free(&(*it));250 // for (std::list<MPI_Comm>::iterator it = interCommLeft.begin(); it != interCommLeft.end(); it++) 251 // MPI_Comm_free(&(*it)); 253 252 254 253 for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++) … … 524 523 { 525 524 StdString str = contextId +"_server_" + boost::lexical_cast<string>(i); 526 msg<<str<<size<<rank ;525 
msg<<str<<size<<rank_ ; 527 526 messageSize = msg.size() ; 528 527 buff = new char[messageSize] ; … … 555 554 if (finished) 556 555 { 557 // it->second->freeComms(); // deallocate internally allcoated context communicators556 it->second->freeComms(); // deallocate internally allocated context communicators 558 557 contextList.erase(it) ; 559 558 break ; 560 559 } 561 560 else 562 {563 561 finished=it->second->checkBuffersAndListen(); 564 }565 562 } 566 563 } … … 569 566 int CServer::getRank() 570 567 { 571 return rank ;568 return rank_; 572 569 } 573 570 … … 598 595 { 599 596 if (serverLevel == 1) 600 id = rank -serverLeader_;597 id = rank_; 601 598 else 602 599 id = poolId; -
XIOS/dev/dev_olga/src/server.hpp
r1071 r1077 64 64 65 65 private: 66 static int rank ;66 static int rank_; 67 67 static int serverLeader_; //!< Leader of the classical or primary server (needed in case of secondary servers) 68 68 static int serverSize_; //!< Number of procs dedicated to servers (primary and secondary (if any) combined)
Note: See TracChangeset
for help on using the changeset viewer.