Timestamp:
05/16/17 17:54:30
Author:
yushan
Message:

Branch merged with trunk r1130.

File:
1 edited

Legend:

  (unmarked)  Unmodified
  +           Added
  -           Removed
  …           Unchanged lines omitted
  • XIOS/dev/branch_yushan_merged/src/client.cpp

r1032 → r1134

…
 #include "timer.hpp"
 #include "buffer_client.hpp"
+#include "log.hpp"
+

 namespace xios
 {
+    extern int test_omp_rank;
+    #pragma omp threadprivate(test_omp_rank)

     MPI_Comm CClient::intraComm ;
     MPI_Comm CClient::interComm ;
-    std::list<MPI_Comm> CClient::contextInterComms;
+    std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0;
     int CClient::serverLeader ;
     bool CClient::is_MPI_Initialized ;
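Note on this hunk: the merge apparently prepares the client to run as OpenMP threads rather than separate MPI processes; `#pragma omp threadprivate` gives every thread its own copy of the file-scope `test_omp_rank`. A minimal, self-contained sketch of that mechanism (only the variable name is taken from the hunk above; the rest is illustrative):

  #include <cstdio>
  #include <omp.h>

  int test_omp_rank = -1;                    // one instance per OpenMP thread
  #pragma omp threadprivate(test_omp_rank)

  int main()
  {
    #pragma omp parallel
    {
      test_omp_rank = omp_get_thread_num();  // each thread writes only its own copy
      #pragma omp critical(_output)          // serialize output, as in the changeset
      printf("thread %d sees test_omp_rank = %d\n",
             omp_get_thread_num(), test_omp_rank);
    }
    return 0;
  }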
     
…
     StdOFStream CClient::m_errorStream;

-    void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)
+    StdOFStream CClient::array_infoStream[10];
+
+    void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
     {
       int initialized ;
     
…
       {
 // localComm doesn't given
+
         if (localComm == MPI_COMM_NULL)
         {
           if (!is_MPI_Initialized)
           {
-            MPI_Init(NULL, NULL);
+            //MPI_Init(NULL, NULL);
+            int return_level;
+            MPI_Init_thread(NULL, NULL, 3, &return_level);
+            assert(return_level == 3);
           }
           CTimer::get("XIOS").resume() ;
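Note on this hunk: the literal `3` passed to `MPI_Init_thread` corresponds to `MPI_THREAD_MULTIPLE` in the common MPI implementations (MPICH, Open MPI). A sketch of the same initialization spelled with the named constant, which is the portable form (the wrapper function is hypothetical; the changeset calls MPI_Init_thread directly):

  #include <mpi.h>
  #include <cassert>

  void init_mpi_for_threads(int* argc, char*** argv)
  {
    int provided = MPI_THREAD_SINGLE;
    MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, &provided);
    // The library may grant a lower level than requested, so the result
    // must be checked before spawning threads that call MPI concurrently.
    assert(provided == MPI_THREAD_MULTIPLE);
  }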
     
…
           int myColor ;
           int i,c ;
-          MPI_Comm newComm ;
-
-          MPI_Comm_size(CXios::globalComm,&size) ;
+
+          MPI_Comm_size(CXios::globalComm,&size);
           MPI_Comm_rank(CXios::globalComm,&rank);
+

           hashAll=new unsigned long[size] ;
     
…
             MPI_Comm_size(intraComm,&intraCommSize) ;
             MPI_Comm_rank(intraComm,&intraCommRank) ;
-            info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
-                 <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< serverLeader<<endl ;
+
+            #pragma omp critical(_output)
+            {
+              info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
+                 <<" intraCommRank :"<<intraCommRank<<"  serverLeader "<< serverLeader
+                 <<" globalComm : "<< &(CXios::globalComm) << endl ;
+            }
+
+
+
             MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
+
           }
           else
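Note on this hunk: the log statement is now wrapped in a named critical section. A chained `operator<<` expression is a sequence of separate calls, so without mutual exclusion fragments written by different OpenMP threads can interleave in the shared output stream. A reduced sketch of the pattern (stream and message text are illustrative):

  #include <iostream>
  #include <omp.h>

  void log_intracomm(int intraCommSize, int intraCommRank)
  {
    // All critical blocks named (_output) share one lock, so each log
    // record is emitted whole, never interleaved with another thread's.
    #pragma omp critical(_output)
    {
      std::cout << "client " << omp_get_thread_num()
                << " intraCommSize : " << intraCommSize
                << " intraCommRank : " << intraCommRank << std::endl;
    }
  }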
     
…

       MPI_Comm_dup(intraComm,&returnComm) ;
+
     }

     
…
     {
       CContext::setCurrent(id) ;
-      CContext* context=CContext::create(id);
+      CContext* context = CContext::create(id);
+
+      int tmp_rank;
+      MPI_Comm_rank(contextComm,&tmp_rank) ;
+
       StdString idServer(id);
       idServer += "_server";
     
…
       {
         int size,rank,globalRank ;
-        size_t message_size ;
-        int leaderRank ;
+        //size_t message_size ;
+        //int leaderRank ;
         MPI_Comm contextInterComm ;

     
…
         CMessage msg ;
         msg<<idServer<<size<<globalRank ;
-//        msg<<id<<size<<globalRank ;
+

         int messageSize=msg.size() ;
     
…

         MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-        info(10)<<"Register new Context : "<<id<<endl ;
+
+        #pragma omp critical(_output)
+        info(10)<<" RANK "<< tmp_rank<<" Register new Context : "<<id<<endl ;
+

         MPI_Comm inter ;
     
…
         MPI_Barrier(inter) ;

+
         context->initClient(contextComm,contextInterComm) ;

-        contextInterComms.push_back(contextInterComm);
+
+        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+        contextInterComms_ptr->push_back(contextInterComm);
+
         MPI_Comm_free(&inter);
       }
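Note on this hunk: the static `std::list` has become a pointer that is allocated on first use, so the container only comes into existence once a context actually registers an inter-communicator. A sketch of the pattern as it appears in this and the following hunk (the helper function is illustrative; the changeset inlines the check at both call sites, and the bare check-then-allocate is only safe if registrations are already serialized):

  #include <cstddef>
  #include <list>
  #include <mpi.h>

  static std::list<MPI_Comm>* contextInterComms_ptr = 0;

  void record_context_intercomm(MPI_Comm contextInterComm)
  {
    // Allocate the list lazily, then remember the communicator so it
    // can be freed during finalize().
    if (contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
    contextInterComms_ptr->push_back(contextInterComm);
  }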
     
…
         // Finally, we should return current context to context client
         CContext::setCurrent(id);
-
-        contextInterComms.push_back(contextInterComm);
+
+        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+        contextInterComms_ptr->push_back(contextInterComm);
+
       }
     }
     
…

       MPI_Comm_rank(intraComm,&rank) ;
-
+
       if (!CXios::isServer)
       {
     
…
       }

-      for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+      for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); ++it)
         MPI_Comm_free(&(*it));
+
       MPI_Comm_free(&interComm);
       MPI_Comm_free(&intraComm);
     
…
       {
         if (CXios::usingOasis) oasis_finalize();
-        else MPI_Finalize() ;
-      }
-
-      info(20) << "Client side context is finalized"<<endl ;
-      report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-      report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-      report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
-      report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
-//      report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-      report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-      report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+        else MPI_Finalize();
+      }
+
+      #pragma omp critical (_output)
+      info(20) << "Client "<<rank<<" : Client side context is finalized "<< endl ;
+
+ /*    #pragma omp critical (_output)
+      {
+         report(0) <<"     Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
+         report(0)<< "     Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
+         report(0)<< "     Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
+         report(0)<< "     Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
+         report(0)<< "     Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
+         report(0)<< "     Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
+         report(0)<< "     Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+       }
+*/
    }

     
…

       fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;
+
       fb->open(fileNameClient.str().c_str(), std::ios::out);
       if (!fb->is_open())
         ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
-              << std::endl << "Can not open <" << fileNameClient << "> file to write the client log(s).");
+           << std::endl << "Can not open <" << fileNameClient << "> file to write the client log(s).");
     }

     
…
     void CClient::openInfoStream(const StdString& fileName)
     {
-      std::filebuf* fb = m_infoStream.rdbuf();
-      openStream(fileName, ".out", fb);
-
-      info.write2File(fb);
-      report.write2File(fb);
+      //std::filebuf* fb = m_infoStream.rdbuf();
+
+      info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();
+
+      openStream(fileName, ".out", info_FB[omp_get_thread_num()]);
+
+      info.write2File(info_FB[omp_get_thread_num()]);
+      report.write2File(info_FB[omp_get_thread_num()]);
+
     }

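Note on this hunk: `openInfoStream` now selects a per-thread `filebuf` out of `array_infoStream`, indexed by `omp_get_thread_num()`, so each OpenMP thread writes its log through its own buffer instead of all threads sharing the single `m_infoStream`. A condensed sketch of the idea (the file-naming scheme here is hypothetical; in the changeset the path is built by `openStream` from the client rank):

  #include <fstream>
  #include <sstream>
  #include <string>
  #include <omp.h>

  // Mirrors the fixed-size array added in this changeset; its size of 10
  // caps the number of OpenMP threads that can open a log.
  static std::ofstream array_infoStream[10];

  void open_per_thread_log(const std::string& baseName)
  {
    int tid = omp_get_thread_num();
    std::ostringstream name;
    name << baseName << "_" << tid << ".out";   // hypothetical naming scheme
    array_infoStream[tid].open(name.str().c_str(), std::ios::out);
    array_infoStream[tid] << "log opened by thread " << tid << std::endl;
  }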