Timestamp:
01/23/19 10:31:44
Author:
yushan
Message:

dev on ADA. add flag switch _usingEP/_usingMPI

File:
1 edited

Legend:

    ' '  Unmodified
    '+'  Added
    '-'  Removed
  • XIOS/dev/branch_openmp/src/client.cpp

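The r1642 side of the diff below systematically replaces direct MPI_* calls with ep_lib::MPI_* wrappers, and MPI_* handles and datatypes with EP_* equivalents (EP_COMM_NULL, EP_INT, EP_LONG, EP_CHAR), matching the _usingEP/_usingMPI switch named in the commit message. As orientation for reading the diff, here is a minimal, hypothetical sketch of how such a compile-time switch can be laid out; the ep_lib:: and EP_* names are the ones used in the diff, but the flag handling and forwarding shims are assumptions, not the actual XIOS headers:

    // Hypothetical sketch only: illustrates the _usingEP/_usingMPI switch
    // pattern, not the real XIOS ep_lib headers.
    #ifdef _usingMPI          // plain-MPI build: wrappers alias the MPI library
      #include <mpi.h>

      namespace ep_lib
      {
        typedef ::MPI_Comm   MPI_Comm;   // wrapper type is just the MPI handle
        typedef ::MPI_Status MPI_Status;

        // Wrapped calls forward directly to MPI, so the wrapper costs nothing.
        inline int MPI_Init(int* argc, char*** argv)
        { return ::MPI_Init(argc, argv); }

        inline int MPI_Comm_rank(MPI_Comm comm, int* rank)
        { return ::MPI_Comm_rank(comm, rank); }
      }

      // EP_* constants collapse onto their MPI counterparts.
      #define EP_COMM_NULL MPI_COMM_NULL
      #define EP_INT       MPI_INT
      #define EP_LONG      MPI_LONG
      #define EP_CHAR      MPI_CHAR
    #else                     // _usingEP build: ep_lib supplies its own MPI_Comm
      // type (e.g. one communication endpoint per OpenMP thread), its own EP_*
      // handles, and thread-aware implementations of the wrapped calls.
    #endif

Under such a scheme the call sites in client.cpp stay backend-neutral: storing communicators as ep_lib::MPI_Comm and comparing against EP_COMM_NULL compiles unchanged whichever flag is set.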
--- client.cpp (r1556)
+++ client.cpp (r1642)

 #include "oasis_cinterface.hpp"
 #include "mpi.hpp"
+//#include "mpi_wrapper.hpp"
 #include "timer.hpp"
 #include "buffer_client.hpp"
-using namespace ep_lib;
+#include "string_tools.hpp"
 
 namespace xios
 {
 
-    MPI_Comm CClient::intraComm ;
-    MPI_Comm CClient::interComm ;
-    std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0;
+    ep_lib::MPI_Comm CClient::intraComm ;
+    ep_lib::MPI_Comm CClient::interComm ;
+    std::list<ep_lib::MPI_Comm> CClient::contextInterComms;
     int CClient::serverLeader ;
     bool CClient::is_MPI_Initialized ;
…
     StdOFStream CClient::m_infoStream;
     StdOFStream CClient::m_errorStream;
-
-    StdOFStream CClient::array_infoStream[16];
-
-    MPI_Comm& CClient::getInterComm(void)   { return (interComm); }
-
+    ep_lib::MPI_Comm& CClient::getInterComm(void)   { return (interComm); }
+
 ///---------------------------------------------------------------
 /*!
…
  */
 
-    void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
+    void CClient::initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm)
     {
       int initialized ;
-      MPI_Initialized(&initialized) ;
+      ep_lib::MPI_Initialized(&initialized) ;
       if (initialized) is_MPI_Initialized=true ;
       else is_MPI_Initialized=false ;
…
       {
 // localComm isn't given
-        if (localComm == MPI_COMM_NULL)
+        if (localComm == EP_COMM_NULL)
         {
           if (!is_MPI_Initialized)
           {
-            MPI_Init(NULL, NULL);
+            ep_lib::MPI_Init(NULL, NULL);
           }
           CTimer::get("XIOS").resume() ;
…
           int myColor ;
           int i,c ;
-          MPI_Comm newComm ;
-
-          MPI_Comm_size(CXios::globalComm,&size) ;
-          MPI_Comm_rank(CXios::globalComm,&rank_);
+          ep_lib::MPI_Comm newComm ;
+
+          ep_lib::MPI_Comm_size(CXios::globalComm,&size) ;
+
+          ep_lib::MPI_Comm_rank(CXios::globalComm,&rank_);
 
           hashAll=new unsigned long[size] ;
 
-          MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;
-
+          ep_lib::MPI_Allgather(&hashClient,1,EP_LONG,hashAll,1,EP_LONG,CXios::globalComm) ;
+
           map<unsigned long, int> colors ;
           map<unsigned long, int> leaders ;
…
 
           myColor=colors[hashClient];
-          MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
-
+          ep_lib::MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
+
           if (CXios::usingServer)
           {
…
             serverLeader=leaders[hashServer] ;
             int intraCommSize, intraCommRank ;
-            MPI_Comm_size(intraComm,&intraCommSize) ;
-            MPI_Comm_rank(intraComm,&intraCommRank) ;
-
-            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
-            #pragma omp critical (_output)
-            {
-              info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize
+            ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;
+            ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ;
+            info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize
                    <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< serverLeader<<endl ;
-            }
-
+            ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
+            //rank_ = intraCommRank;
           }
           else
           {
-            MPI_Comm_dup(intraComm,&interComm) ;
+            ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
           }
           delete [] hashAll ;
…
          else
          {
-            MPI_Comm_dup(localComm,&intraComm) ;
-            MPI_Comm_dup(intraComm,&interComm) ;
+            ep_lib::MPI_Comm_dup(localComm,&intraComm) ;
+            ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
          }
        }
…
       {
         // localComm isn't given
-        if (localComm == MPI_COMM_NULL)
+        if (localComm == EP_COMM_NULL)
         {
           if (!is_MPI_Initialized) oasis_init(codeId) ;
           oasis_get_localcomm(localComm) ;
         }
-        MPI_Comm_dup(localComm,&intraComm) ;
+        ep_lib::MPI_Comm_dup(localComm,&intraComm) ;
 
         CTimer::get("XIOS").resume() ;
…
         if (CXios::usingServer)
         {
-          MPI_Status status ;
-          MPI_Comm_rank(intraComm,&rank_) ;
+          ep_lib::MPI_Status status ;
+          ep_lib::MPI_Comm_rank(intraComm,&rank_) ;
 
           oasis_get_intercomm(interComm,CXios::xiosCodeId) ;
-          if (rank_==0) MPI_Recv(&serverLeader,1, MPI_INT, 0, 0, interComm, &status) ;
-          MPI_Bcast(&serverLeader,1,MPI_INT,0,intraComm) ;
-        }
-        else MPI_Comm_dup(intraComm,&interComm) ;
-      }
-
-      MPI_Comm_dup(intraComm,&returnComm) ;
+          if (rank_==0) ep_lib::MPI_Recv(&serverLeader,1, EP_INT, 0, 0, interComm, &status) ;
+          ep_lib::MPI_Bcast(&serverLeader,1,EP_INT,0,intraComm) ;
+        }
+        else ep_lib::MPI_Comm_dup(intraComm,&interComm) ;
+      }
+
+      ep_lib::MPI_Comm_dup(intraComm,&returnComm) ;
     }
 
…
  * Function is only called by client.
  */
-    void CClient::registerContext(const string& id, MPI_Comm contextComm)
+    void CClient::registerContext(const string& id, ep_lib::MPI_Comm contextComm)
     {
       CContext::setCurrent(id) ;
…
       // Attached mode
       {
-        MPI_Comm contextInterComm ;
-        MPI_Comm_dup(contextComm,&contextInterComm) ;
+        ep_lib::MPI_Comm contextInterComm ;
+        ep_lib::MPI_Comm_dup(contextComm,&contextInterComm) ;
         CContext* contextServer = CContext::create(idServer);
 
…
         CContext::setCurrent(id);
 
-        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
-        contextInterComms_ptr->push_back(contextInterComm);
+        contextInterComms.push_back(contextInterComm);
       }
       else
…
         size_t message_size ;
         int leaderRank ;
-        MPI_Comm contextInterComm ;
-
-        MPI_Comm_size(contextComm,&size) ;
-        MPI_Comm_rank(contextComm,&rank) ;
-        MPI_Comm_rank(CXios::globalComm,&globalRank) ;
+        ep_lib::MPI_Comm contextInterComm ;
+
+        ep_lib::MPI_Comm_size(contextComm,&size) ;
+        ep_lib::MPI_Comm_rank(contextComm,&rank) ;
+        ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ;
         if (rank!=0) globalRank=0 ;
 
…
         buffer<<msg ;
 
-        MPI_Send((void*)buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;
-
-        MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-        #pragma omp critical (_output)
+        ep_lib::MPI_Send((void*)buff,buffer.count(),EP_CHAR,serverLeader,1,CXios::globalComm) ;
+
+        ep_lib::MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
         info(10)<<"Register new Context : "<<id<<endl ;
-        MPI_Comm inter ;
-        MPI_Intercomm_merge(contextInterComm,0,&inter) ;
-        MPI_Barrier(inter) ;
+        ep_lib::MPI_Comm inter ;
+        ep_lib::MPI_Intercomm_merge(contextInterComm,0,&inter) ;
+        ep_lib::MPI_Barrier(inter) ;
 
         context->initClient(contextComm,contextInterComm) ;
 
-        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
-        contextInterComms_ptr->push_back(contextInterComm);
-
-        MPI_Comm_free(&inter);
+        contextInterComms.push_back(contextInterComm);
+        ep_lib::MPI_Comm_free(&inter);
         delete [] buff ;
 
       }
     }
+
+/*!
+ * \fn void CClient::callOasisEnddef(void)
+ * \brief Send the order to the servers to call "oasis_enddef". It must be done by each compound of models before calling oasis_enddef on client side
+ * Function is only called by client.
+ */
+    void CClient::callOasisEnddef(void)
+    {
+      bool oasisEnddef=CXios::getin<bool>("call_oasis_enddef",true) ;
+      if (!oasisEnddef) ERROR("void CClient::callOasisEnddef(void)", <<"Function xios_oasis_enddef called but variable <call_oasis_enddef> is set to false."<<endl
+                                                                     <<"Variable <call_oasis_enddef> must be set to true"<<endl) ;
+      if (CXios::isServer)
+      // Attached mode
+      {
+        // nothing to do
+      }
+      else
+      {
+        int rank ;
+        int msg=0 ;
+
+        ep_lib::MPI_Comm_rank(intraComm,&rank) ;
+        if (rank==0)
+        {
+          ep_lib::MPI_Send(&msg,1,EP_INT,0,5,interComm) ; // tags oasis_endded = 5
+        }
+
+      }
+    }
+
 
     void CClient::finalize(void)
…
       int msg=0 ;
 
-      MPI_Comm_rank(intraComm,&rank) ;
+      ep_lib::MPI_Comm_rank(intraComm,&rank) ;
 
       if (!CXios::isServer)
       {
-        MPI_Comm_rank(intraComm,&rank) ;
+        ep_lib::MPI_Comm_rank(intraComm,&rank) ;
         if (rank==0)
         {
-          MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;
-        }
-      }
-
-      for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); it++)
-        MPI_Comm_free(&(*it));
-      MPI_Comm_free(&interComm);
-      MPI_Comm_free(&intraComm);
+          ep_lib::MPI_Send(&msg,1,EP_INT,0,0,interComm) ;
+        }
+      }
+
+      for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+        ep_lib::MPI_Comm_free(&(*it));
+      ep_lib::MPI_Comm_free(&interComm);
+      ep_lib::MPI_Comm_free(&intraComm);
 
       CTimer::get("XIOS init/finalize").suspend() ;
…
       if (!is_MPI_Initialized)
       {
-        //if (CXios::usingOasis) oasis_finalize();
-        //else
-        MPI_Finalize() ;
-      }
-      #pragma omp critical (_output)
+        if (CXios::usingOasis) oasis_finalize();
+        else ep_lib::MPI_Finalize() ;
+      }
+
       info(20) << "Client side context is finalized"<<endl ;
-
-      #pragma omp critical (_output)
-      {
-        report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
-        report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-        report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-        report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
-        report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
+      report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ;
+      report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
+      report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ;
+      report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
 //      report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-        report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-        report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
-        report(100)<<CTimer::getAllCumulatedTime()<<endl ;
-      }
+      report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
+      report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+      report(100)<<CTimer::getAllCumulatedTime()<<endl ;
    }
 
…
       int size = 0;
       int rank;
-      MPI_Comm_size(CXios::globalComm, &size);
+      ep_lib::MPI_Comm_size(CXios::globalComm, &size);
       while (size)
       {
…
       if (CXios::usingOasis)
       {
-        MPI_Comm_rank(CXios::globalComm,&rank);
+        ep_lib::MPI_Comm_rank(CXios::globalComm,&rank);
         fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext;
       }
…
     void CClient::openInfoStream(const StdString& fileName)
     {
-      info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();
-
-      openStream(fileName, ".out", info_FB[omp_get_thread_num()]);
-
-      info.write2File(info_FB[omp_get_thread_num()]);
-      report.write2File(info_FB[omp_get_thread_num()]);
+      std::filebuf* fb = m_infoStream.rdbuf();
+      openStream(fileName, ".out", fb);
+
+      info.write2File(fb);
+      report.write2File(fb);
     }
 
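Beyond the mechanical ep_lib:: substitution, one design change stands out: the lazily allocated contextInterComms_ptr (a heap list guarded by if(... == NULL) checks at each registration) becomes a plain static member contextInterComms, so registration is a bare push_back and finalize() can iterate without a NULL check. A small illustrative helper in the same spirit as the finalize() loop above (free_all is not XIOS code; it assumes ep_lib also wraps MPI_Comm_free, as the diff shows):

    #include <list>

    // Illustrative helper, not part of XIOS: releases every stored
    // communicator through the ep_lib wrapper, mirroring the loop in
    // CClient::finalize(); identical for the EP and plain-MPI backends.
    static void free_all(std::list<ep_lib::MPI_Comm>& comms)
    {
      for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin();
           it != comms.end(); ++it)
        ep_lib::MPI_Comm_free(&(*it));  // forwards to MPI_Comm_free in MPI builds
      comms.clear();                    // freed handles must not be reused
    }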