Changeset 1601 for XIOS


Timestamp: 11/19/18 15:52:54
Author: yushan
Message: branch_openmp merged with trunk r1597
Location: XIOS/dev/dev_trunk_omp/src
Files: 127 edited

  • XIOS/dev/dev_trunk_omp/src/attribute_map.hpp

    r1158 r1601  
    7676            /// Propriété statique /// 
    7777            static CAttributeMap * Current; 
     78            #pragma omp threadprivate(Current) 
    7879 
    7980      };  // class CAttributeMap 
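
    The hunk above illustrates the pattern applied across most headers in this changeset: static class members that used to be shared process-wide are declared threadprivate so each OpenMP thread (endpoint) keeps its own copy. A minimal standalone sketch of that pattern, assuming an illustrative CCurrentHolder class rather than the XIOS CAttributeMap:

        #include <omp.h>
        #include <cstdio>

        // Illustrative class, not XIOS code: a static member declared
        // threadprivate so each OpenMP thread sees its own instance, as done
        // for CAttributeMap::Current in this changeset.
        class CCurrentHolder
        {
          public:
            static CCurrentHolder* Current;
            #pragma omp threadprivate(Current)
        };

        CCurrentHolder* CCurrentHolder::Current = nullptr;

        int main()
        {
          #pragma omp parallel
          {
            // Each thread allocates and frees its own copy; no race on Current.
            CCurrentHolder::Current = new CCurrentHolder();
            std::printf("thread %d owns its copy of Current at %p\n",
                        omp_get_thread_num(),
                        static_cast<void*>(CCurrentHolder::Current));
            delete CCurrentHolder::Current;
            CCurrentHolder::Current = nullptr;
          }
          return 0;
        }
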
  • XIOS/dev/dev_trunk_omp/src/attribute_template.hpp

    r1478 r1601  
    5353            void reset(void) ; 
    5454            void checkEmpty(void) const; 
    55  
    5655 
    5756            void setInheritedValue(const CAttributeTemplate& attr ); 
  • XIOS/dev/dev_trunk_omp/src/attribute_template_impl.hpp

    r1478 r1601  
    8282 
    8383      template <class T> 
    84          T CAttributeTemplate<T>::getValue(void) const 
     84      T CAttributeTemplate<T>::getValue(void) const 
    8585      { 
    8686        return CType<T>::get() ; 
     
    112112 
    113113      template <class T> 
    114          void CAttributeTemplate<T>::setValue(const T & value) 
     114      void CAttributeTemplate<T>::setValue(const T & value) 
    115115      { 
    116116         CType<T>::set(value) ; 
  • XIOS/dev/dev_trunk_omp/src/buffer_client.cpp

    r1227 r1601  
    77#include "mpi.hpp" 
    88#include "tracer.hpp" 
     9 
     10 
     11using namespace ep_lib; 
    912 
    1013namespace xios 
     
    2730    buffer[1] = new char[bufferSize]; 
    2831    retBuffer = new CBufferOut(buffer[current], bufferSize); 
     32    #pragma omp critical (_output) 
    2933    info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 
    3034  } 
  • XIOS/dev/dev_trunk_omp/src/buffer_client.hpp

    r1227 r1601  
    1313    public: 
    1414      static size_t maxRequestSize; 
     15      #pragma omp threadprivate(maxRequestSize) 
    1516 
    16       CClientBuffer(MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents); 
     17      CClientBuffer(ep_lib::MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents); 
    1718      ~CClientBuffer(); 
    1819 
     
    3940      bool pending; 
    4041 
    41       MPI_Request request; 
     42      ep_lib::MPI_Request request; 
    4243 
    4344      CBufferOut* retBuffer; 
    44       const MPI_Comm interComm; 
     45      const ep_lib::MPI_Comm interComm; 
    4546  }; 
    4647} 
  • XIOS/dev/dev_trunk_omp/src/calendar.cpp

    r1357 r1601  
    127127      const CDate& CCalendar::update(int step) 
    128128      { 
    129         info(20) << "update step : " << step << " timestep " << this->timestep << std::endl; 
     129        #pragma omp critical (_output) 
     130        info(80) << "update step : " << step << " timestep " << this->timestep << std::endl; 
    130131        this->step = step; 
    131132        return (this->currentDate = this->getInitDate() + step * this->timestep); 
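
    buffer_client.cpp and calendar.cpp show the second recurring change: every write to the shared info()/report() log streams is wrapped in a named critical section so messages from different threads do not interleave. A minimal sketch of the same guard, assuming an illustrative logStep() helper rather than the XIOS logging API:

        #include <omp.h>
        #include <iostream>

        // Named critical section around the shared output stream, mirroring
        // the "#pragma omp critical (_output)" guards added in this changeset.
        void logStep(int step)
        {
          #pragma omp critical (_output)
          {
            std::cout << "thread " << omp_get_thread_num()
                      << " update step : " << step << std::endl;
          }
        }

        int main()
        {
          #pragma omp parallel for
          for (int step = 0; step < 4; ++step)
            logStep(step);
          return 0;
        }
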
  • XIOS/dev/dev_trunk_omp/src/client.cpp

    r1587 r1601  
    1212#include "buffer_client.hpp" 
    1313#include "string_tools.hpp" 
     14using namespace ep_lib; 
    1415 
    1516namespace xios 
     
    1819    MPI_Comm CClient::intraComm ; 
    1920    MPI_Comm CClient::interComm ; 
    20     std::list<MPI_Comm> CClient::contextInterComms; 
     21    std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0; 
    2122    int CClient::serverLeader ; 
    2223    bool CClient::is_MPI_Initialized ; 
     
    2425    StdOFStream CClient::m_infoStream; 
    2526    StdOFStream CClient::m_errorStream; 
     27 
     28    StdOFStream CClient::array_infoStream[16]; 
     29 
    2630    MPI_Comm& CClient::getInterComm(void)   { return (interComm); } 
    27       
     31 
    2832///--------------------------------------------------------------- 
    2933/*! 
     
    106110            MPI_Comm_size(intraComm,&intraCommSize) ; 
    107111            MPI_Comm_rank(intraComm,&intraCommRank) ; 
    108             info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 
     112 
     113            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ; 
     114            #pragma omp critical (_output) 
     115            { 
     116              info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 
    109117                   <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< serverLeader<<endl ; 
    110              MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ; 
    111              //rank_ = intraCommRank; 
     118            } 
    112119          } 
    113120          else 
     
    191198        CContext::setCurrent(id); 
    192199 
    193         contextInterComms.push_back(contextInterComm); 
     200        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>; 
     201        contextInterComms_ptr->push_back(contextInterComm); 
    194202      } 
    195203      else 
     
    217225 
    218226        MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ; 
     227        #pragma omp critical (_output) 
    219228        info(10)<<"Register new Context : "<<id<<endl ; 
    220229        MPI_Comm inter ; 
     
    224233        context->initClient(contextComm,contextInterComm) ; 
    225234 
    226         contextInterComms.push_back(contextInterComm); 
     235        if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>; 
     236        contextInterComms_ptr->push_back(contextInterComm); 
     237 
    227238        MPI_Comm_free(&inter); 
    228239        delete [] buff ; 
     
    277288      } 
    278289 
    279       for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 
     290      for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); it++) 
    280291        MPI_Comm_free(&(*it)); 
    281292      MPI_Comm_free(&interComm); 
     
    287298      if (!is_MPI_Initialized) 
    288299      { 
    289         if (CXios::usingOasis) oasis_finalize(); 
    290         else MPI_Finalize() ; 
    291       } 
    292        
     300        //if (CXios::usingOasis) oasis_finalize(); 
     301        //else 
     302        MPI_Finalize() ; 
     303      } 
     304      #pragma omp critical (_output) 
    293305      info(20) << "Client side context is finalized"<<endl ; 
    294       report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 
    295       report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 
    296       report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 
    297       report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 
    298       report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 
     306 
     307      #pragma omp critical (_output) 
     308      { 
     309        report(0) <<" Performance report : Whole time from XIOS init and finalize: "<< CTimer::get("XIOS init/finalize").getCumulatedTime()<<" s"<<endl ; 
     310        report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 
     311        report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 
     312        report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS init/finalize").getCumulatedTime()*100.<<" %"<<endl ; 
     313        report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ; 
    299314//      report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ; 
    300       report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 
    301       report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 
    302       report(100)<<CTimer::getAllCumulatedTime()<<endl ; 
     315        report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ; 
     316        report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 
     317        report(100)<<CTimer::getAllCumulatedTime()<<endl ; 
     318      } 
    303319   } 
    304320 
     
    355371    void CClient::openInfoStream(const StdString& fileName) 
    356372    { 
    357       std::filebuf* fb = m_infoStream.rdbuf(); 
    358       openStream(fileName, ".out", fb); 
    359  
    360       info.write2File(fb); 
    361       report.write2File(fb); 
     373      info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf(); 
     374           
     375      openStream(fileName, ".out", info_FB[omp_get_thread_num()]); 
     376 
     377      info.write2File(info_FB[omp_get_thread_num()]); 
     378      report.write2File(info_FB[omp_get_thread_num()]); 
    362379    } 
    363380 
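
    client.cpp replaces the shared static std::list<MPI_Comm> contextInterComms with a threadprivate pointer, contextInterComms_ptr, allocated lazily on first use. A standalone sketch of that idiom, with int standing in for the ep_lib::MPI_Comm handle and registerContext() as an illustrative helper:

        #include <omp.h>
        #include <cstdio>
        #include <list>

        // Threadprivate pointer to a per-thread list, allocated on first use,
        // replacing a single shared static container.
        static std::list<int>* contextInterComms_ptr = nullptr;
        #pragma omp threadprivate(contextInterComms_ptr)

        void registerContext(int comm)
        {
          // Lazy allocation, as client.cpp now does before push_back.
          if (contextInterComms_ptr == nullptr) contextInterComms_ptr = new std::list<int>;
          contextInterComms_ptr->push_back(comm);
        }

        int main()
        {
          #pragma omp parallel
          {
            registerContext(omp_get_thread_num());
            std::printf("thread %d holds %zu communicator(s)\n",
                        omp_get_thread_num(), contextInterComms_ptr->size());
            delete contextInterComms_ptr;
          }
          return 0;
        }
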
  • XIOS/dev/dev_trunk_omp/src/client.hpp

    r1587 r1601  
    77namespace xios 
    88{ 
    9     class CClient 
    10     { 
    11       public: 
    12         static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm); 
    13         static void finalize(void); 
    14         static void registerContext(const string& id, MPI_Comm contextComm); 
    15         static void callOasisEnddef(void) ; 
     9  class CClient 
     10  { 
     11    public: 
     12      static void initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm); 
     13      static void finalize(void); 
     14      static void registerContext(const string& id, ep_lib::MPI_Comm contextComm); 
     15      static void callOasisEnddef(void) ; 
    1616 
    17         static MPI_Comm intraComm; 
    18         static MPI_Comm interComm; 
    19         static std::list<MPI_Comm> contextInterComms; 
    20         static int serverLeader; 
    21         static bool is_MPI_Initialized ; 
     17      static ep_lib::MPI_Comm intraComm; 
     18      #pragma omp threadprivate(intraComm) 
    2219 
    23         static MPI_Comm& getInterComm(); 
     20      static ep_lib::MPI_Comm interComm; 
     21      #pragma omp threadprivate(interComm) 
    2422 
    25         //! Get global rank without oasis and current rank in model intraComm in case of oasis 
    26         static int getRank(); 
     23      //static std::list<MPI_Comm> contextInterComms; 
     24      static std::list<ep_lib::MPI_Comm> *contextInterComms_ptr; 
     25      #pragma omp threadprivate(contextInterComms_ptr) 
    2726 
    28         //! Open a file stream to write the info logs 
    29         static void openInfoStream(const StdString& fileName); 
    30         //! Write the info logs to standard output 
    31         static void openInfoStream(); 
    32         //! Close the info logs file if it opens 
    33         static void closeInfoStream(); 
     27      static int serverLeader; 
     28      #pragma omp threadprivate(serverLeader) 
    3429 
    35         //! Open a file stream to write the error log 
    36         static void openErrorStream(const StdString& fileName); 
    37         //! Write the error log to standard error output 
    38         static void openErrorStream(); 
    39         //! Close the error log file if it opens 
    40         static void closeErrorStream(); 
     30      static bool is_MPI_Initialized ; 
     31      #pragma omp threadprivate(is_MPI_Initialized) 
    4132 
    42       protected: 
    43         static int rank_;                 //!< Rank in model intraComm 
    44         static StdOFStream m_infoStream; 
    45         static StdOFStream m_errorStream; 
     33      static ep_lib::MPI_Comm& getInterComm(); 
    4634 
    47         static void openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb); 
    48     }; 
     35      //! Get global rank without oasis and current rank in model intraComm in case of oasis 
     36      static int getRank(); 
     37 
     38      //! Open a file stream to write the info logs 
     39      static void openInfoStream(const StdString& fileName); 
     40      //! Write the info logs to standard output 
     41      static void openInfoStream(); 
     42      //! Close the info logs file if it opens 
     43      static void closeInfoStream(); 
     44 
     45      //! Open a file stream to write the error log 
     46      static void openErrorStream(const StdString& fileName); 
     47      //! Write the error log to standard error output 
     48      static void openErrorStream(); 
     49      //! Close the error log file if it opens 
     50      static void closeErrorStream(); 
     51 
     52    protected: 
     53      static int rank_;                 //!< Rank in model intraComm 
     54      #pragma omp threadprivate(rank_) 
     55 
     56      static StdOFStream m_infoStream; 
     57      #pragma omp threadprivate(m_infoStream) 
     58      static StdOFStream m_errorStream; 
     59      #pragma omp threadprivate(m_errorStream) 
     60 
     61      static StdOFStream array_infoStream[16]; 
     62      static void openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb); 
     63  }; 
    4964} 
    5065 
  • XIOS/dev/dev_trunk_omp/src/client_client_dht_template.hpp

    r1542 r1601  
    4040  public: 
    4141    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap, 
    42                              const MPI_Comm& clientIntraComm); 
     42                             const ep_lib::MPI_Comm& clientIntraComm); 
    4343 
    4444    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    45                              const MPI_Comm& clientIntraComm); 
     45                             const ep_lib::MPI_Comm& clientIntraComm); 
    4646 
    4747    void computeIndexInfoMapping(const CArray<size_t,1>& indices); 
     
    5555 
    5656  protected: 
    57     CClientClientDHTTemplate(const MPI_Comm& clientIntraComm); 
     57    CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm); 
    5858 
    5959  protected: 
     
    6262    // Redistribute index and info among clients 
    6363    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap, 
    64                                  const MPI_Comm& intraCommLevel, 
     64                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6565                                 int level); 
    6666 
    6767    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    68                                  const MPI_Comm& intraCommLevel, 
     68                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6969                                 int level); 
    7070 
     
    7373 
    7474    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    75                                       const MPI_Comm& intraCommLevel, 
     75                                      const ep_lib::MPI_Comm& intraCommLevel, 
    7676                                      int level); 
    7777 
     
    8585    // Send information to clients 
    8686    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    87                            const MPI_Comm& clientIntraComm, 
    88                            std::vector<MPI_Request>& requestSendInfo); 
     87                           const ep_lib::MPI_Comm& clientIntraComm, 
     88                           std::vector<ep_lib::MPI_Request>& requestSendInfo); 
     89    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
     90                           const ep_lib::MPI_Comm& clientIntraComm, 
     91                           ep_lib::MPI_Request* requestSendInfo); 
    8992 
    9093    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    91                             const MPI_Comm& clientIntraComm, 
    92                             std::vector<MPI_Request>& requestRecvInfo); 
     94                            const ep_lib::MPI_Comm& clientIntraComm, 
     95                            std::vector<ep_lib::MPI_Request>& requestRecvInfo); 
     96    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
     97                             const ep_lib::MPI_Comm& clientIntraComm, 
     98                             ep_lib::MPI_Request* requestRecvInfo); 
     99                                                         
    93100 
    94101    // Send global index to clients 
    95102    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    96                             const MPI_Comm& clientIntraComm, 
    97                             std::vector<MPI_Request>& requestSendIndexGlobal); 
     103                            const ep_lib::MPI_Comm& clientIntraComm, 
     104                            std::vector<ep_lib::MPI_Request>& requestSendIndexGlobal); 
     105    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
     106                            const ep_lib::MPI_Comm& clientIntraComm, 
     107                            ep_lib::MPI_Request* requestSendIndexGlobal); 
    98108 
    99109    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    100                              const MPI_Comm& clientIntraComm, 
    101                              std::vector<MPI_Request>& requestRecvIndex); 
     110                             const ep_lib::MPI_Comm& clientIntraComm, 
     111                             std::vector<ep_lib::MPI_Request>& requestRecvIndex); 
     112    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
     113                              const ep_lib::MPI_Comm& clientIntraComm, 
     114                              ep_lib::MPI_Request* requestRecvIndex); 
    102115 
    103116    void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements, 
  • XIOS/dev/dev_trunk_omp/src/client_client_dht_template_impl.hpp

    r1542 r1601  
    1414{ 
    1515template<typename T, typename H> 
    16 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm) 
     16CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm) 
    1717  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    1818{ 
    19   MPI_Comm_size(clientIntraComm, &nbClient_); 
     19  ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
    2020  this->computeMPICommLevel(); 
    2121  int nbLvl = this->getNbLevel(); 
     
    3434template<typename T, typename H> 
    3535CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap, 
    36                                                         const MPI_Comm& clientIntraComm) 
     36                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    3737  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    3838{ 
    39   MPI_Comm_size(clientIntraComm, &nbClient_); 
     39  ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
    4040  this->computeMPICommLevel(); 
    4141  int nbLvl = this->getNbLevel(); 
     
    5959template<typename T, typename H> 
    6060CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap, 
    61                                                         const MPI_Comm& clientIntraComm) 
     61                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    6262  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    6363{ 
    64   MPI_Comm_size(clientIntraComm, &nbClient_); 
     64  ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
    6565  this->computeMPICommLevel(); 
    6666  int nbLvl = this->getNbLevel(); 
     
    9595template<typename T, typename H> 
    9696void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    97                                                                  const MPI_Comm& commLevel, 
     97                                                                 const ep_lib::MPI_Comm& commLevel, 
    9898                                                                 int level) 
    9999{ 
    100100  int clientRank; 
    101   MPI_Comm_rank(commLevel,&clientRank); 
     101  ep_lib::MPI_Comm_rank(commLevel,&clientRank); 
    102102  int groupRankBegin = this->getGroupBegin()[level]; 
    103103  int nbClient = this->getNbInGroup()[level]; 
     
    169169    recvIndexBuff = new unsigned long[recvNbIndexCount]; 
    170170 
    171   std::vector<MPI_Request> request; 
     171  int request_size = 0; 
     172  for (int idx = 0; idx < recvRankClient.size(); ++idx) 
     173  { 
     174    if (0 != recvNbIndexClientCount[idx]) 
     175      request_size ++; 
     176  } 
     177 
     178  request_size += client2ClientIndex.size(); 
     179 
     180  std::vector<ep_lib::MPI_Request> request(request_size); 
     181   
    172182  std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 
    173183                             iteRecvIndex = recvRankClient.end(), 
     
    176186  int currentIndex = 0; 
    177187  int nbRecvClient = recvRankClient.size(); 
     188  int request_position = 0; 
    178189  for (int idx = 0; idx < nbRecvClient; ++idx) 
    179190  { 
    180191    if (0 != recvNbIndexClientCount[idx]) 
    181       recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request); 
     192      recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]); 
    182193    currentIndex += recvNbIndexClientCount[idx]; 
    183194  } 
     
    186197                                                iteIndex = client2ClientIndex.end(); 
    187198  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 
    188     sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
    189  
    190   std::vector<MPI_Status> status(request.size()); 
    191   MPI_Waitall(request.size(), &request[0], &status[0]); 
     199    sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]); 
     200 
     201  std::vector<ep_lib::MPI_Status> status(request.size()); 
     202  ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 
    192203 
    193204  CArray<size_t,1>* tmpGlobalIndex; 
     
    242253  } 
    243254 
    244   std::vector<MPI_Request> requestOnReturn; 
     255int requestOnReturn_size=0; 
     256  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
     257  { 
     258    if (0 != recvNbIndexOnReturn[idx]) 
     259    { 
     260      requestOnReturn_size += 2; 
     261    } 
     262  } 
     263 
     264  for (int idx = 0; idx < nbRecvClient; ++idx) 
     265  { 
     266    if (0 != sendNbIndexOnReturn[idx]) 
     267    { 
     268      requestOnReturn_size += 2; 
     269    } 
     270  } 
     271 
     272  int requestOnReturn_position=0; 
     273 
     274  std::vector<ep_lib::MPI_Request> requestOnReturn(requestOnReturn_size); 
    245275  currentIndex = 0; 
    246276  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
     
    248278    if (0 != recvNbIndexOnReturn[idx]) 
    249279    { 
    250       recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn); 
     280      recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]); 
    251281      recvInfoFromClients(recvRankOnReturn[idx], 
    252282                          recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 
    253283                          recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), 
    254                           commLevel, requestOnReturn); 
     284                          commLevel, &requestOnReturn[requestOnReturn_position++]); 
    255285    } 
    256286    currentIndex += recvNbIndexOnReturn[idx]; 
     
    286316 
    287317      sendIndexToClients(rank, client2ClientIndexOnReturn[rank], 
    288                          sendNbIndexOnReturn[idx], commLevel, requestOnReturn); 
     318                         sendNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]); 
    289319      sendInfoToClients(rank, client2ClientInfoOnReturn[rank], 
    290                         sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn); 
     320                        sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, &requestOnReturn[requestOnReturn_position++]); 
    291321    } 
    292322    currentIndex += recvNbIndexClientCount[idx]; 
    293323  } 
    294324 
    295   std::vector<MPI_Status> statusOnReturn(requestOnReturn.size()); 
    296   MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
     325  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 
     326  ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
    297327 
    298328  Index2VectorInfoTypeMap indexToInfoMapping; 
     
    360390template<typename T, typename H> 
    361391void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, 
    362                                                             const MPI_Comm& commLevel, 
     392                                                            const ep_lib::MPI_Comm& commLevel, 
    363393                                                            int level) 
    364394{ 
    365395  int clientRank; 
    366   MPI_Comm_rank(commLevel,&clientRank); 
     396  ep_lib::MPI_Comm_rank(commLevel,&clientRank); 
    367397  computeSendRecvRank(level, clientRank); 
    368398 
     
    439469  // it will send a message to the correct clients. 
    440470  // Contents of the message are index and its corresponding informatioin 
    441   std::vector<MPI_Request> request; 
     471  int request_size = 0; 
     472   for (int idx = 0; idx < recvRankClient.size(); ++idx) 
     473   { 
     474     if (0 != recvNbIndexClientCount[idx]) 
     475     { 
     476       request_size += 2; 
     477     } 
     478   } 
     479  
     480   request_size += client2ClientIndex.size(); 
     481   request_size += client2ClientInfo.size(); 
     482  
     483   std::vector<ep_lib::MPI_Request> request(request_size); 
    442484  int currentIndex = 0; 
    443485  int nbRecvClient = recvRankClient.size(); 
     486  int request_position=0; 
    444487  for (int idx = 0; idx < nbRecvClient; ++idx) 
    445488  { 
    446489    if (0 != recvNbIndexClientCount[idx]) 
    447490    { 
    448       recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request); 
    449       recvInfoFromClients(recvRankClient[idx], 
    450                           recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 
    451                           recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(), 
    452                           commLevel, request); 
     491        recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]); 
     492        recvInfoFromClients(recvRankClient[idx], 
     493                            recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 
     494                            recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(), 
     495                            commLevel, &request[request_position++]); 
    453496    } 
    454497    currentIndex += recvNbIndexClientCount[idx]; 
     
    458501                                                iteIndex = client2ClientIndex.end(); 
    459502  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 
    460     sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
     503    sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]); 
    461504  std::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo, 
    462505                                                      iteInfo = client2ClientInfo.end(); 
    463506  for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo) 
    464     sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request); 
    465  
    466   std::vector<MPI_Status> status(request.size()); 
    467   MPI_Waitall(request.size(), &request[0], &status[0]); 
     507    sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, &request[request_position++]); 
     508 
     509  std::vector<ep_lib::MPI_Status> status(request.size()); 
     510  ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 
    468511 
    469512  Index2VectorInfoTypeMap indexToInfoMapping; 
     
    518561template<typename T, typename H> 
    519562void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    520                                                        const MPI_Comm& clientIntraComm, 
    521                                                        std::vector<MPI_Request>& requestSendIndex) 
    522 { 
    523   MPI_Request request; 
     563                                                       const ep_lib::MPI_Comm& clientIntraComm, 
     564                                                       std::vector<ep_lib::MPI_Request>& requestSendIndex) 
     565{ 
     566  ep_lib::MPI_Request request; 
    524567  requestSendIndex.push_back(request); 
    525   MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, 
     568  ep_lib::MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, 
    526569            clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back())); 
     570} 
     571 
     572/*! 
     573  Send message containing index to clients 
     574  \param [in] clientDestRank rank of destination client 
     575  \param [in] indices index to send 
     576  \param [in] indiceSize size of index array to send 
     577  \param [in] clientIntraComm communication group of client 
     578  \param [in] requestSendIndex sending request 
     579*/ 
     580template<typename T, typename H> 
     581void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
     582                                                       const ep_lib::MPI_Comm& clientIntraComm, 
     583                                                       ep_lib::MPI_Request* requestSendIndex) 
     584{ 
     585  ep_lib::MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, 
     586            clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex); 
    527587} 
    528588 
     
    536596template<typename T, typename H> 
    537597void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    538                                                          const MPI_Comm& clientIntraComm, 
    539                                                          std::vector<MPI_Request>& requestRecvIndex) 
    540 { 
    541   MPI_Request request; 
     598                                                         const ep_lib::MPI_Comm& clientIntraComm, 
     599                                                         std::vector<ep_lib::MPI_Request>& requestRecvIndex) 
     600{ 
     601  ep_lib::MPI_Request request; 
    542602  requestRecvIndex.push_back(request); 
    543   MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG, 
     603  ep_lib::MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG, 
    544604            clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back())); 
     605} 
     606 
     607/*! 
     608  Receive message containing index to clients 
     609  \param [in] clientDestRank rank of destination client 
     610  \param [in] indices index to send 
     611  \param [in] clientIntraComm communication group of client 
     612  \param [in] requestRecvIndex receiving request 
     613*/ 
     614template<typename T, typename H> 
     615void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
     616                                                         const ep_lib::MPI_Comm& clientIntraComm, 
     617                                                         ep_lib::MPI_Request *requestRecvIndex) 
     618{ 
     619  ep_lib::MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG, 
     620            clientSrcRank, MPI_DHT_INDEX, clientIntraComm, requestRecvIndex); 
    545621} 
    546622 
     
    555631template<typename T, typename H> 
    556632void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    557                                                       const MPI_Comm& clientIntraComm, 
    558                                                       std::vector<MPI_Request>& requestSendInfo) 
    559 { 
    560   MPI_Request request; 
     633                                                      const ep_lib::MPI_Comm& clientIntraComm, 
     634                                                      std::vector<ep_lib::MPI_Request>& requestSendInfo) 
     635{ 
     636  ep_lib::MPI_Request request; 
    561637  requestSendInfo.push_back(request); 
    562638 
    563   MPI_Isend(info, infoSize, MPI_CHAR, 
     639  ep_lib::MPI_Isend(info, infoSize, MPI_CHAR, 
    564640            clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back())); 
     641} 
     642 
     643/*! 
     644  Send message containing information to clients 
     645  \param [in] clientDestRank rank of destination client 
     646  \param [in] info info array to send 
     647  \param [in] infoSize info array size to send 
     648  \param [in] clientIntraComm communication group of client 
     649  \param [in] requestSendInfo sending request 
     650*/ 
     651template<typename T, typename H> 
     652void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
     653                                                      const ep_lib::MPI_Comm& clientIntraComm, 
     654                                                      ep_lib::MPI_Request *requestSendInfo) 
     655{ 
     656  ep_lib::MPI_Isend(info, infoSize, MPI_CHAR, 
     657            clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo); 
    565658} 
    566659 
     
    575668template<typename T, typename H> 
    576669void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    577                                                         const MPI_Comm& clientIntraComm, 
    578                                                         std::vector<MPI_Request>& requestRecvInfo) 
    579 { 
    580   MPI_Request request; 
     670                                                        const ep_lib::MPI_Comm& clientIntraComm, 
     671                                                        std::vector<ep_lib::MPI_Request>& requestRecvInfo) 
     672{ 
     673  ep_lib::MPI_Request request; 
    581674  requestRecvInfo.push_back(request); 
    582675 
    583   MPI_Irecv(info, infoSize, MPI_CHAR, 
     676  ep_lib::MPI_Irecv(info, infoSize, MPI_CHAR, 
    584677            clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back())); 
     678} 
     679 
     680/*! 
     681  Receive message containing information from other clients 
     682  \param [in] clientDestRank rank of destination client 
     683  \param [in] info info array to receive 
     684  \param [in] infoSize info array size to receive 
     685  \param [in] clientIntraComm communication group of client 
     686  \param [in] requestRecvInfo list of receiving request 
     687*/ 
     688template<typename T, typename H> 
     689void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
     690                                                        const ep_lib::MPI_Comm& clientIntraComm, 
     691                                                        ep_lib::MPI_Request* requestRecvInfo) 
     692{ 
     693  ep_lib::MPI_Irecv(info, infoSize, MPI_CHAR, 
     694            clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo); 
    585695} 
    586696 
     
    651761{ 
    652762  recvNbElements.resize(recvNbRank.size()); 
    653   std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
    654   std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
     763  std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
     764  std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
    655765 
    656766  int nRequest = 0; 
    657767  for (int idx = 0; idx < recvNbRank.size(); ++idx) 
    658768  { 
    659     MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT, 
     769    ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT, 
    660770              recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
    661771    ++nRequest; 
     
    664774  for (int idx = 0; idx < sendNbRank.size(); ++idx) 
    665775  { 
    666     MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT, 
     776    ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT, 
    667777              sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
    668778    ++nRequest; 
    669779  } 
    670780 
    671   MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 
     781  ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 
    672782} 
    673783 
     
    696806  std::vector<int> recvBuff(recvBuffSize*2,0); 
    697807 
    698   std::vector<MPI_Request> request(sendBuffSize+recvBuffSize); 
    699   std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
    700  
     808  std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize); 
     809  std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
    701810  int nRequest = 0; 
    702811  for (int idx = 0; idx < recvBuffSize; ++idx) 
    703812  { 
    704     MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
    705               recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    706     ++nRequest; 
     813    ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
     814              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]); 
    707815  } 
    708816 
     
    716824  for (int idx = 0; idx < sendBuffSize; ++idx) 
    717825  { 
    718     MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
    719               sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 
    720     ++nRequest; 
    721   } 
    722  
    723   MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 
     826    ep_lib::MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
     827              sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]); 
     828  } 
     829 
     830  ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 
    724831  int nbRecvRank = 0, nbRecvElements = 0; 
    725832  recvNbRank.clear(); 
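
    The DHT template changes swap the old push_back-based std::vector<MPI_Request> for a vector sized up front, each non-blocking call receiving the address of its own slot before a single MPI_Waitall. A plain-MPI sketch of that request-handling pattern (exchange() and its buffers are illustrative, not the XIOS routines):

        #include <mpi.h>
        #include <vector>
        #include <cstddef>

        void exchange(MPI_Comm comm,
                      const std::vector<int>& sendRanks, std::vector<int>& sendBuf,
                      const std::vector<int>& recvRanks, std::vector<int>& recvBuf)
        {
          // Size the request vector once: push_back on a growing vector could
          // relocate the MPI_Request objects whose addresses the non-blocking
          // calls keep until MPI_Waitall completes.
          std::vector<MPI_Request> request(recvRanks.size() + sendRanks.size());
          int requestPosition = 0;

          for (std::size_t idx = 0; idx < recvRanks.size(); ++idx)
            MPI_Irecv(&recvBuf[idx], 1, MPI_INT, recvRanks[idx], 0, comm,
                      &request[requestPosition++]);

          for (std::size_t idx = 0; idx < sendRanks.size(); ++idx)
            MPI_Isend(&sendBuf[idx], 1, MPI_INT, sendRanks[idx], 0, comm,
                      &request[requestPosition++]);

          std::vector<MPI_Status> status(request.size());
          MPI_Waitall(static_cast<int>(request.size()), request.data(), status.data());
        }
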
  • XIOS/dev/dev_trunk_omp/src/client_server_mapping.cpp

    r1025 r1601  
    88 */ 
    99#include "client_server_mapping.hpp" 
     10 
     11using namespace ep_lib; 
    1012 
    1113namespace xios { 
  • XIOS/dev/dev_trunk_omp/src/client_server_mapping.hpp

    r1542 r1601  
    3737 
    3838    static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 
    39                                                      MPI_Comm& clientIntraComm, 
     39                                                     ep_lib::MPI_Comm& clientIntraComm, 
    4040                                                     const std::vector<int>& connectedServerRank); 
    4141 
  • XIOS/dev/dev_trunk_omp/src/client_server_mapping_distributed.cpp

    r1542 r1601  
    1515#include "context.hpp" 
    1616#include "context_client.hpp" 
     17using namespace ep_lib; 
    1718 
    1819namespace xios 
  • XIOS/dev/dev_trunk_omp/src/client_server_mapping_distributed.hpp

    r1542 r1601  
    3535    /** Default constructor */ 
    3636    CClientServerMappingDistributed(const std::unordered_map<size_t,int>& globalIndexOfServer, 
    37                                     const MPI_Comm& clientIntraComm, 
     37                                    const ep_lib::MPI_Comm& clientIntraComm, 
    3838                                    bool isDataDistributed = true); 
    3939 
  • XIOS/dev/dev_trunk_omp/src/context_client.cpp

    r1475 r1601  
    1212#include "cxios.hpp" 
    1313#include "server.hpp" 
     14using namespace ep_lib; 
    1415 
    1516namespace xios 
     
    101102        typeId_in=event.getTypeId() ; 
    102103        classId_in=event.getClassId() ; 
    103 //        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ; // MPI_UINT64_T standardized by MPI 3 
    104         MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm) ;  
     104        MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm) ; 
    105105        MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm) ; 
    106106        MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm) ; 
     
    341341       if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio; 
    342342     } 
    343      MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 
     343     MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 
    344344 
    345345     if (minBufferSizeEventSizeRatio < 1.0) 
     
    425425      for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 
    426426      { 
     427        #pragma omp critical (_output) 
    427428        info(100)<<"DEBUG : Sent context Finalize event to rank "<<*itRank<<endl ; 
    428429        event.push(*itRank, 1, msg); 
     
    450451    for (itMap = itbMap; itMap != iteMap; ++itMap) 
    451452    { 
     453      #pragma omp critical (_output) 
    452454      report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 
    453455                 << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 
    454456      totalBuf += itMap->second; 
    455457    } 
     458    #pragma omp critical (_output) 
    456459    report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 
    457460 
  • XIOS/dev/dev_trunk_omp/src/context_client.hpp

    r1232 r1601  
    2727    public: 
    2828      // Contructor 
    29       CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0); 
     29      CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0); 
    3030 
    3131      // Send event to server 
     
    7171      int serverSize; //!< Size of server group 
    7272 
    73       MPI_Comm interComm; //!< Communicator of server group 
     73      ep_lib::MPI_Comm interComm; //!< Communicator of server group 
    7474 
    75       MPI_Comm intraComm; //!< Communicator of client group 
     75      ep_lib::MPI_Comm intraComm; //!< Communicator of client group 
    7676 
    7777      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers 
  • XIOS/dev/dev_trunk_omp/src/context_server.cpp

    r1230 r1601  
    1818#include <boost/functional/hash.hpp> 
    1919 
    20  
     20using namespace ep_lib; 
    2121 
    2222namespace xios 
     
    8181 
    8282    traceOff(); 
    83     MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status); 
     83    MPI_Iprobe(-2, 20,interComm,&flag,&status); 
    8484    traceOn(); 
    8585 
    8686    if (flag==true) 
    8787    { 
     88      #ifdef _usingMPI 
    8889      rank=status.MPI_SOURCE ; 
     90      #elif _usingEP 
     91      rank=status.ep_src ; 
     92      #endif 
    8993      okLoop = true; 
    9094      if (pendingRequest.find(rank)==pendingRequest.end()) 
     
    112116    char * addr; 
    113117    map<int,CServerBuffer*>::iterator it; 
     118    #ifdef _usingMPI 
    114119    int rank=status.MPI_SOURCE ; 
     120    #elif _usingEP 
     121    int rank=status.ep_src; 
     122    #endif     
    115123 
    116124    it=buffers.find(rank); 
     
    118126    { 
    119127       StdSize buffSize = 0; 
    120        MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status); 
     128       MPI_Request request; 
     129        
     130       MPI_Irecv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &request); 
     131       MPI_Wait(&request, &status); 
    121132       mapBufferSize_.insert(std::make_pair(rank, buffSize)); 
    122133       it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first; 
     
    132143         bufferRequest[rank]=addr; 
    133144         return true; 
    134        } 
     145      } 
    135146      else 
    136147        return false; 
     
    253264    { 
    254265      finished=true; 
     266      #pragma omp critical (_output) 
    255267      info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 
    256268      context->finalize(); 
     
    260272      { 
    261273        rank = itMap->first; 
     274        #pragma omp critical (_output) 
    262275        report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl 
    263276            << "  +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl; 
    264277        totalBuf += itMap->second; 
    265278      } 
     279      #pragma omp critical (_output) 
    266280      report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 
    267281    } 
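
    context_server.cpp replaces the blocking MPI_Recv of the buffer size with MPI_Irecv followed by MPI_Wait, and under ep_lib reads the source rank from status.ep_src (with MPI_ANY_SOURCE written as -2) instead of status.MPI_SOURCE. A standalone plain-MPI sketch of the receive side, with receiveBufferSize() as an illustrative helper:

        #include <mpi.h>

        long receiveBufferSize(MPI_Comm interComm, const MPI_Status& probeStatus)
        {
          // Under ep_lib the source comes from probeStatus.ep_src; plain MPI is
          // used here so the sketch stays standalone.
          int rank = probeStatus.MPI_SOURCE;

          long buffSize = 0;
          MPI_Request request;
          MPI_Status status;
          MPI_Irecv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &request);
          MPI_Wait(&request, &status);
          return buffSize;
        }
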
  • XIOS/dev/dev_trunk_omp/src/context_server.hpp

    r1228 r1601  
    1414    public: 
    1515 
    16     CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; 
     16    CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ; 
    1717    bool eventLoop(bool enableEventsProcessing = true); 
    1818    void listen(void) ; 
    19     bool listenPendingRequest(MPI_Status& status) ; 
     19    bool listenPendingRequest(ep_lib::MPI_Status& status); 
    2020    void checkPendingRequest(void) ; 
    2121    void processRequest(int rank, char* buff,int count) ; 
     
    2626    bool hasPendingEvent(void) ; 
    2727 
    28     MPI_Comm intraComm ; 
     28    ep_lib::MPI_Comm intraComm ; 
    2929    int intraCommSize ; 
    3030    int intraCommRank ; 
    3131 
    32     MPI_Comm interComm ; 
     32    ep_lib::MPI_Comm interComm ; 
    3333    int commSize ; 
    3434 
    3535    map<int,CServerBuffer*> buffers ; 
    36     map<int,MPI_Request> pendingRequest ; 
     36    map<int,ep_lib::MPI_Request> pendingRequest ; 
    3737    map<int,char*> bufferRequest ; 
    3838 
  • XIOS/dev/dev_trunk_omp/src/cxios.cpp

    r1519 r1601  
    1111#include "memtrack.hpp" 
    1212#include "registry.hpp" 
     13using namespace ep_lib; 
    1314 
    1415namespace xios 
    1516{ 
    16   string CXios::rootFile="./iodef.xml" ; 
    17   string CXios::xiosCodeId="xios.x" ; 
    18   string CXios::clientFile="./xios_client"; 
    19   string CXios::serverFile="./xios_server"; 
    20   string CXios::serverPrmFile="./xios_server1"; 
    21   string CXios::serverSndFile="./xios_server2"; 
     17  const string CXios::rootFile="./iodef.xml" ; 
     18  const string CXios::xiosCodeId="xios.x" ; 
     19  const string CXios::clientFile="./xios_client"; 
     20  const string CXios::serverFile="./xios_server"; 
     21  const string CXios::serverPrmFile="./xios_server1"; 
     22  const string CXios::serverSndFile="./xios_server2"; 
    2223 
    2324  bool CXios::isClient ; 
     
    4344  { 
    4445    set_new_handler(noMemory); 
    45     parseFile(rootFile); 
     46    int tmp_rank; 
     47    MPI_Comm_rank(MPI_COMM_WORLD, &tmp_rank); 
     48    #pragma omp critical 
     49    { 
     50      std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl; 
     51      parseFile(rootFile); 
     52      std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsed rootfile"<<std::endl; 
     53    } 
     54    #pragma omp barrier 
    4655    parseXiosConfig(); 
    4756  } 
     
    8190    checkEventSync = getin<bool>("check_event_sync", checkEventSync); 
    8291 
    83     globalComm=MPI_COMM_WORLD ; 
     92    //globalComm=MPI_COMM_WORLD ; 
     93    int num_ep; 
     94    if(isClient)   
     95    {  
     96      num_ep = omp_get_num_threads(); 
     97    } 
     98         
     99    if(isServer)  
     100    {  
     101      num_ep = 1; 
     102    } 
     103         
     104    MPI_Info info; 
     105    #pragma omp master 
     106    { 
     107      MPI_Comm *ep_comm; 
     108      MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm);  // servers should reach here too. 
     109      passage = ep_comm;  
     110    } 
     111         
     112    #pragma omp barrier 
     113     
     114           
     115    CXios::globalComm = passage[omp_get_thread_num()]; 
    84116  } 
    85117 
     
    92124  void CXios::initClientSide(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm) 
    93125  { 
     126    isClient = true; 
     127    isServer = false; 
     128 
    94129    initialize() ; 
    95  
    96     isClient = true; 
    97130 
    98131    CClient::initialize(codeId,localComm,returnComm) ; 
     
    105138    if (printLogs2Files) 
    106139    { 
     140      #pragma omp critical 
    107141      CClient::openInfoStream(clientFile); 
    108142      CClient::openErrorStream(clientFile); 
     
    120154     if (CClient::getRank()==0) 
    121155     { 
     156       #pragma omp critical (_output) 
    122157       info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 
    123158       globalRegistry->toFile("xios_registry.bin") ; 
     
    155190  void CXios::initServerSide(void) 
    156191  { 
    157     initServer(); 
     192 
    158193    isClient = false; 
    159194    isServer = true; 
     195 
     196    initServer(); 
    160197 
    161198    // Initialize all aspects MPI 
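
    In cxios.cpp the global communicator is now built once by the master thread, which creates one endpoint communicator per thread with ep_lib's MPI_Comm_create_endpoints and publishes the array through the shared passage pointer; after a barrier each thread picks the entry matching its omp_get_thread_num(). A standalone sketch of that hand-off, with plain ints standing in for the endpoint communicators:

        #include <omp.h>
        #include <cstdio>

        // 'passage' mirrors the shared pointer of the same name in cxios.cpp;
        // ints stand in for the handles MPI_Comm_create_endpoints would return.
        static int* passage = nullptr;

        int main()
        {
          #pragma omp parallel
          {
            int num_ep = omp_get_num_threads();

            #pragma omp master
            {
              // One handle per thread, created once by the master thread.
              passage = new int[num_ep];
              for (int i = 0; i < num_ep; ++i) passage[i] = i;
            }
            #pragma omp barrier

            // Every thread picks its own entry (servers run with num_ep == 1).
            int globalComm = passage[omp_get_thread_num()];
            std::printf("thread %d uses endpoint handle %d\n",
                        omp_get_thread_num(), globalComm);
          }
          delete [] passage;
          return 0;
        }
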
  • XIOS/dev/dev_trunk_omp/src/cxios.hpp

    r1377 r1601  
    1414  { 
    1515    public: 
    16      static void initialize(void) ; 
    17      static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ; 
    18      static void initServerSide(void) ; 
    19      static void clientFinalize(void) ; 
    20      static void parseFile(const string& filename) ; 
     16      static void initialize(void) ; 
     17      static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 
     18      static void initServerSide(void) ; 
     19      static void clientFinalize(void) ; 
     20      static void parseFile(const string& filename) ; 
    2121 
    22      template <typename T> 
    23      static T getin(const string& id,const T& defaultValue) ; 
     22      template <typename T> 
     23      static T getin(const string& id,const T& defaultValue) ; 
    2424 
    25      template <typename T> 
    26      static T getin(const string& id) ; 
     25      template <typename T> 
     26      static T getin(const string& id) ; 
    2727 
    2828    public: 
    29      static string rootFile ; //!< Configuration filename 
    30      static string xiosCodeId ; //!< Identity for XIOS 
    31      static string clientFile;        //!< Filename template for client 
    32      static string serverFile;        //!< Filename template for server 
    33      static string serverPrmFile;  //!< Filename template for primary server in case of two server levels 
    34      static string serverSndFile;  //!< Filename template for secondary server in case of two server levels 
     29     static const string rootFile ; //!< Configuration filename 
     30     static const string xiosCodeId ; //!< Identity for XIOS 
     31     static const string clientFile;        //!< Filename template for client 
     32     static const string serverFile;        //!< Filename template for server 
     33     static const string serverPrmFile;  //!< Filename template for primary server in case of two server levels 
     34     static const string serverSndFile;  //!< Filename template for secondary server in case of two server levels 
    3535 
    3636     static bool isClient ; //!< Check if xios is client 
     37     #pragma omp threadprivate(isClient) 
    3738     static bool isServer ; //!< Check if xios is server 
     39     #pragma omp threadprivate(isServer) 
    3840 
    39      static MPI_Comm globalComm ; //!< Global communicator 
     41     static ep_lib::MPI_Comm globalComm ; //!< Global communicator 
     42     #pragma omp threadprivate(globalComm) 
    4043 
    4144     static bool printLogs2Files; //!< Printing out logs into files 
     45     #pragma omp threadprivate(printLogs2Files) 
    4246     static bool usingOasis ;     //!< Using Oasis 
     47     #pragma omp threadprivate(usingOasis) 
    4348     static bool usingServer ;    //!< Using server (server mode) 
     49     #pragma omp threadprivate(usingServer) 
    4450     static bool usingServer2 ;   //!< Using secondary server (server mode). IMPORTANT: Use this variable ONLY in CServer::initialize(). 
     51     #pragma omp threadprivate(usingServer2) 
    4552     static int ratioServer2 ;    //!< Percentage of server processors dedicated to secondary server 
     53     #pragma omp threadprivate(ratioServer2) 
    4654     static int nbPoolsServer2 ;  //!< Number of pools created on the secondary server 
     55     #pragma omp threadprivate(nbPoolsServer2) 
    4756     static double bufferSizeFactor; //!< Factor used to tune the buffer size 
     57     #pragma omp threadprivate(bufferSizeFactor) 
    4858     static const double defaultBufferSizeFactor; //!< Default factor value 
    4959     static StdSize minBufferSize; //!< Minimum buffer size 
     60     #pragma omp threadprivate(minBufferSize) 
    5061     static StdSize maxBufferSize; //!< Maximum buffer size 
      62     #pragma omp threadprivate(maxBufferSize) 
    5163     static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible) 
     64     #pragma omp threadprivate(isOptPerformance) 
    5265     static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers 
     66     #pragma omp threadprivate(globalRegistry) 
    5367     static double recvFieldTimeout; //!< Time to wait for data before issuing an error when receiving a field 
     68     #pragma omp threadprivate(recvFieldTimeout) 
     5469     static bool checkEventSync; //!< For debugging, check that events are coherent and synchronous on the client side 
    5570 
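
The recurring change in this header is #pragma omp threadprivate applied to static members, so that each OpenMP thread acting as an MPI endpoint keeps its own copy of the configuration state. A minimal, self-contained sketch of the pattern follows; the class and member names are illustrative and not taken from XIOS.

    #include <omp.h>
    #include <cstdio>

    // Each OpenMP thread gets its own copy of these statics.
    class CConfig
    {
      public:
        static bool isClient;
        #pragma omp threadprivate(isClient)
        static int  bufferSize;
        #pragma omp threadprivate(bufferSize)
    };

    bool CConfig::isClient   = false;
    int  CConfig::bufferSize = 0;

    int main()
    {
      #pragma omp parallel
      {
        // Every thread writes its private copy: no data race despite the statics.
        CConfig::isClient   = (omp_get_thread_num() % 2 == 0);
        CConfig::bufferSize = 1024 * (omp_get_thread_num() + 1);
        #pragma omp critical
        std::printf("thread %d: isClient=%d bufferSize=%d\n",
                    omp_get_thread_num(), (int)CConfig::isClient, CConfig::bufferSize);
      }
      return 0;
    }
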
  • XIOS/dev/dev_trunk_omp/src/dht_auto_indexing.cpp

    r1158 r1601  
    88 */ 
    99#include "dht_auto_indexing.hpp" 
     10using namespace ep_lib; 
    1011 
    1112namespace xios 
  • XIOS/dev/dev_trunk_omp/src/dht_auto_indexing.hpp

    r924 r1601  
    2525 
    2626    CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 
    27                      const MPI_Comm& clientIntraComm); 
     27                     const ep_lib::MPI_Comm& clientIntraComm); 
    2828 
    2929    CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 
    30                      const MPI_Comm& clientIntraComm); 
     30                     const ep_lib::MPI_Comm& clientIntraComm); 
    3131 
    3232    size_t getNbIndexesGlobal() const; 
  • XIOS/dev/dev_trunk_omp/src/event_client.cpp

    r1377 r1601  
    5050     std::list<CMessage*>::iterator itMsg = messages.begin(); 
    5151 
    52      if (CXios::checkEventSync) info(100)<<"Send event "<<timeLine<<" classId : "<<classId<<"  typeId : "<<typeId<<endl ; 
     52     if (CXios::checkEventSync) 
     53     { 
     54       #pragma omp critical(_output) 
     55       info(100)<<"Send event "<<timeLine<<" classId : "<<classId<<"  typeId : "<<typeId<<endl ; 
     56     } 
    5357     for (; itBuff != buffers.end(); ++itBuff, ++itSizes, ++itSenders, ++itMsg) 
    5458     { 
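
Shared log streams are not thread-safe, which is why the diagnostic line above is now wrapped in a named critical section. Below is a hedged sketch of the same idea against plain std::cout; the critical section name _output matches the one used in the branch, everything else is illustrative.

    #include <omp.h>
    #include <iostream>

    // Emit one coherent line per call: the named critical section serialises
    // access to std::cout so characters from different threads cannot interleave.
    void logEvent(int timeLine, int classId, int typeId)
    {
      #pragma omp critical (_output)
      {
        std::cout << "Send event " << timeLine
                  << " classId : " << classId
                  << " typeId : " << typeId << std::endl;
      }
    }

    int main()
    {
      #pragma omp parallel for
      for (int i = 0; i < 8; ++i) logEvent(i, 100 + i, 200 + i);
      return 0;
    }
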
  • XIOS/dev/dev_trunk_omp/src/event_scheduler.cpp

    r1224 r1601  
    33#include "mpi.hpp" 
    44#include "tracer.hpp" 
     5 
     6using namespace ep_lib; 
    57 
    68namespace xios 
     
    135137    while(received) 
    136138    { 
    137       MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ; 
     139      MPI_Iprobe(-2,1,communicator,&received, &status) ; 
    138140      if (received) 
    139141      { 
    140142        recvRequest=new SPendingRequest ; 
    141         MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ; 
     143        MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -2, 1, communicator, &(recvRequest->request)) ; 
    142144        pendingRecvParentRequest.push(recvRequest) ; 
    143145      } 
     
    177179    while(received) 
    178180    { 
    179       MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ; 
     181      MPI_Iprobe(-2,0,communicator,&received, &status) ; 
    180182      if (received) 
    181183      { 
    182184        recvRequest=new SPendingRequest ; 
    183         MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ; 
     185        MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -2, 0, communicator, &recvRequest->request) ; 
    184186        pendingRecvChildRequest.push_back(recvRequest) ; 
    185187      } 
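
The only substantive edit in this loop is the wildcard: the endpoint library encodes MPI_ANY_SOURCE as -2, so the literal replaces the named constant in the probe and receive calls. The surrounding probe-then-receive polling pattern is unchanged; below is a standalone version of it against plain MPI, with a self-send in main so the loop has something to drain. The tag and the three-word payload follow the SPendingRequest structure shown in the header just below, the rest is illustrative.

    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    // One pending receive: the 3-word payload mirrors {timeLine, hashId, level}.
    struct SPendingRequest
    {
      unsigned long buffer[3];
      MPI_Request   request;
    };

    // Drain every message currently pending with the given tag without blocking:
    // probe first, then post a non-blocking receive for each hit.
    static void pollPending(MPI_Comm comm, int tag, std::vector<SPendingRequest*>& pending)
    {
      int received = 1;
      MPI_Status status;
      while (received)
      {
        MPI_Iprobe(MPI_ANY_SOURCE, tag, comm, &received, &status);
        if (received)
        {
          SPendingRequest* r = new SPendingRequest;
          MPI_Irecv(r->buffer, 3, MPI_UNSIGNED_LONG, status.MPI_SOURCE, tag, comm, &r->request);
          pending.push_back(r);
        }
      }
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Self-send one event so the poll loop has something to pick up.
      unsigned long msg[3] = { 1UL, 42UL, 0UL };
      MPI_Request sendReq;
      MPI_Isend(msg, 3, MPI_UNSIGNED_LONG, rank, 1, MPI_COMM_WORLD, &sendReq);

      std::vector<SPendingRequest*> pending;
      while (pending.empty()) pollPending(MPI_COMM_WORLD, 1, pending);

      MPI_Wait(&sendReq, MPI_STATUS_IGNORE);
      for (SPendingRequest* r : pending)
      {
        MPI_Wait(&r->request, MPI_STATUS_IGNORE);
        std::printf("rank %d got event: timeLine=%lu hashId=%lu level=%lu\n",
                    rank, r->buffer[0], r->buffer[1], r->buffer[2]);
        delete r;
      }
      MPI_Finalize();
      return 0;
    }
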
  • XIOS/dev/dev_trunk_omp/src/event_scheduler.hpp

    r591 r1601  
     2626        *  @param[in] comm : MPI communicator to duplicate for internal use 
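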
    2727        */ 
    28        CEventScheduler(const MPI_Comm& comm) ; 
     28       CEventScheduler(const ep_lib::MPI_Comm& comm) ; 
    2929 
    3030 
     
    151151       { 
    152152         size_t buffer[3] ;      /*!< communication buffer : timeLine, hashId, level */ 
    153          MPI_Request request ;   /*!< pending MPI request */  
     153         ep_lib::MPI_Request request ;   /*!< pending MPI request */  
    154154       } ; 
    155155        
    156        MPI_Comm communicator ;  /*!< Internal MPI communicator */  
     156       ep_lib::MPI_Comm communicator ;  /*!< Internal MPI communicator */  
    157157       int mpiRank ;            /*!< Rank in the communicator */ 
    158158       int mpiSize ;            /*!< Size of the communicator */ 
  • XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.cpp

    r1542 r1601  
     1#include "mpi.hpp" 
    12#include "spatial_transform_filter.hpp" 
    23#include "grid_transformation.hpp" 
     
    45#include "context_client.hpp" 
    56#include "timer.hpp" 
     7using namespace ep_lib; 
    68 
    79namespace xios 
     
    140142  } 
    141143 
    142   std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> > CSpatialTransformFilterEngine::engines; 
     144  std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> > *CSpatialTransformFilterEngine::engines_ptr = 0; 
    143145 
    144146  CSpatialTransformFilterEngine* CSpatialTransformFilterEngine::get(CGridTransformation* gridTransformation) 
     
    147149      ERROR("CSpatialTransformFilterEngine& CSpatialTransformFilterEngine::get(CGridTransformation* gridTransformation)", 
    148150            "Impossible to get the requested engine, the grid transformation is invalid."); 
    149  
    150     std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines.find(gridTransformation); 
    151     if (it == engines.end()) 
     151     
     152    if(engines_ptr == NULL) engines_ptr = new std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> >; 
     153 
     154 
     155    std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines_ptr->find(gridTransformation); 
     156    if (it == engines_ptr->end()) 
    152157    { 
    153158      std::shared_ptr<CSpatialTransformFilterEngine> engine(new CSpatialTransformFilterEngine(gridTransformation)); 
    154       it = engines.insert(std::make_pair(gridTransformation, engine)).first; 
     159      it = engines_ptr->insert(std::make_pair(gridTransformation, engine)).first; 
    155160    } 
    156161 
     
    230235 
    231236      idxSendBuff = 0; 
    232       std::vector<MPI_Request> sendRecvRequest; 
     237      std::vector<MPI_Request> sendRecvRequest(localIndexToSend.size() + itListRecv->size()); 
     238      int position = 0; 
    233239      for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 
    234240      { 
     
    240246          sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 
    241247        } 
    242         sendRecvRequest.push_back(MPI_Request()); 
    243         MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 
     248        MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position++]); 
    244249      } 
    245250 
     
    258263        int srcRank = itRecv->first; 
    259264        int countSize = itRecv->second.size(); 
    260         sendRecvRequest.push_back(MPI_Request()); 
    261         MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 
     265        MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position++]); 
    262266        currentBuff += countSize; 
    263267      } 
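
Besides turning the static engine map into a lazily allocated, threadprivate pointer, this hunk pre-sizes the request vector so that MPI_Isend and MPI_Irecv write their handles into stable slots; growing the vector while requests are in flight could reallocate it and invalidate the addresses already handed to MPI. A reduced sketch of that pattern, keeping tag 12 from the hunk but with illustrative function and variable names and an assumed flat buffer layout:

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Post one send and one receive per peer into a pre-sized request vector;
    // the addresses given to MPI stay valid because no reallocation can occur.
    void exchange(MPI_Comm comm, const std::vector<int>& peers,
                  std::vector<double>& sendBuf, std::vector<double>& recvBuf, int count)
    {
      std::vector<MPI_Request> requests(2 * peers.size());
      int position = 0;
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Isend(&sendBuf[i * count], count, MPI_DOUBLE, peers[i], 12, comm, &requests[position++]);
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Irecv(&recvBuf[i * count], count, MPI_DOUBLE, peers[i], 12, comm, &requests[position++]);
      MPI_Waitall(position, requests.data(), MPI_STATUSES_IGNORE);
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      const int count = 4;
      std::vector<int> peers(1, rank);                 // exchange with self for the demo
      std::vector<double> sendBuf(count, double(rank)), recvBuf(count, -1.0);
      exchange(MPI_COMM_WORLD, peers, sendBuf, recvBuf, count);
      MPI_Finalize();
      return 0;
    }
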
  • XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.hpp

    r1542 r1601  
    141141 
    142142      //! The allocated engines 
    143       static std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> > engines; 
     143 
     144      static std::map<CGridTransformation*, std::shared_ptr<CSpatialTransformFilterEngine> > *engines_ptr; 
     145      #pragma omp threadprivate(engines_ptr) 
    144146  }; // class CSpatialTransformFilterEngine 
    145147} // namespace xios 
  • XIOS/dev/dev_trunk_omp/src/filter/temporal_filter.cpp

    r1523 r1601  
    2222                        this->samplingOffset.second, this->samplingOffset.timestep) 
    2323    , initDate(initDate) 
    24 //    , nextSamplingDate(initDate + (this->samplingOffset + initDate.getRelCalendar().getTimeStep())) 
    2524    , nextSamplingDate(initDate + offsetMonth + ( offsetAllButMonth + initDate.getRelCalendar().getTimeStep())) 
    2625    , nbOperationDates(1) 
  • XIOS/dev/dev_trunk_omp/src/group_factory.cpp

    r501 r1601  
    44{ 
    55   /// ////////////////////// Définitions ////////////////////// /// 
    6    StdString CGroupFactory::CurrContext(""); 
     6   StdString *CGroupFactory::CurrContext_ptr = 0; 
    77 
    88   void CGroupFactory::SetCurrentContextId(const StdString & context) 
    9    {  
    10       CGroupFactory::CurrContext = context; 
     9   {   
     10      if(CGroupFactory::CurrContext_ptr == 0 ) CGroupFactory::CurrContext_ptr = new StdString;  
     11      CGroupFactory::CurrContext_ptr->assign(context); 
    1112   } 
    1213 
    1314   StdString & CGroupFactory::GetCurrentContextId(void) 
    1415   {  
    15       return (CGroupFactory::CurrContext); 
     16      return (*CGroupFactory::CurrContext_ptr); 
    1617   } 
    1718 
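
The static std::string becomes a threadprivate pointer that each thread allocates on first use, giving every thread its own current-context id without requiring a threadprivate object of class type. A standalone sketch of the idiom; the class name and the demo context ids are hypothetical, and, as in the source, the per-thread string is never freed.

    #include <omp.h>
    #include <string>
    #include <cstdio>

    class CFactory
    {
      public:
        static void SetCurrentContextId(const std::string& context)
        {
          // Lazily allocate the per-thread copy the first time this thread uses it.
          if (CurrContext_ptr == 0) CurrContext_ptr = new std::string;
          CurrContext_ptr->assign(context);
        }
        static std::string& GetCurrentContextId(void) { return *CurrContext_ptr; }

      private:
        static std::string* CurrContext_ptr;
        #pragma omp threadprivate(CurrContext_ptr)
    };

    std::string* CFactory::CurrContext_ptr = 0;

    int main()
    {
      #pragma omp parallel
      {
        // Each thread sets and reads its own context id.
        CFactory::SetCurrentContextId("context_" + std::to_string(omp_get_thread_num()));
        #pragma omp critical
        std::printf("thread %d -> %s\n", omp_get_thread_num(),
                    CFactory::GetCurrentContextId().c_str());
      }
      return 0;
    }
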
  • XIOS/dev/dev_trunk_omp/src/group_factory.hpp

    r1542 r1601  
    6666 
    6767         /// Propriétés statiques /// 
    68          static StdString CurrContext; 
     68         static StdString *CurrContext_ptr; 
     69         #pragma omp threadprivate(CurrContext_ptr) 
    6970 
    7071   }; // class CGroupFactory 
  • XIOS/dev/dev_trunk_omp/src/indent.hpp

    r501 r1601  
    1010    public: 
    1111    static int defaultIncSize; 
     12    #pragma omp threadprivate(defaultIncSize) 
    1213    static int index ; 
     14    #pragma omp threadprivate(index) 
    1315    int incSize ; 
    1416    int offset ; 
  • XIOS/dev/dev_trunk_omp/src/indent_xml.hpp

    r591 r1601  
    2222         /// Propriétés  statiques /// 
    2323         static unsigned int Indent; 
     24         #pragma omp threadprivate(Indent) 
    2425         static StdString    Increm; 
     26         #pragma omp threadprivate(Increm) 
    2527         static bool         WithLine; 
     28         #pragma omp threadprivate(WithLine) 
    2629 
    2730   }; // class CIndent 
  • XIOS/dev/dev_trunk_omp/src/interface/c/icdata.cpp

    r1587 r1601  
    99#include <iostream> 
    1010 
    11  
     11#include "mpi_std.hpp" 
    1212#include "xios.hpp" 
    13 #include "oasis_cinterface.hpp" 
     13//#include "oasis_cinterface.hpp" 
    1414 
    1515#include "attribute_template.hpp" 
     
    2323#include "context.hpp" 
    2424#include "context_client.hpp" 
    25 #include "mpi.hpp" 
     25 
    2626#include "timer.hpp" 
    2727#include "array_new.hpp" 
     
    5555   { 
    5656      std::string str; 
    57       MPI_Comm local_comm; 
    58       MPI_Comm return_comm; 
     57      ep_lib::MPI_Comm local_comm; 
     58      ep_lib::MPI_Comm return_comm; 
    5959 
    6060      if (!cstr2string(client_id, len_client_id, str)) return; 
     
    6262      int initialized; 
    6363      MPI_Initialized(&initialized); 
     64      #ifdef _usingMPI 
    6465      if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 
    6566      else local_comm=MPI_COMM_NULL; 
     67      #elif _usingEP 
     68      ep_lib::fc_comm_map.clear(); 
     69      if (initialized) local_comm=ep_lib::EP_Comm_f2c((f_local_comm)); 
     70      else local_comm=MPI_COMM_NULL; 
     71      #endif 
     72       
     73 
     74 
    6675      CXios::initClientSide(str, local_comm, return_comm); 
     76      #ifdef _usingMPI 
    6777      *f_return_comm=MPI_Comm_c2f(return_comm); 
     78      #elif _usingEP 
     79      *f_return_comm=*static_cast<MPI_Fint*>(ep_lib::EP_Comm_c2f(return_comm)); 
     80      #endif 
    6881      CTimer::get("XIOS init").suspend(); 
    6982      CTimer::get("XIOS").suspend(); 
     
    7386   { 
    7487     std::string str; 
    75      MPI_Comm comm; 
     88     ep_lib::MPI_Comm comm; 
    7689 
    7790     if (!cstr2string(context_id, len_context_id, str)) return; 
    7891     CTimer::get("XIOS").resume(); 
    7992     CTimer::get("XIOS init context").resume(); 
     93     #ifdef _usingMPI 
    8094     comm=MPI_Comm_f2c(*f_comm); 
     95     #elif _usingEP 
     96     comm = ep_lib::EP_Comm_f2c(f_comm); 
     97     #endif 
    8198     CClient::registerContext(str, comm); 
    8299     CTimer::get("XIOS init context").suspend(); 
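
At the Fortran/C boundary the communicator handle has to be converted explicitly: MPI_Comm_f2c and MPI_Comm_c2f in a plain-MPI build, the ep_lib equivalents when endpoints are enabled. A minimal sketch of the plain-MPI half of that conversion; the function name and the MPI_Comm_dup standing in for the real initialisation work are illustrative.

    #include <mpi.h>
    #include <cstdio>

    // Convert a Fortran communicator handle to a C handle and back again,
    // as done at the icdata.cpp language boundary in the plain-MPI build.
    extern "C" void cxios_demo_init(MPI_Fint* f_local_comm, MPI_Fint* f_return_comm)
    {
      MPI_Comm local_comm = MPI_Comm_f2c(*f_local_comm);  // Fortran -> C

      MPI_Comm return_comm;
      MPI_Comm_dup(local_comm, &return_comm);             // stand-in for the real init work

      *f_return_comm = MPI_Comm_c2f(return_comm);         // C -> Fortran
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Fint f_world = MPI_Comm_c2f(MPI_COMM_WORLD);
      MPI_Fint f_out;
      cxios_demo_init(&f_world, &f_out);
      std::printf("returned Fortran handle: %d\n", (int)f_out);
      MPI_Comm out = MPI_Comm_f2c(f_out);
      MPI_Comm_free(&out);
      MPI_Finalize();
      return 0;
    }
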
  • XIOS/dev/dev_trunk_omp/src/interface/c/oasis_cinterface.cpp

    r501 r1601  
    11#include "oasis_cinterface.hpp" 
    22#include <string> 
    3 #include "mpi.hpp" 
     3using namespace ep_lib; 
    44 
    55namespace xios 
     
    2626     
    2727    fxios_oasis_get_localcomm(&f_comm) ; 
    28     comm=MPI_Comm_f2c(f_comm) ; 
     28    //comm=MPI_Comm_f2c(f_comm) ; 
    2929  } 
    3030  
     
    3434     
    3535    fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 
    36     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     36    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    3737  } 
    3838  
     
    4242     
    4343    fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 
    44     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     44    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    4545  } 
    4646} 
  • XIOS/dev/dev_trunk_omp/src/interface/c/oasis_cinterface.hpp

    r501 r1601  
    1010  void fxios_oasis_enddef(void) ; 
    1111  void fxios_oasis_finalize(void) ; 
    12   void fxios_oasis_get_localcomm(MPI_Fint* f_comm) ; 
    13   void fxios_oasis_get_intracomm(MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 
    14   void fxios_oasis_get_intercomm(MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 
     12  void fxios_oasis_get_localcomm(ep_lib::MPI_Fint* f_comm) ; 
     13  void fxios_oasis_get_intracomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 
     14  void fxios_oasis_get_intercomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 
    1515} 
    1616  
     
    2020  void oasis_enddef(void) ; 
    2121  void oasis_finalize(void) ; 
    22   void oasis_get_localcomm(MPI_Comm& comm) ; 
    23   void oasis_get_intracomm(MPI_Comm& comm_client_server,const std::string& server_id) ; 
    24   void oasis_get_intercomm(MPI_Comm& comm_client_server,const std::string& server_id) ; 
     22  void oasis_get_localcomm(ep_lib::MPI_Comm& comm) ; 
     23  void oasis_get_intracomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 
     24  void oasis_get_intercomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 
    2525} 
    2626#endif 
  • XIOS/dev/dev_trunk_omp/src/io/inetcdf4.cpp

    r1534 r1601  
    22#include "netCdfInterface.hpp" 
    33#include "netCdf_cf_constant.hpp" 
    4  
     4#include "ep_mpi.hpp" 
    55#include <boost/algorithm/string.hpp> 
    66 
    77namespace xios 
    88{ 
    9   CINetCDF4::CINetCDF4(const StdString& filename, const MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, 
     9  CINetCDF4::CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, 
    1010                       bool readMetaDataPar /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 
    1111  { 
     
    1414    { 
    1515      int commSize = 0; 
    16       MPI_Comm_size(*comm, &commSize); 
     16      ep_lib::MPI_Comm_size(*comm, &commSize); 
    1717      if (commSize <= 1) 
    1818        comm = NULL; 
     
    2323    // even if Parallel NetCDF ends up being used. 
    2424    if (mpi) 
    25       CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp); 
     25      CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 
    2626    else 
    2727      CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); 
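
Whether the file is opened with the parallel or the serial netCDF entry point is decided from the size of the communicator that reaches the constructor: a single-process communicator falls back to nc_open. A condensed sketch of that decision against the plain netCDF-C API, assuming a parallel-enabled netCDF build; the file name is a placeholder and error handling is reduced to a return-code check.

    #include <mpi.h>
    #include <netcdf.h>
    #include <netcdf_par.h>
    #include <cstdio>

    // Open a NetCDF file for reading: in parallel when more than one process
    // shares the communicator, serially otherwise.
    static int openForReading(const char* filename, MPI_Comm* comm, int* ncidp)
    {
      bool mpi = (comm != NULL);
      if (mpi)
      {
        int commSize = 0;
        MPI_Comm_size(*comm, &commSize);
        if (commSize <= 1) mpi = false;        // no point in a parallel open
      }

      if (mpi)
        return nc_open_par(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, ncidp);
      else
        return nc_open(filename, NC_NOWRITE, ncidp);
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      MPI_Comm comm = MPI_COMM_WORLD;
      int ncid = -1;
      int status = openForReading("input.nc", &comm, &ncid);   // "input.nc" is a placeholder
      if (status == NC_NOERR) nc_close(ncid);
      else std::printf("open failed: %s\n", nc_strerror(status));
      MPI_Finalize();
      return 0;
    }
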
  • XIOS/dev/dev_trunk_omp/src/io/inetcdf4.hpp

    r1485 r1601  
    77#include "array_new.hpp" 
    88 
    9 #include "mpi.hpp" 
     9#include "mpi_std.hpp" 
    1010#include "netcdf.hpp" 
    1111 
     
    2222    public: 
    2323      /// Constructors /// 
    24       CINetCDF4(const StdString& filename, const MPI_Comm* comm = NULL, bool multifile = true, 
     24      CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 
    2525                bool readMetaDataPar = false, const StdString& timeCounterName = "time_counter"); 
    2626 
  • XIOS/dev/dev_trunk_omp/src/io/nc4_data_input.cpp

    r1582 r1601  
    1010namespace xios 
    1111{ 
    12   CNc4DataInput::CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, 
     12  CNc4DataInput::CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, 
    1313                               bool readMetaDataPar /*= false*/, bool ugridConvention /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 
    1414    : SuperClass() 
     
    5555    CArray<double,1> fieldData(grid->getWrittenDataSize()); 
    5656    if (!field->default_value.isEmpty()) fieldData = field->default_value; 
    57  
     57    #ifdef _usingEP 
     58      SuperClass::type = ONE_FILE; 
     59      printf("SuperClass::type = %d\n", SuperClass::type); 
     60    #endif 
     61         
    5862    switch (SuperClass::type) 
    5963    { 
  • XIOS/dev/dev_trunk_omp/src/io/nc4_data_input.hpp

    r1486 r1601  
    33 
    44/// XIOS headers /// 
     5#include "mpi_std.hpp" 
    56#include "xios_spl.hpp" 
    67#include "data_input.hpp" 
     
    2324 
    2425    /// Constructors /// 
    25     CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective = true, 
     26    CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 
    2627                  bool readMetaDataPar = false, bool ugridConvention = false, const StdString& timeCounterName = "time_counter"); 
    2728    CNc4DataInput(const CNc4DataInput& dataInput);       // Not implemented. 
     
    7071  private: 
    7172    /// Private attributes /// 
    72     MPI_Comm comm_file; 
     73    ep_lib::MPI_Comm comm_file; 
    7374    const StdString filename; 
    7475    bool isCollective; 
  • XIOS/dev/dev_trunk_omp/src/io/nc4_data_output.cpp

    r1559 r1601  
    2828      CNc4DataOutput::CNc4DataOutput 
    2929         (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 
    30           MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
     30          ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
    3131            : SuperClass() 
    3232            , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) 
  • XIOS/dev/dev_trunk_omp/src/io/nc4_data_output.hpp

    r1542 r1601  
    44/// XIOS headers /// 
    55#include "xios_spl.hpp" 
     6#include "mpi_std.hpp" 
    67#include "onetcdf4.hpp" 
    78#include "data_output.hpp" 
     
    2728               (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, 
    2829                bool useCFConvention, 
    29                 MPI_Comm comm_file, bool multifile, bool isCollective = true, 
     30                ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 
    3031                const StdString& timeCounterName = "time_counter"); 
    3132 
     
    117118 
    118119            /// Propriétés privées /// 
    119             MPI_Comm comm_file; 
     120            ep_lib::MPI_Comm comm_file; 
    120121            const StdString filename; 
    121122            std::map<Time, StdSize> timeToRecordCache; 
  • XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.cpp

    r1454 r1601  
    1010#include "netCdfInterface.hpp" 
    1111#include "netCdfException.hpp" 
    12  
     12#include "ep_mpi.hpp" 
    1313namespace xios 
    1414{ 
     
    2222int CNetCdfInterface::create(const StdString& fileName, int cMode, int& ncId) 
    2323{ 
    24   int status = nc_create(fileName.c_str(), cMode, &ncId); 
     24  int status; 
     25  #pragma omp critical (_netcdf) 
     26  { 
     27    info(100)<<"start nc_create"<<std::endl; 
     28    status = nc_create(fileName.c_str(), cMode, &ncId); 
     29    info(100)<<"end nc_create"<<std::endl; 
     30  } 
    2531  if (NC_NOERR != status) 
    2632  { 
     
    4955int CNetCdfInterface::createPar(const StdString& fileName, int cMode, MPI_Comm comm, MPI_Info info, int& ncId) 
    5056{ 
    51   int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId); 
     57  int status = xios::nc_create_par(fileName.c_str(), cMode, comm, to_mpi_info(MPI_INFO_NULL), &ncId); 
     58 
    5259  if (NC_NOERR != status) 
    5360  { 
     
    7481int CNetCdfInterface::open(const StdString& fileName, int oMode, int& ncId) 
    7582{ 
    76   int status = nc_open(fileName.c_str(), oMode, &ncId); 
     83  int status; 
     84  #pragma omp critical (_netcdf) 
     85  { 
     86    info(100)<<"start nc_open"<<std::endl; 
     87    status = nc_open(fileName.c_str(), oMode, &ncId); 
     88    info(100)<<"end nc_open"<<std::endl; 
     89  } 
    7790  if (NC_NOERR != status) 
    7891  { 
     
    102115int CNetCdfInterface::openPar(const StdString& fileName, int oMode, MPI_Comm comm, MPI_Info info, int& ncId) 
    103116{ 
    104   int status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); 
     117  int status = xios::nc_open_par(fileName.c_str(), oMode, comm, to_mpi_info(MPI_INFO_NULL), &ncId); 
     118   
    105119  if (NC_NOERR != status) 
    106120  { 
     
    125139int CNetCdfInterface::close(int ncId) 
    126140{ 
    127   int status = nc_close(ncId); 
     141  int status = NC_NOERR; 
     142  #pragma omp critical (_netcdf) 
     143  { 
     144    info(100)<<"start nc_close"<<std::endl; 
     145    status = nc_close(ncId); 
     146    info(100)<<"end nc_close"<<std::endl; 
     147  } 
     148       
    128149  if (NC_NOERR != status) 
    129150  { 
     
    147168int CNetCdfInterface::reDef(int ncId) 
    148169{ 
    149   int status = nc_redef(ncId); 
     170  int status; 
     171  #pragma omp critical (_netcdf) 
     172  { 
     173    info(100)<<"start nc_reDef"<<std::endl; 
     174    status = nc_redef(ncId); 
     175    info(100)<<"end nc_reDef"<<std::endl; 
     176  } 
     177   
    150178  if (NC_NOERR != status) 
    151179  { 
     
    169197int CNetCdfInterface::endDef(int ncId) 
    170198{ 
    171   int status = nc_enddef(ncId); 
     199  int status; 
     200  #pragma omp critical (_netcdf) 
     201  { 
     202    info(100)<<"start nc_enddef"<<std::endl; 
     203    status = nc_enddef(ncId); 
     204    info(100)<<"end nc_enddef"<<std::endl; 
     205  } 
    172206  if (NC_NOERR != status) 
    173207  { 
     
    194228int CNetCdfInterface::inqNcId(int ncid, const StdString& grpName, int& grpId) 
    195229{ 
    196   int status = nc_inq_ncid(ncid, grpName.c_str(), &grpId); 
     230  int status; 
     231  #pragma omp critical (_netcdf) 
     232  { 
     233    info(100)<<"start nc_inq_ncid"<<std::endl; 
     234    status = nc_inq_ncid(ncid, grpName.c_str(), &grpId); 
     235    info(100)<<"end nc_inq_ncid"<<std::endl; 
     236  } 
     237   
    197238  if (NC_NOERR != status) 
    198239  { 
     
    220261int CNetCdfInterface::inqVarId(int ncid, const StdString& varName, int& varId) 
    221262{ 
    222   int status = nc_inq_varid(ncid, varName.c_str(), &varId); 
     263  int status; 
     264  #pragma omp critical (_netcdf) 
     265  { 
     266    info(100)<<"start nc_inq_varid"<<std::endl; 
     267    status = nc_inq_varid(ncid, varName.c_str(), &varId); 
     268    info(100)<<"end nc_inq_varid"<<std::endl; 
     269  } 
    223270  if (NC_NOERR != status) 
    224271  { 
     
    245292int CNetCdfInterface::inqDimId(int ncid, const StdString& dimName, int& dimId) 
    246293{ 
    247   int status = nc_inq_dimid(ncid, dimName.c_str(), &dimId); 
     294  int status; 
     295  #pragma omp critical (_netcdf) 
     296  { 
     297    info(100)<<"start nc_inq_dimid"<<std::endl; 
     298    status = nc_inq_dimid(ncid, dimName.c_str(), &dimId); 
     299    info(100)<<"end nc_inq_dimid"<<std::endl; 
     300  } 
     301   
    248302  if (NC_NOERR != status) 
    249303  { 
     
    271325{ 
    272326  char varNameBuff[NC_MAX_NAME + 1]; 
    273   int status = nc_inq_varname(ncid, varId, varNameBuff); 
     327  int status; 
     328  #pragma omp critical (_netcdf) 
     329  { 
     330    info(100)<<"start nc_inq_varname"<<std::endl; 
     331    status = nc_inq_varname(ncid, varId, varNameBuff); 
     332    info(100)<<"end nc_inq_varname"<<std::endl; 
     333  } 
    274334  if (NC_NOERR != status) 
    275335  { 
     
    295355int CNetCdfInterface::inqUnLimDim(int ncid, int& dimId) 
    296356{ 
    297   int status = nc_inq_unlimdim(ncid, &dimId); 
     357  int status; 
     358  #pragma omp critical (_netcdf) 
     359  { 
     360    info(100)<<"start nc_inq_unlimdim"<<std::endl; 
     361    status = nc_inq_unlimdim(ncid, &dimId); 
     362    info(100)<<"end nc_inq_unlimdim"<<std::endl; 
     363  } 
    298364  if (NC_NOERR != status) 
    299365  { 
     
    321387{ 
    322388  char fullNameIn[NC_MAX_NAME + 1]; 
    323   int status = nc_inq_dimname(ncid, dimId, fullNameIn); 
     389  int status; 
     390  #pragma omp critical (_netcdf) 
     391  { 
     392    info(100)<<"start nc_inq_dimname"<<std::endl; 
     393    status = nc_inq_dimname(ncid, dimId, fullNameIn); 
     394    info(100)<<"end nc_inq_dimname"<<std::endl; 
     395  } 
    324396  if (NC_NOERR != status) 
    325397  { 
     
    346418int CNetCdfInterface::inqDimLen(int ncid, int dimId, StdSize& dimLen) 
    347419{ 
    348   int status = nc_inq_dimlen(ncid, dimId, &dimLen); 
     420  int status; 
     421  #pragma omp critical (_netcdf) 
     422  { 
     423    info(100)<<"start nc_inq_dimlen"<<std::endl; 
     424    status = nc_inq_dimlen(ncid, dimId, &dimLen); 
     425    info(100)<<"end nc_inq_dimlen"<<std::endl; 
     426  } 
    349427  if (NC_NOERR != status) 
    350428  { 
     
    371449int CNetCdfInterface::inqVarNDims(int ncid, int varId, int& nDims) 
    372450{ 
    373   int status = nc_inq_varndims(ncid, varId, &nDims); 
     451  int status; 
     452  #pragma omp critical (_netcdf) 
     453  { 
     454    info(100)<<"start nc_inq_varndims"<<std::endl; 
     455    status = nc_inq_varndims(ncid, varId, &nDims); 
     456    info(100)<<"end nc_inq_varndims"<<std::endl; 
     457  } 
    374458  if (NC_NOERR != status) 
    375459  { 
     
    396480int CNetCdfInterface::inqVarDimId(int ncid, int varId, int* dimIds) 
    397481{ 
    398   int status = nc_inq_vardimid(ncid, varId, dimIds); 
     482  int status; 
     483  #pragma omp critical (_netcdf) 
     484  { 
     485    info(100)<<"start nc_inq_vardimid"<<std::endl; 
     486    status = nc_inq_vardimid(ncid, varId, dimIds); 
     487    info(100)<<"end nc_inq_vardimid"<<std::endl; 
     488  } 
    399489  if (NC_NOERR != status) 
    400490  { 
     
    422512int CNetCdfInterface::inqDimIds(int ncid, int& nDims, int* dimIds, int includeParents) 
    423513{ 
    424   int status = nc_inq_dimids(ncid, &nDims, dimIds, includeParents); 
     514  int status; 
     515  #pragma omp critical (_netcdf) 
     516  { 
     517    info(100)<<"start nc_inq_dimids"<<std::endl; 
     518    status = nc_inq_dimids(ncid, &nDims, dimIds, includeParents); 
     519    info(100)<<"end nc_inq_dimids"<<std::endl; 
     520  } 
    425521  if (NC_NOERR != status) 
    426522  { 
     
    449545  StdSize strlen = 0; 
    450546  std::vector<char> buff; 
    451   int status = nc_inq_grpname_full(ncid, &strlen, NULL); 
    452   if (NC_NOERR == status) 
    453   { 
    454     buff.resize(strlen + 1); 
    455     status = nc_inq_grpname_full(ncid, NULL, &buff[0]); 
    456   } 
    457  
     547  int status; 
     548  #pragma omp critical (_netcdf) 
     549  { 
     550    info(100)<<"start nc_inq_grpname_full"<<std::endl; 
     551    status = nc_inq_grpname_full(ncid, &strlen, NULL); 
     552    info(100)<<"end nc_inq_grpname_full"<<std::endl; 
     553   
     554    if (NC_NOERR == status) 
     555    { 
     556      buff.resize(strlen + 1); 
     557      status = nc_inq_grpname_full(ncid, NULL, &buff[0]); 
     558    } 
      559    info(100)<<"end nc_inq_grpname_full"<<std::endl; 
     560  } 
    458561  if (NC_NOERR != status) 
    459562  { 
     
    482585int CNetCdfInterface::inqGrpIds(int ncid, int& numgrps, int* ncids) 
    483586{ 
    484   int status = nc_inq_grps(ncid, &numgrps, ncids); 
     587  int status; 
     588  #pragma omp critical (_netcdf) 
     589  { 
     590    info(100)<<"start nc_inq_grps"<<std::endl; 
     591    status = nc_inq_grps(ncid, &numgrps, ncids); 
     592    info(100)<<"end nc_inq_grps"<<std::endl; 
     593  } 
    485594  if (NC_NOERR != status) 
    486595  { 
     
    507616int CNetCdfInterface::inqVarIds(int ncid, int& nvars, int* varids) 
    508617{ 
    509   int status = nc_inq_varids(ncid, &nvars, varids); 
     618  int status; 
     619  #pragma omp critical (_netcdf) 
     620  { 
     621    info(100)<<"start nc_inq_varids"<<std::endl; 
     622    status = nc_inq_varids(ncid, &nvars, varids); 
     623    info(100)<<"end nc_inq_varids"<<std::endl; 
     624  } 
    510625  if (NC_NOERR != status) 
    511626  { 
     
    534649int CNetCdfInterface::inqAtt(int ncid, int varid, const StdString& name, nc_type& type, size_t& len) 
    535650{ 
    536   int status = nc_inq_att(ncid, varid, name.c_str(), &type, &len); 
     651  int status; 
     652  #pragma omp critical (_netcdf) 
     653  { 
     654    info(100)<<"start nc_inq_att"<<std::endl; 
     655    status = nc_inq_att(ncid, varid, name.c_str(), &type, &len); 
     656    info(100)<<"end nc_inq_att"<<std::endl; 
     657  } 
     658   
    537659  if (NC_NOERR != status) 
    538660  { 
     
    558680int CNetCdfInterface::inqNAtts(int ncid, int& ngatts) 
    559681{ 
    560   int status = nc_inq_natts(ncid, &ngatts); 
     682  int status; 
     683  #pragma omp critical (_netcdf) 
     684  { 
     685    info(100)<<"start nc_inq_natts"<<std::endl; 
     686    status = nc_inq_natts(ncid, &ngatts); 
     687    info(100)<<"end nc_inq_natts"<<std::endl; 
     688  } 
    561689  if (NC_NOERR != status) 
    562690  { 
     
    583711int CNetCdfInterface::inqVarNAtts(int ncid, int varid, int& natts) 
    584712{ 
    585   int status = nc_inq_varnatts(ncid, varid, &natts); 
     713  int status; 
     714  #pragma omp critical (_netcdf) 
     715  { 
     716    info(100)<<"start nc_inq_varnatts"<<std::endl; 
     717    status = nc_inq_varnatts(ncid, varid, &natts); 
     718    info(100)<<"end nc_inq_varnatts"<<std::endl; 
     719  } 
    586720  if (NC_NOERR != status) 
    587721  { 
     
    604738{ 
    605739  std::vector<char> attName(NC_MAX_NAME + 1,' '); 
    606   int status = nc_inq_attname(ncid, varid, attnum, &attName[0]); 
     740  int status; 
     741  #pragma omp critical (_netcdf) 
     742  { 
     743    info(100)<<"start nc_inq_attname"<<std::endl; 
     744    status = nc_inq_attname(ncid, varid, attnum, &attName[0]); 
     745    info(100)<<"end nc_inq_attname"<<std::endl; 
     746  } 
    607747  if (NC_NOERR != status) 
    608748  { 
     
    635775int CNetCdfInterface::defGrp(int parentNcid, const StdString& grpName, int& grpId) 
    636776{ 
    637   int status = nc_def_grp(parentNcid, grpName.c_str(), &grpId); 
     777  int status; 
     778  #pragma omp critical (_netcdf) 
     779  { 
     780    info(100)<<"start nc_def_grp"<<std::endl; 
     781    status = nc_def_grp(parentNcid, grpName.c_str(), &grpId); 
     782    info(100)<<"end nc_def_grp"<<std::endl; 
     783  } 
    638784  if (NC_NOERR != status) 
    639785  { 
     
    660806int CNetCdfInterface::defDim(int ncid, const StdString& dimName, StdSize dimLen, int& dimId) 
    661807{ 
    662   int status = nc_def_dim(ncid, dimName.c_str(), dimLen, &dimId); 
     808  int status; 
     809  #pragma omp critical (_netcdf) 
     810  { 
     811    info(100)<<"start nc_def_dim"<<std::endl; 
     812    status = nc_def_dim(ncid, dimName.c_str(), dimLen, &dimId); 
     813    info(100)<<"end nc_def_dim"<<std::endl; 
     814  } 
    663815  if (NC_NOERR != status) 
    664816  { 
     
    691843                             int nDims, const int dimIds[], int& varId) 
    692844{ 
    693   int status = nc_def_var(ncid, varName.c_str(), xtype, nDims, dimIds, &varId); 
     845  int status; 
     846  #pragma omp critical (_netcdf) 
     847  { 
     848    info(100)<<"start nc_def_var"<<std::endl; 
     849    status = nc_def_var(ncid, varName.c_str(), xtype, nDims, dimIds, &varId); 
     850    info(100)<<"end nc_def_var"<<std::endl; 
     851  } 
    694852  if (NC_NOERR != status) 
    695853  { 
     
    720878int CNetCdfInterface::defVarChunking(int ncid, int varId, int storage, StdSize chunkSize[]) 
    721879{ 
    722   int status = nc_def_var_chunking(ncid, varId, storage, chunkSize); 
     880  int status; 
     881  #pragma omp critical (_netcdf) 
     882  { 
     883    info(100)<<"start nc_def_var_chunking"<<std::endl; 
     884    status = nc_def_var_chunking(ncid, varId, storage, chunkSize); 
     885    info(100)<<"end nc_def_var_chunking"<<std::endl; 
     886  } 
    723887  if (NC_NOERR != status) 
    724888  { 
     
    748912   
    749913  if (compressionLevel == 0) return NC_NOERR ; 
    750   int status = nc_def_var_deflate(ncid, varId, (compressionLevel > 0), (compressionLevel > 0), compressionLevel); 
     914  int status; 
     915  #pragma omp critical (_netcdf) 
     916  { 
     917    info(100)<<"start nc_def_var_deflate"<<std::endl; 
     918    status = nc_def_var_deflate(ncid, varId, (compressionLevel > 0), (compressionLevel > 0), compressionLevel); 
     919    info(100)<<"end nc_def_var_deflate"<<std::endl; 
     920  } 
    751921  if (NC_NOERR != status) 
    752922  { 
     
    774944{ 
    775945  int old_fill_mode; 
    776   int status = nc_set_fill(ncid, fill ? NC_FILL: NC_NOFILL, &old_fill_mode); 
     946  int status; 
     947  #pragma omp critical (_netcdf) 
     948  { 
     949    info(100)<<"start nc_set_fill"<<std::endl; 
     950    status = nc_set_fill(ncid, fill ? NC_FILL: NC_NOFILL, &old_fill_mode); 
     951    info(100)<<"end nc_set_fill"<<std::endl; 
     952  } 
    777953  if (NC_NOERR != status) 
    778954  { 
     
    801977int CNetCdfInterface::defVarFill(int ncid, int varId, int noFill, void* fillValue) 
    802978{ 
    803   int status = nc_def_var_fill(ncid, varId, noFill, fillValue); 
     979  int status; 
     980  #pragma omp critical (_netcdf) 
     981  { 
     982    info(100)<<"start nc_def_var_fill"<<std::endl; 
     983    status = nc_def_var_fill(ncid, varId, noFill, fillValue); 
     984    info(100)<<"end nc_def_var_fill"<<std::endl; 
     985  } 
    804986  if (NC_NOERR != status) 
    805987  { 
     
    8291011int CNetCdfInterface::varParAccess(int ncid, int varId, int access) 
    8301012{ 
    831   int status = nc_var_par_access(ncid, varId, access); 
     1013  int status; 
     1014  #pragma omp critical (_netcdf) 
     1015  { 
     1016    info(100)<<"start nc_var_par_access"<<std::endl; 
     1017    status = nc_var_par_access(ncid, varId, access); 
     1018    info(100)<<"end nc_var_par_access"<<std::endl; 
     1019  } 
    8321020  if (NC_NOERR != status) 
    8331021  { 
     
    8521040int CNetCdfInterface::sync(int ncid) 
    8531041{ 
    854   int status = nc_sync(ncid); 
     1042  int status; 
     1043  #pragma omp critical (_netcdf) 
     1044  { 
     1045    info(100)<<"start nc_sync"<<std::endl; 
     1046    status = nc_sync(ncid); 
     1047    info(100)<<"end nc_sync"<<std::endl; 
     1048  } 
    8551049  if (NC_NOERR != status) 
    8561050  { 
     
    8721066int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, double* data) 
    8731067{ 
    874   return nc_get_att_double(ncid, varid, attrName, data); 
     1068  int status; 
     1069  #pragma omp critical (_netcdf) 
     1070  { 
     1071    info(100)<<"start nc_get_att_double"<<std::endl; 
     1072    status = nc_get_att_double(ncid, varid, attrName, data); 
     1073    info(100)<<"end nc_get_att_double"<<std::endl; 
     1074  } 
     1075  return status; 
    8751076} 
    8761077 
     
    8781079int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, float* data) 
    8791080{ 
    880   return nc_get_att_float(ncid, varid, attrName, data); 
     1081  int status; 
     1082  #pragma omp critical (_netcdf) 
     1083  { 
     1084    info(100)<<"start nc_get_att_float"<<std::endl; 
     1085    status = nc_get_att_float(ncid, varid, attrName, data); 
     1086    info(100)<<"end nc_get_att_float"<<std::endl; 
     1087  } 
     1088  return status; 
    8811089} 
    8821090 
     
    8841092int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, int* data) 
    8851093{ 
    886   return nc_get_att_int(ncid, varid, attrName, data); 
     1094  int status; 
     1095  #pragma omp critical (_netcdf) 
     1096  { 
     1097    info(100)<<"start nc_get_att_int"<<std::endl; 
     1098    status = nc_get_att_int(ncid, varid, attrName, data); 
     1099    info(100)<<"end nc_get_att_int"<<std::endl; 
     1100  } 
     1101  return status;  
    8871102} 
    8881103 
     
    8901105int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, long* data) 
    8911106{ 
    892   return nc_get_att_long(ncid, varid, attrName, data); 
     1107  int status; 
     1108  #pragma omp critical (_netcdf) 
     1109  { 
     1110    info(100)<<"start nc_get_att_long"<<std::endl; 
     1111    status = nc_get_att_long(ncid, varid, attrName, data); 
     1112    info(100)<<"end nc_get_att_long"<<std::endl; 
     1113  } 
     1114  return status; 
    8931115} 
    8941116 
     
    8961118int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, short* data) 
    8971119{ 
    898   return nc_get_att_short(ncid, varid, attrName, data); 
     1120  int status; 
     1121  #pragma omp critical (_netcdf) 
     1122  { 
     1123    info(100)<<"start nc_get_att_short"<<std::endl; 
     1124    status = nc_get_att_short(ncid, varid, attrName, data); 
     1125    info(100)<<"end nc_get_att_short"<<std::endl; 
     1126  } 
     1127  return status; 
    8991128} 
    9001129 
     
    9021131int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, char* data) 
    9031132{ 
    904   return nc_get_att_text(ncid, varid, attrName, data); 
     1133  int status; 
     1134  #pragma omp critical (_netcdf) 
     1135  { 
     1136    info(100)<<"start nc_get_att_text"<<std::endl; 
     1137    status = nc_get_att_text(ncid, varid, attrName, data); 
     1138    info(100)<<"end nc_get_att_text"<<std::endl; 
     1139  } 
     1140  return status; 
    9051141} 
    9061142 
     
    9101146                                   StdSize numVal, const double* data) 
    9111147{ 
    912   return nc_put_att_double(ncid, varid, attrName, NC_DOUBLE, numVal, data); 
     1148  int status; 
     1149  #pragma omp critical (_netcdf) 
     1150  { 
     1151    info(100)<<"start nc_put_att_double"<<std::endl; 
     1152    status = nc_put_att_double(ncid, varid, attrName, NC_DOUBLE, numVal, data); 
     1153    info(100)<<"end nc_put_att_double"<<std::endl; 
     1154  } 
     1155  return status; 
    9131156} 
    9141157 
     
    9171160                                   StdSize numVal, const float* data) 
    9181161{ 
    919   return nc_put_att_float(ncid, varid, attrName, NC_FLOAT, numVal, data); 
     1162  int status; 
     1163  #pragma omp critical (_netcdf) 
     1164  { 
     1165    info(100)<<"start nc_put_att_float"<<std::endl; 
     1166    status = nc_put_att_float(ncid, varid, attrName, NC_FLOAT, numVal, data); 
     1167    info(100)<<"end nc_put_att_float"<<std::endl; 
     1168  } 
     1169  return status; 
    9201170} 
    9211171 
     
    9241174                                   StdSize numVal, const int* data) 
    9251175{ 
    926   return nc_put_att_int(ncid, varid, attrName, NC_INT, numVal, data); 
     1176  int status; 
     1177  #pragma omp critical (_netcdf) 
     1178  { 
     1179    info(100)<<"start nc_put_att_int"<<std::endl; 
     1180    status = nc_put_att_int(ncid, varid, attrName, NC_INT, numVal, data); 
     1181    info(100)<<"end nc_put_att_int"<<std::endl; 
     1182  } 
     1183  return status; 
    9271184} 
    9281185 
     
    9311188                                   StdSize numVal, const long* data) 
    9321189{ 
    933   return nc_put_att_long(ncid, varid, attrName, NC_LONG, numVal, data); 
     1190  int status; 
     1191  #pragma omp critical (_netcdf) 
     1192  { 
     1193    info(100)<<"start nc_put_att_long"<<std::endl; 
     1194    status = nc_put_att_long(ncid, varid, attrName, NC_LONG, numVal, data); 
     1195    info(100)<<"end nc_put_att_long"<<std::endl; 
     1196  } 
     1197  return status; 
    9341198} 
    9351199 
     
    9381202                                   StdSize numVal, const short* data) 
    9391203{ 
    940   return nc_put_att_short(ncid, varid, attrName, NC_SHORT, numVal, data); 
     1204  int status; 
     1205  #pragma omp critical (_netcdf) 
     1206  { 
     1207    info(100)<<"start nc_put_att_short"<<std::endl; 
     1208    status = nc_put_att_short(ncid, varid, attrName, NC_SHORT, numVal, data); 
     1209    info(100)<<"end nc_put_att_short"<<std::endl; 
     1210  } 
     1211  return status; 
    9411212} 
    9421213 
     
    9451216                                   StdSize numVal, const char* data) 
    9461217{ 
    947   return nc_put_att_text(ncid, varid, attrName, numVal, data); 
     1218  int status; 
     1219  #pragma omp critical (_netcdf) 
     1220  { 
     1221    info(100)<<"start nc_put_att_text"<<std::endl; 
     1222    status = nc_put_att_text(ncid, varid, attrName, numVal, data); 
     1223    info(100)<<"end nc_put_att_text"<<std::endl; 
     1224  } 
     1225  return status; 
    9481226} 
    9491227 
     
    9521230int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, double* data) 
    9531231{ 
    954   return nc_get_vara_double(ncid, varid, start, count, data); 
     1232  int status; 
     1233  #pragma omp critical (_netcdf) 
     1234  { 
     1235    info(100)<<"start nc_get_vara_double"<<std::endl; 
     1236    status = nc_get_vara_double(ncid, varid, start, count, data); 
     1237    info(100)<<"end nc_get_vara_double"<<std::endl; 
     1238  } 
     1239  return status; 
    9551240} 
    9561241 
     
    9581243int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, float* data) 
    9591244{ 
    960   return nc_get_vara_float(ncid, varid, start, count, data); 
     1245  int status; 
     1246  #pragma omp critical (_netcdf) 
     1247  { 
     1248    info(100)<<"start nc_get_vara_float"<<std::endl; 
     1249    status = nc_get_vara_float(ncid, varid, start, count, data); 
     1250    info(100)<<"end nc_get_vara_float"<<std::endl; 
     1251  } 
     1252  return status;  
    9611253} 
    9621254 
     
    9641256int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, int* data) 
    9651257{ 
    966   return nc_get_vara_int(ncid, varid, start, count, data); 
     1258  int status; 
     1259  #pragma omp critical (_netcdf) 
     1260  { 
     1261    info(100)<<"start nc_get_vara_int"<<std::endl; 
     1262    status = nc_get_vara_int(ncid, varid, start, count, data); 
     1263    info(100)<<"end nc_get_vara_int"<<std::endl; 
     1264  } 
     1265  return status;  
    9671266} 
    9681267 
     
    9701269int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, char* data) 
    9711270{ 
    972   return nc_get_vara_text(ncid, varid, start, count, data); 
     1271  int status; 
     1272  #pragma omp critical (_netcdf) 
     1273  { 
     1274    info(100)<<"start nc_get_vara_text"<<std::endl; 
     1275    status = nc_get_vara_text(ncid, varid, start, count, data); 
     1276    info(100)<<"end nc_get_vara_text"<<std::endl; 
     1277  } 
     1278  return status; 
    9731279} 
    9741280 
     
    9771283int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const double* data) 
    9781284{ 
    979   return nc_put_vara_double(ncid, varid, start, count, data); 
     1285  int status; 
     1286  #pragma omp critical (_netcdf) 
     1287  { 
     1288    info(100)<<"start nc_put_vara_double"<<std::endl; 
     1289    status = nc_put_vara_double(ncid, varid, start, count, data); 
     1290    info(100)<<"end nc_put_vara_double"<<std::endl; 
     1291  } 
     1292  return status; 
    9801293} 
    9811294 
     
    9831296int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const float* data) 
    9841297{ 
    985   return nc_put_vara_float(ncid, varid, start, count, data); 
     1298  int status; 
     1299  #pragma omp critical (_netcdf) 
     1300  { 
     1301    info(100)<<"start nc_put_vara_float"<<std::endl; 
     1302    status = nc_put_vara_float(ncid, varid, start, count, data); 
     1303    info(100)<<"end nc_put_vara_float"<<std::endl; 
     1304  } 
     1305  return status; 
    9861306} 
    9871307 
     
    9891309int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const int* data) 
    9901310{ 
    991   return nc_put_vara_int(ncid, varid, start, count, data); 
     1311  int status; 
     1312  #pragma omp critical (_netcdf) 
     1313  { 
     1314    info(100)<<"start nc_put_vara_int"<<std::endl; 
     1315    status = nc_put_vara_int(ncid, varid, start, count, data); 
     1316    info(100)<<"end nc_put_vara_int"<<std::endl; 
     1317  } 
     1318  return status; 
    9921319} 
    9931320 
     
    9951322int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const char* data) 
    9961323{ 
    997   return nc_put_vara_text(ncid, varid, start, count, data); 
     1324  int status; 
     1325  #pragma omp critical (_netcdf) 
     1326  { 
     1327    info(100)<<"start nc_put_vara_text"<<std::endl; 
     1328    status = nc_put_vara_text(ncid, varid, start, count, data); 
     1329    info(100)<<"end nc_put_vara_text"<<std::endl; 
     1330  } 
     1331  return status; 
    9981332} 
    9991333 
     
    10081342{ 
    10091343   int varId = 0; 
    1010    return (NC_NOERR == (nc_inq_varid(ncId, varName.c_str(), &varId))); 
     1344   int status; 
     1345   #pragma omp critical (_netcdf) 
     1346   { 
     1347     info(100)<<"start isVarExisted"<<std::endl; 
     1348     status = nc_inq_varid(ncId, varName.c_str(), &varId); 
     1349     info(100)<<"end isVarExisted"<<std::endl; 
     1350   } 
     1351   return (NC_NOERR == status); 
    10111352} 
    10121353 
     
    10141355{ 
    10151356   int dimId = 0; 
    1016    return (NC_NOERR == (nc_inq_dimid(ncId, dimName.c_str(), &dimId))); 
     1357   int status; 
     1358   #pragma omp critical (_netcdf) 
     1359   { 
     1360     info(100)<<"start isDimExisted"<<std::endl; 
     1361     status = nc_inq_dimid(ncId, dimName.c_str(), &dimId); 
     1362     info(100)<<"end isDimExisted"<<std::endl; 
     1363   } 
     1364   return (NC_NOERR == status); 
    10171365} 
    10181366 
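
Every call into the netCDF C library in this file is now serialised with the same named critical section, _netcdf, and bracketed by verbosity-100 trace lines, because the library is not thread-safe when several OpenMP threads act as I/O endpoints. A reduced sketch of that wrapper shape, with std::fprintf standing in for the info() logger and a placeholder file and variable name:

    #include <omp.h>
    #include <netcdf.h>
    #include <cstdio>
    #include <string>

    // Serialise a non-thread-safe netCDF call and trace entry/exit.
    static int safeInqVarId(int ncid, const std::string& varName, int& varId)
    {
      int status;
      #pragma omp critical (_netcdf)
      {
        std::fprintf(stderr, "start nc_inq_varid\n");
        status = nc_inq_varid(ncid, varName.c_str(), &varId);
        std::fprintf(stderr, "end nc_inq_varid\n");
      }
      return status;
    }

    int main()
    {
      int ncid = -1;
      if (nc_open("input.nc", NC_NOWRITE, &ncid) != NC_NOERR) return 1;  // placeholder file
      #pragma omp parallel
      {
        int myVarId;
        if (safeInqVarId(ncid, "temperature", myVarId) == NC_NOERR)      // "temperature" is illustrative
        {
          #pragma omp critical (_output)
          std::printf("thread %d: varid=%d\n", omp_get_thread_num(), myVarId);
        }
      }
      nc_close(ncid);
      return 0;
    }
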
  • XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.hpp

    r811 r1601  
    1010#define __NETCDF_INTERFACE_HPP_ 
    1111 
     12#include "mpi_std.hpp" 
    1213#include "xios_spl.hpp" 
    1314 
     
    1617#endif 
    1718 
    18 #include "mpi.hpp" 
     19 
    1920#include "netcdf.hpp" 
    2021 
  • XIOS/dev/dev_trunk_omp/src/io/netcdf.hpp

    r685 r1601  
    11#ifndef __XIOS_NETCDF_HPP__ 
    22#define __XIOS_NETCDF_HPP__ 
    3 #include "mpi.hpp" 
     3#include "mpi_std.hpp" 
    44#define MPI_INCLUDED 
    55#include <netcdf.h> 
     
    3333  { 
    3434#if defined(USING_NETCDF_PAR) 
    35     return ::nc_create_par(path, cmode, comm, info, ncidp) ; 
     35    int status; 
     36    #pragma omp critical (_netcdf) 
     37    { 
     38      status = ::nc_create_par(path, cmode, comm, info, ncidp) ; 
     39    } 
     40    return status; 
    3641#else 
    3742    ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
     
    4449  { 
    4550#if defined(USING_NETCDF_PAR) 
    46     return ::nc_open_par(path, mode, comm, info, ncidp) ; 
     51    int status; 
     52    #pragma omp critical (_netcdf) 
     53    { 
     54      status = ::nc_open_par(path, mode, comm, info, ncidp) ; 
     55    } 
     56    return status; 
    4757#else 
    4858    ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
     
    5565  { 
    5666#if defined(USING_NETCDF_PAR) 
    57     return ::nc_var_par_access(ncid, varid, par_access) ; 
     67    int status = ::nc_var_par_access(ncid, varid, par_access) ; 
     68     
     69    return status; 
    5870#else 
    5971    ERROR("int nc_var_par_access(int ncid, int varid, int par_access)", 
  • XIOS/dev/dev_trunk_omp/src/io/onetcdf4.cpp

    r1456 r1601  
    33#include "onetcdf4.hpp" 
    44#include "group_template.hpp" 
    5 #include "mpi.hpp" 
     5#include "ep_mpi.hpp" 
    66#include "netcdf.hpp" 
    77#include "netCdfInterface.hpp" 
     
    1515      CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 
    1616                                                        bool useCFConvention, 
    17                            const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     17                           const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    1818        : path() 
    1919        , wmpi(false) 
     
    3333 
    3434      void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,  
    35                                  const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     35                                 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    3636      { 
    3737         this->useClassicFormat = useClassicFormat; 
     
    4444         { 
    4545            int commSize = 0; 
    46             MPI_Comm_size(*comm, &commSize); 
     46            ep_lib::MPI_Comm_size(*comm, &commSize); 
    4747            if (commSize <= 1) 
    4848               comm = NULL; 
     
    5858            CTimer::get("Files : create").resume(); 
    5959            if (wmpi) 
    60                CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp); 
     60               CNetCdfInterface::createPar(filename, mode, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 
    6161            else 
    6262               CNetCdfInterface::create(filename, mode, this->ncidp); 
     
    7070            CTimer::get("Files : open").resume(); 
    7171            if (wmpi) 
    72                CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp); 
     72               CNetCdfInterface::openPar(filename, mode, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 
    7373            else 
    7474               CNetCdfInterface::open(filename, mode, this->ncidp); 
  • XIOS/dev/dev_trunk_omp/src/io/onetcdf4.hpp

    r1456 r1601  
    44/// XIOS headers /// 
    55#include "xios_spl.hpp" 
     6#include "mpi_std.hpp" 
    67#include "exception.hpp" 
    78#include "data_output.hpp" 
    89#include "array_new.hpp" 
    9 #include "mpi.hpp" 
    1010#include "netcdf.hpp" 
    1111 
     
    2828            CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 
    2929                          bool useCFConvention = true, 
    30                       const MPI_Comm* comm = NULL, bool multifile = true, 
     30                      const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 
    3131                      const StdString& timeCounterName = "time_counter"); 
    3232 
     
    3737            /// Initialisation /// 
    3838            void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 
    39                             const MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
     39                            const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
    4040            void close(void); 
    4141            void sync(void); 
  • XIOS/dev/dev_trunk_omp/src/log.cpp

    r523 r1601  
    11#include "log.hpp" 
     2#include <string> 
     3#include <iostream> 
     4#include <string> 
    25 
    36namespace xios 
    47{ 
     8  std::filebuf* info_FB[16]; 
     9 
     10 
    511  CLog info("info") ; 
    612  CLog report("report") ; 
    713  CLog error("error", cerr.rdbuf()) ; 
     14 
     15   
     16  CLog& CLog::operator()(int l) 
     17    { 
     18      if (l<=level) 
     19      { 
     20        omp_set_lock( &mutex ); 
     21        rdbuf(strBuf_array[omp_get_thread_num()]);  
     22        *this<<"-> "<<name<<" : " ; 
     23        omp_unset_lock( &mutex ); 
     24      } 
     25      else rdbuf(NULL) ; 
     26      return *this; 
     27    } 
    828} 
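
The logger now keeps one streambuf slot per OpenMP thread and switches the stream's buffer under an omp lock, so each thread's messages can be routed to its own file; call sites elsewhere in the branch additionally wrap whole statements in critical(_output). A self-contained sketch of that arrangement; the class name and output file names are illustrative, while the 16-slot cap and the lock-around-rdbuf shape mirror the source.

    #include <omp.h>
    #include <fstream>
    #include <iostream>
    #include <string>

    // Minimal per-thread logger: one streambuf slot per OpenMP thread and an
    // omp lock around the buffer switch (capped at 16 threads, as in the source).
    class ThreadLog : public std::ostream
    {
      public:
        ThreadLog() : std::ostream(std::cout.rdbuf()), level(0)
        {
          omp_init_lock(&mutex);
          for (int i = 0; i < 16; ++i) bufs[i] = std::cout.rdbuf();
        }
        ~ThreadLog() { omp_destroy_lock(&mutex); }

        void attach(int thread, std::streambuf* buf) { bufs[thread] = buf; }

        ThreadLog& operator()(int l)
        {
          if (l <= level)
          {
            omp_set_lock(&mutex);
            rdbuf(bufs[omp_get_thread_num()]);   // route output to this thread's buffer
            *this << "-> info : ";
            omp_unset_lock(&mutex);
          }
          else rdbuf(NULL);                      // discard messages above the verbosity level
          return *this;
        }

      private:
        int level;
        std::streambuf* bufs[16];
        omp_lock_t mutex;
    };

    int main()
    {
      ThreadLog info;
      std::filebuf files[16];

      #pragma omp parallel num_threads(4)
      {
        int t = omp_get_thread_num();
        std::string name = "info_" + std::to_string(t) + ".txt";
        files[t].open(name.c_str(), std::ios::out);
        info.attach(t, &files[t]);

        // As in the branch, call sites serialise whole statements with critical(_output).
        #pragma omp critical (_output)
        info(0) << "hello from thread " << t << std::endl;

        files[t].close();
      }
      return 0;
    }
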
  • XIOS/dev/dev_trunk_omp/src/log.hpp

    r523 r1601  
    55#include <iostream> 
    66#include <string> 
     7#include <stdio.h> 
     8#include <omp.h> 
    79 
    810namespace xios 
     
    1416    public : 
    1517    CLog(const string& name_, std::streambuf* sBuff = cout.rdbuf()) 
    16       : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) {} 
    17     CLog& operator()(int l) 
     18      : ostream(sBuff), level(0), name(name_), strBuf_(sBuff)  
    1819    { 
    19       if (l<=level) 
    20       { 
    21         rdbuf(strBuf_); 
    22         *this<<"-> "<<name<<" : " ; 
    23       } 
    24       else rdbuf(NULL) ; 
    25       return *this; 
     20      omp_init_lock( &mutex ); 
     21      for(int i=0; i<16; i++) 
     22        strBuf_array[i] = sBuff; 
    2623    } 
     24 
     25    ~CLog() 
     26    { 
     27      omp_destroy_lock( &mutex ); 
     28    } 
     29 
     30    CLog& operator()(int l); 
    2731    void setLevel(int l) {level=l; } 
    2832    int getLevel() {return level ;} 
     
    4650     * \param [in] pointer to new streambuf 
    4751    */ 
    48     void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); } 
     52    void changeStreamBuff(std::streambuf* sBuff)  
     53    {  
     54      strBuf_ = sBuff;  
     55      strBuf_array[omp_get_thread_num()] = sBuff; 
     56      rdbuf(sBuff); 
     57    } 
    4958 
    5059    int level ; 
    5160    string name ; 
    5261    std::streambuf* strBuf_; 
     62    std::streambuf* strBuf_array[16]; 
     63    omp_lock_t mutex; 
    5364  }; 
    5465 
     
    5667  extern CLog report; 
    5768  extern CLog error; 
     69 
     70  extern std::filebuf* info_FB[16]; 
    5871} 
    5972#endif 
  • XIOS/dev/dev_trunk_omp/src/mpi.hpp

    r501 r1601  
    1010/* skip C++ Binding for OpenMPI */ 
    1111#define OMPI_SKIP_MPICXX 
     12#ifdef _usingEP 
     13  #include <omp.h> 
     14  #include "../extern/src_ep_dev/ep_lib.hpp" 
     15  #include "../extern/src_ep_dev/ep_declaration.hpp" 
     16  //using namespace ep_lib; 
     17#elif _usingMPI 
     18  #include <mpi.h> 
     19#endif 
    1220 
    13 #include <mpi.h> 
    1421 
    1522#endif 
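
mpi.hpp is where the backend is selected: with _usingEP the endpoint library (one MPI "rank" per OpenMP thread) is pulled in, otherwise plain <mpi.h>. Below is a standalone sketch of the same compile-time switch; the #error fallback is an illustrative addition, the defined() test is a more defensive form than the header's value test, and the endpoint include is only indicated as a comment since ep_lib lives inside the XIOS tree.

    #define OMPI_SKIP_MPICXX                 /* skip OpenMPI's deprecated C++ bindings */

    #ifdef _usingEP
      #include <omp.h>                       /* endpoint MPI: one "rank" per OpenMP thread */
      /* #include "ep_lib.hpp"                  would replace <mpi.h> entirely             */
    #elif defined(_usingMPI)
      #include <mpi.h>                       /* classic process-level MPI                  */
    #else
      #error "define either _usingEP or _usingMPI"
    #endif

    int main(int argc, char** argv)
    {
      (void)argc; (void)argv;
    #ifdef _usingMPI
      MPI_Init(&argc, &argv);                /* plain-MPI path only, for illustration */
      MPI_Finalize();
    #endif
      return 0;
    }
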
  • XIOS/dev/dev_trunk_omp/src/node/axis.cpp

    r1566 r1601  
    1414#include "distribution_client.hpp" 
    1515 
     16using namespace ep_lib; 
     17 
    1618namespace xios { 
    1719 
     
    2628      , transformationMap_(), hasValue(false), hasLabel(false) 
    2729      , computedWrittenIndex_(false) 
    28           , clients() 
     30      , clients() 
    2931   { 
    3032   } 
     
    3840      , transformationMap_(), hasValue(false), hasLabel(false) 
    3941      , computedWrittenIndex_(false) 
    40           , clients() 
     42      , clients() 
    4143   { 
    4244   } 
     
    4547   { /* Ne rien faire de plus */ } 
    4648 
    47    std::map<StdString, ETranformationType> CAxis::transformationMapList_ = std::map<StdString, ETranformationType>(); 
    48    bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_); 
     49   std::map<StdString, ETranformationType> *CAxis::transformationMapList_ptr = 0; 
     50 
    4951   bool CAxis::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 
    5052   { 
     
    5860     m["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING; 
    5961     m["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS; 
    60  
     62   } 
     63 
     64   bool CAxis::initializeTransformationMap() 
     65   { 
     66     if(CAxis::transformationMapList_ptr == 0) CAxis::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 
     67     (*CAxis::transformationMapList_ptr)["zoom_axis"]        = TRANS_ZOOM_AXIS; 
     68     (*CAxis::transformationMapList_ptr)["interpolate_axis"] = TRANS_INTERPOLATE_AXIS; 
     69     (*CAxis::transformationMapList_ptr)["extract_axis"]     = TRANS_EXTRACT_AXIS; 
     70     (*CAxis::transformationMapList_ptr)["inverse_axis"]     = TRANS_INVERSE_AXIS; 
     71     (*CAxis::transformationMapList_ptr)["reduce_domain"]    = TRANS_REDUCE_DOMAIN_TO_AXIS; 
     72     (*CAxis::transformationMapList_ptr)["extract_domain"]   = TRANS_EXTRACT_DOMAIN_TO_AXIS; 
     73     (*CAxis::transformationMapList_ptr)["reduce_axis"]      = TRANS_REDUCE_AXIS_TO_AXIS; 
     74     (*CAxis::transformationMapList_ptr)["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING; 
     75     (*CAxis::transformationMapList_ptr)["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS; 
    6176   } 
    6277 
     
    114129     \return the number of indexes written by each server 
    115130   */ 
    116    int CAxis::getNumberWrittenIndexes(MPI_Comm writtenCom) 
     131   int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    117132   { 
    118133     int writtenSize; 
    119      MPI_Comm_size(writtenCom, &writtenSize); 
     134     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    120135     return numberWrittenIndexes_[writtenSize]; 
    121136   } 
     
    125140     \return the total number of indexes written by the servers 
    126141   */ 
    127    int CAxis::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 
     142   int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    128143   { 
    129144     int writtenSize; 
    130      MPI_Comm_size(writtenCom, &writtenSize); 
     145     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    131146     return totalNumberWrittenIndexes_[writtenSize]; 
    132147   } 
     
    136151     \return the offset of indexes written by each server 
    137152   */ 
    138    int CAxis::getOffsetWrittenIndexes(MPI_Comm writtenCom) 
     153   int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    139154   { 
    140155     int writtenSize; 
    141      MPI_Comm_size(writtenCom, &writtenSize); 
     156     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    142157     return offsetWrittenIndexes_[writtenSize]; 
    143158   } 
    144159 
    145    CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 
     160   CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 
    146161   { 
    147162     int writtenSize; 
    148      MPI_Comm_size(writtenCom, &writtenSize); 
     163     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    149164     return compressedIndexToWriteOnServer[writtenSize]; 
    150165   } 
     
    689704  } 
    690705 
    691   void CAxis::computeWrittenCompressedIndex(MPI_Comm writtenComm) 
     706  void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 
    692707  { 
    693708    int writtenCommSize; 
    694     MPI_Comm_size(writtenComm, &writtenCommSize); 
     709    ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 
    695710    if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 
    696711      return; 
     
    750765      { 
    751766              
    752         MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    753         MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     767        ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     768        ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    754769        offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 
    755770      } 
     
    13461361 
    13471362        nodeElementName = node.getElementName(); 
    1348         std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 
    1349         it = transformationMapList_.find(nodeElementName); 
     1363 
     1364        if(transformationMapList_ptr == 0) initializeTransformationMap(); 
     1365        std::map<StdString, ETranformationType>::const_iterator ite = (*CAxis::transformationMapList_ptr).end(), it; 
     1366        it = (*CAxis::transformationMapList_ptr).find(nodeElementName); 
    13501367        if (ite != it) 
    13511368        { 
  • XIOS/dev/dev_trunk_omp/src/node/axis.hpp

    r1562 r1601  
    1616#include "transformation.hpp" 
    1717#include "transformation_enum.hpp" 
     18 
     19#include "mpi_std.hpp" 
    1820 
    1921namespace xios { 
     
    6870         const std::set<StdString> & getRelFiles(void) const; 
    6971 
    70          int getNumberWrittenIndexes(MPI_Comm writtenCom); 
    71          int getTotalNumberWrittenIndexes(MPI_Comm writtenCom); 
    72          int getOffsetWrittenIndexes(MPI_Comm writtenCom); 
    73          CArray<int, 1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom); 
     72         int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     73         int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     74         int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     75         CArray<int, 1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 
    7476 
    7577         std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid, 
     
    113115 
    114116         void computeWrittenIndex(); 
    115          void computeWrittenCompressedIndex(MPI_Comm); 
     117         void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 
    116118         bool hasTransformation(); 
    117119         void solveInheritanceTransformation(); 
     
    177179       private: 
    178180         static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 
    179          static std::map<StdString, ETranformationType> transformationMapList_; 
    180          static bool dummyTransformationMapList_; 
     181         static bool initializeTransformationMap(); 
     182         static std::map<StdString, ETranformationType> *transformationMapList_ptr; 
     183         #pragma omp threadprivate(transformationMapList_ptr) 
     184         //static bool dummyTransformationMapList_; 
    181185 
    182186         DECLARE_REF_FUNC(Axis,axis) 
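
axis.cpp/axis.hpp replace the statically initialised transformationMapList_ (filled through a dummy bool at load time) with a threadprivate pointer that each OpenMP thread allocates and fills on first use; the same rewrite appears further down for domain.cpp and scalar.cpp. A minimal standalone sketch of that lazy, per-thread registry follows (TransId, registry_ptr and registry() are hypothetical names):

    #include <map>
    #include <string>
    #include <cstdio>
    #include <omp.h>

    enum class TransId { Zoom, Interpolate, Inverse };

    static std::map<std::string, TransId>* registry_ptr = nullptr;
    #pragma omp threadprivate(registry_ptr)

    static std::map<std::string, TransId>& registry()
    {
      if (registry_ptr == nullptr)                     // first call on this thread
      {
        registry_ptr = new std::map<std::string, TransId>();   // never freed, like the XIOS maps
        (*registry_ptr)["zoom_axis"]        = TransId::Zoom;
        (*registry_ptr)["interpolate_axis"] = TransId::Interpolate;
        (*registry_ptr)["inverse_axis"]     = TransId::Inverse;
      }
      return *registry_ptr;
    }

    int main()
    {
      #pragma omp parallel
      {
        // each thread builds and queries its own private copy of the map
        bool known = registry().count("zoom_axis") > 0;
        #pragma omp critical (_output)
        std::printf("thread %d: zoom_axis known = %d\n", omp_get_thread_num(), (int)known);
      }
      return 0;
    }
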
  • XIOS/dev/dev_trunk_omp/src/node/compute_connectivity_domain.hpp

    r934 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CComputeConnectivityDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/context.cpp

    r1542 r1601  
    2121#include "distribute_file_server2.hpp" 
    2222 
     23using namespace ep_lib; 
     24 
    2325namespace xios { 
    2426 
    25   std::shared_ptr<CContextGroup> CContext::root; 
     27  std::shared_ptr<CContextGroup> * CContext::root_ptr = 0; 
    2628 
    2729   /// ////////////////////// Définitions ////////////////////// /// 
     
    6668   CContextGroup* CContext::getRoot(void) 
    6769   { 
    68       if (root.get()==NULL) root=std::shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 
    69       return root.get(); 
     70      if(root_ptr == 0) root_ptr = new std::shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 
     71      return root_ptr->get(); 
    7072   } 
    7173 
     
    248250 
    249251   //! Initialize client side 
    250    void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
     252   void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
    251253   { 
    252254 
    253255     hasClient = true; 
    254      MPI_Comm intraCommServer, interCommServer; 
     256     ep_lib::MPI_Comm intraCommServer, interCommServer; 
    255257      
    256258 
    257      if (CServer::serverLevel != 1) 
    258       // initClient is called by client 
     259     if (CServer::serverLevel != 1) // initClient is called by client 
    259260     { 
    260261       client = new CContextClient(this, intraComm, interComm, cxtServer); 
     
    266267       else 
    267268       { 
    268          MPI_Comm_dup(intraComm, &intraCommServer); 
     269         ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 
    269270         comms.push_back(intraCommServer); 
    270          MPI_Comm_dup(interComm, &interCommServer); 
     271         ep_lib::MPI_Comm_dup(interComm, &interCommServer); 
    271272         comms.push_back(interCommServer); 
    272273       } 
     
    287288       server = new CContextServer(this, intraCommServer, interCommServer); 
    288289     } 
    289      else 
    290      // initClient is called by primary server 
     290 
     291     else // initClient is called by primary server 
    291292     { 
    292293       clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 
    293        MPI_Comm_dup(intraComm, &intraCommServer); 
     294       ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 
    294295       comms.push_back(intraCommServer); 
    295        MPI_Comm_dup(interComm, &interCommServer); 
     296       ep_lib::MPI_Comm_dup(interComm, &interCommServer); 
    296297       comms.push_back(interCommServer); 
    297298       serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); 
     
    361362   } 
    362363 
    363    void CContext::initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
     364   void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
    364365   { 
    365366     hasServer=true; 
     
    379380     registryOut->setPath(contextRegistryId) ; 
    380381 
    381      MPI_Comm intraCommClient, interCommClient; 
     382     ep_lib::MPI_Comm intraCommClient, interCommClient; 
    382383     if (cxtClient) // Attached mode 
    383384     { 
     
    387388     else 
    388389     { 
    389        MPI_Comm_dup(intraComm, &intraCommClient); 
     390       ep_lib::MPI_Comm_dup(intraComm, &intraCommClient); 
    390391       comms.push_back(intraCommClient); 
    391        MPI_Comm_dup(interComm, &interCommClient); 
     392       ep_lib::MPI_Comm_dup(interComm, &interCommClient); 
    392393       comms.push_back(interCommClient); 
    393394     } 
     
    475476 
    476477         //! Free internally allocated communicators 
    477          for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    478            MPI_Comm_free(&(*it)); 
     478         for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
     479           ep_lib::MPI_Comm_free(&(*it)); 
    479480         comms.clear(); 
    480481 
     482         #pragma omp critical (_output) 
    481483         info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 
    482484       } 
     
    494496       { 
    495497         // Blocking send of context finalize message to its client (e.g. primary server or model) 
     498         #pragma omp critical (_output) 
    496499         info(100)<<"DEBUG: context "<<getId()<<" Send client finalize<<"<<endl ; 
    497500         client->finalize(); 
     
    517520 
    518521         //! Free internally allocated communicators 
    519          for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    520            MPI_Comm_free(&(*it)); 
     522         for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
     523           ep_lib::MPI_Comm_free(&(*it)); 
    521524         comms.clear(); 
    522525 
     526         #pragma omp critical (_output) 
    523527         info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 
    524528       } 
     
    531535   void CContext::freeComms(void) 
    532536   { 
    533      for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    534        MPI_Comm_free(&(*it)); 
     537     for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
     538       ep_lib::MPI_Comm_free(&(*it)); 
    535539     comms.clear(); 
    536540   } 
     
    10021006       } 
    10031007 
    1004        for (std::multimap<double,int>:: iterator it=poolDataSize.begin() ; it!=poolDataSize.end(); ++it) info(30)<<"Load Balancing for servers (perfect=1) : "<<it->second<<" :  ratio "<<it->first*1./dataPerPool<<endl ; 
    1005   
     1008       for (std::multimap<double,int>:: iterator it=poolDataSize.begin() ; it!=poolDataSize.end(); ++it) 
     1009       { 
     1010         #pragma omp critical (_output) 
     1011         info(30)<<"Load Balancing for servers (perfect=1) : "<<it->second<<" :  ratio "<<it->first*1./dataPerPool<<endl ; 
     1012       } 
     1013 
    10061014       for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) 
    10071015       { 
     
    10911099       } 
    10921100 
    1093        for (int i = 0; i < nbPools; ++i) info(100)<<"Pool server level2 "<<i<<"   assigned file bandwith "<<bandwithSize[i]*86400.*4./1024/1024.<<" Mb / days"<<endl ; 
    1094        for (int i = 0; i < nbPools; ++i) info(100)<<"Pool server level2 "<<i<<"   assigned grid memory "<<memorySize[i]*100/1024./1024.<<" Mb"<<endl ; 
    1095  
     1101       for (int i = 0; i < nbPools; ++i) 
     1102       { 
     1103         #pragma omp critical (_output) 
     1104         info(100)<<"Pool server level2 "<<i<<"   assigned file bandwith "<<bandwithSize[i]*86400.*4./1024/1024.<<" Mb / days"<<endl ; 
     1105       } 
     1106       for (int i = 0; i < nbPools; ++i) 
     1107       { 
     1108         #pragma omp critical (_output) 
     1109         info(100)<<"Pool server level2 "<<i<<"   assigned grid memory "<<memorySize[i]*100/1024./1024.<<" Mb"<<endl ; 
     1110       } 
    10961111 
    10971112       for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) 
     
    11441159     for (; it != end; it++) 
    11451160     { 
     1161       #pragma omp critical (_output) 
    11461162       info(30)<<"Closing File : "<<(*it)->getId()<<endl; 
    11471163       (*it)->close(); 
     
    18311847        } 
    18321848 
     1849        #pragma omp critical (_output) 
    18331850        info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 
    18341851        calendar->update(step); 
     1852        #pragma omp critical (_output) 
    18351853        info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 
    18361854  #ifdef XIOS_MEMTRACK_LIGHT 
     1855        #pragma omp critical (_output) 
    18371856        info(50) << " Current memory used by XIOS : "<<  MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 
    18381857  #endif 
     
    18451864      } 
    18461865      else if (prevStep == step) 
     1866      { 
     1867        #pragma omp critical (_output) 
    18471868        info(50) << "updateCalendar: already at step " << step << ", no operation done." << endl; 
     1869      } 
    18481870      else // if (prevStep > step) 
    18491871        ERROR("void CContext::updateCalendar(int step)", 
     
    19011923    CContext* context = CObjectFactory::CreateObject<CContext>(id).get(); 
    19021924    getRoot(); 
    1903     if (!hasctxt) CGroupFactory::AddChild(root, context->getShared()); 
     1925    if (!hasctxt) CGroupFactory::AddChild(*root_ptr, context->getShared()); 
    19041926 
    19051927#define DECLARE_NODE(Name_, name_) \ 
  • XIOS/dev/dev_trunk_omp/src/node/context.hpp

    r1542 r1601  
    55#include "xios_spl.hpp" 
    66//#include "node_type.hpp" 
     7#include "mpi_std.hpp" 
    78#include "calendar_wrapper.hpp" 
    89 
     
    1112#include "garbage_collector.hpp" 
    1213#include "registry.hpp" 
    13 #include "mpi.hpp" 
    1414 
    1515 
     
    8888      public : 
    8989         // Initialize server or client 
    90          void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0); 
    91          void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0); 
     90         void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 
     91         void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 
    9292         bool isInitialized(void); 
    9393 
     
    239239 
    240240         // Context root 
    241          static std::shared_ptr<CContextGroup> root; 
     241         static std::shared_ptr<CContextGroup> *root_ptr; 
     242         #pragma omp threadprivate(root_ptr) 
    242243 
    243244         // Determine context on client or not 
     
    262263         StdString idServer_; 
    263264         CGarbageCollector garbageCollector; 
    264          std::list<MPI_Comm> comms; //!< Communicators allocated internally 
     265         std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 
    265266 
    266267      public: // Some function maybe removed in the near future 
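
Besides the threadprivate root_ptr and the critical (_output) sections, context.cpp keeps every communicator it duplicates in the comms list and releases them together in freeComms(). A small plain-MPI sketch of that bookkeeping (keep_duplicate and free_comms are hypothetical names standing in for the ep_lib calls):

    #include <mpi.h>
    #include <list>

    static std::list<MPI_Comm> comms;                  // communicators allocated internally

    static MPI_Comm keep_duplicate(MPI_Comm src)
    {
      MPI_Comm dup = MPI_COMM_NULL;
      MPI_Comm_dup(src, &dup);                         // private copy for this context
      comms.push_back(dup);
      return dup;
    }

    static void free_comms()
    {
      for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
        MPI_Comm_free(&(*it));                         // every dup is released exactly once
      comms.clear();
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      keep_duplicate(MPI_COMM_WORLD);
      keep_duplicate(MPI_COMM_WORLD);
      free_comms();
      MPI_Finalize();
      return 0;
    }
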
  • XIOS/dev/dev_trunk_omp/src/node/domain.cpp

    r1578 r1601  
    1919#include "client_server_mapping_distributed.hpp" 
    2020 
     21using namespace ep_lib; 
     22 
    2123#include <algorithm> 
    2224 
     
    6870   } 
    6971 
    70    std::map<StdString, ETranformationType> CDomain::transformationMapList_ = std::map<StdString, ETranformationType>(); 
    71    bool CDomain::_dummyTransformationMapList = CDomain::initializeTransformationMap(CDomain::transformationMapList_); 
     72   std::map<StdString, ETranformationType> *CDomain::transformationMapList_ptr = 0; 
    7273 
    7374   bool CDomain::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 
     
    8283   } 
    8384 
     85   bool CDomain::initializeTransformationMap() 
     86   { 
     87     CDomain::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 
     88     (*CDomain::transformationMapList_ptr)["zoom_domain"] = TRANS_ZOOM_DOMAIN; 
     89     (*CDomain::transformationMapList_ptr)["interpolate_domain"] = TRANS_INTERPOLATE_DOMAIN; 
     90     (*CDomain::transformationMapList_ptr)["generate_rectilinear_domain"] = TRANS_GENERATE_RECTILINEAR_DOMAIN; 
     91     (*CDomain::transformationMapList_ptr)["compute_connectivity_domain"] = TRANS_COMPUTE_CONNECTIVITY_DOMAIN; 
     92     (*CDomain::transformationMapList_ptr)["expand_domain"] = TRANS_EXPAND_DOMAIN; 
     93     (*CDomain::transformationMapList_ptr)["reorder_domain"] = TRANS_REORDER_DOMAIN; 
     94     (*CDomain::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN; 
     95   } 
     96 
    8497   const std::set<StdString> & CDomain::getRelFiles(void) const 
    8598   { 
     
    92105     \return the number of indexes written by each server 
    93106   */ 
    94    int CDomain::getNumberWrittenIndexes(MPI_Comm writtenCom) 
     107   int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    95108   { 
    96109     int writtenSize; 
    97      MPI_Comm_size(writtenCom, &writtenSize); 
     110     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    98111     return numberWrittenIndexes_[writtenSize]; 
    99112   } 
     
    103116     \return the total number of indexes written by the servers 
    104117   */ 
    105    int CDomain::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 
     118   int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    106119   { 
    107120     int writtenSize; 
    108      MPI_Comm_size(writtenCom, &writtenSize); 
     121     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    109122     return totalNumberWrittenIndexes_[writtenSize]; 
    110123   } 
     
    114127     \return the offset of indexes written by each server 
    115128   */ 
    116    int CDomain::getOffsetWrittenIndexes(MPI_Comm writtenCom) 
     129   int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
    117130   { 
    118131     int writtenSize; 
    119      MPI_Comm_size(writtenCom, &writtenSize); 
     132     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    120133     return offsetWrittenIndexes_[writtenSize]; 
    121134   } 
    122135 
    123    CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 
     136   CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 
    124137   { 
    125138     int writtenSize; 
    126      MPI_Comm_size(writtenCom, &writtenSize); 
     139     ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
    127140     return compressedIndexToWriteOnServer[writtenSize]; 
    128141   } 
     
    654667     int v ; 
    655668     v=ibegin ; 
    656      MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 
     669     ep_lib::MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 
    657670     v=jbegin ; 
    658      MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 
     671     ep_lib::MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 
    659672     v=ni ; 
    660      MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 
     673     ep_lib::MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 
    661674     v=nj ; 
    662      MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 
    663  
    664      MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 
    665      MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 
     675     ep_lib::MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 
     676 
     677     ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 
     678     ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 
    666679 
    667680      delete[] ibegin_g ; 
     
    19011914   } 
    19021915 
    1903   void CDomain::computeWrittenCompressedIndex(MPI_Comm writtenComm) 
     1916  void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 
    19041917  { 
    19051918    int writtenCommSize; 
    1906     MPI_Comm_size(writtenComm, &writtenCommSize); 
     1919    ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 
    19071920    if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 
    19081921      return; 
     
    19611974      { 
    19621975              
    1963         MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    1964         MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     1976        ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     1977        ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    19651978        offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 
    19661979      } 
     
    30453058 
    30463059        nodeElementName = node.getElementName(); 
    3047         std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 
    3048         it = transformationMapList_.find(nodeElementName); 
     3060        if(transformationMapList_ptr == 0) initializeTransformationMap(); 
     3061        std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 
     3062        it = transformationMapList_ptr->find(nodeElementName); 
    30493063        if (ite != it) 
    30503064        { 
  • XIOS/dev/dev_trunk_omp/src/node/domain.hpp

    r1578 r1601  
    1717#include "transformation_enum.hpp" 
    1818#include "server_distribution_description.hpp" 
     19#include "mpi_std.hpp" 
    1920#include "mesh.hpp" 
    2021 
     
    9495         bool isWrittenCompressed(const StdString& filename) const; 
    9596          
    96          int getNumberWrittenIndexes(MPI_Comm writtenCom); 
    97          int getTotalNumberWrittenIndexes(MPI_Comm writtenCom); 
    98          int getOffsetWrittenIndexes(MPI_Comm writtenCom); 
    99          CArray<int,1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom); 
     97         int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     98         int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     99         int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 
     100         CArray<int,1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 
    100101 
    101102         std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); 
     
    116117          
    117118         void computeWrittenIndex(); 
    118          void computeWrittenCompressedIndex(MPI_Comm); 
     119         void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 
    119120 
    120121         void AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, 
     
    234235       private: 
    235236         static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 
    236          static std::map<StdString, ETranformationType> transformationMapList_; 
     237         static bool initializeTransformationMap(); 
     238         static std::map<StdString, ETranformationType> *transformationMapList_ptr; 
     239         #pragma omp threadprivate(transformationMapList_ptr) 
    237240         static bool _dummyTransformationMapList; 
     241         #pragma omp threadprivate(_dummyTransformationMapList) 
    238242 
    239243         DECLARE_REF_FUNC(Domain,domain) 
  • XIOS/dev/dev_trunk_omp/src/node/duplicate_scalar_to_axis.hpp

    r1314 r1601  
    6060      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CReduceAxisToAxis 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/expand_domain.hpp

    r935 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CExpandDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/extract_axis.hpp

    r1558 r1601  
    6060      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CExtractAxis 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/extract_axis_to_scalar.hpp

    r960 r1601  
    6060      static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CExtractAxisToScalar 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/extract_domain.hpp

    r1549 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CExtractDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/extract_domain_to_axis.hpp

    r895 r1601  
    6060      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CExtractDomainToAxis 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/field.cpp

    r1574 r1601  
    343343    while (currentDate >= lastDataRequestedFromServer) 
    344344    { 
    345       info(20) << "currentDate : " << currentDate << endl ; 
    346       info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 
    347       info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 
    348       info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 
     345      #pragma omp critical (_output) 
     346      { 
     347        info(20) << "currentDate : " << currentDate << endl ; 
     348        info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 
     349        info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 
     350        info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 
     351      } 
    349352 
    350353      dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); 
     
    502505    if (!nstepMaxRead) 
    503506    { 
    504        MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 
     507       MPI_Allreduce(&nstepMax, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 
    505508       nstepMaxRead = true; 
    506509    } 
     
    919922     { 
    920923        areAllReferenceSolved = true; 
    921         
     924 
    922925        if (context->hasClient && !context->hasServer) 
    923926        { 
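
One hunk in field.cpp replaces MPI_Allreduce(MPI_IN_PLACE, &nstepMax, ...) with a call that passes &nstepMax as both send and receive buffer. Standard MPI does not allow those two buffers to alias (MPI_IN_PLACE exists for exactly this case); whether the endpoint wrapper tolerates the aliasing is not stated in the changeset. For reference, the portable in-place form under plain MPI looks like this:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int nstepMax = rank + 1;                         // per-rank value, reduced in place
      MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

      std::printf("rank %d: nstepMax = %d\n", rank, nstepMax);
      MPI_Finalize();
      return 0;
    }
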
  • XIOS/dev/dev_trunk_omp/src/node/file.cpp

    r1542 r1601  
    289289 
    290290      int color = allZoneEmpty ? 0 : 1; 
    291       MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 
    292       if (allZoneEmpty) MPI_Comm_free(&fileComm); 
     291      ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 
     292      if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm); 
    293293    } 
    294294 
     
    524524         { 
    525525            int commSize, commRank; 
    526             MPI_Comm_size(fileComm, &commSize); 
    527             MPI_Comm_rank(fileComm, &commRank); 
     526            ep_lib::MPI_Comm_size(fileComm, &commSize); 
     527            ep_lib::MPI_Comm_rank(fileComm, &commRank); 
    528528 
    529529            if (server->intraCommSize > 1) 
     
    602602    CContext* context = CContext::getCurrent(); 
    603603    CContextServer* server = context->server; 
    604     MPI_Comm readComm = this->fileComm; 
     604    ep_lib::MPI_Comm readComm = this->fileComm; 
    605605 
    606606    if (!allZoneEmpty) 
     
    645645      { 
    646646        int commSize, commRank; 
    647         MPI_Comm_size(readComm, &commSize); 
    648         MPI_Comm_rank(readComm, &commRank); 
     647        ep_lib::MPI_Comm_size(readComm, &commSize); 
     648        ep_lib::MPI_Comm_rank(readComm, &commRank); 
    649649 
    650650        if (server->intraCommSize > 1) 
     
    688688        isOpen = false; 
    689689       } 
    690       if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 
     690      //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 
    691691   } 
    692692   //---------------------------------------------------------------- 
     
    713713 
    714714        // Read necessary value from file 
     715        #pragma omp critical (_func) 
    715716        this->data_in->readFieldAttributesValues(enabledFields[idx]); 
    716717 
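
file.cpp builds fileComm by splitting the server communicator with a colour that separates ranks holding data to write from those that do not, and the empty ranks free their half of the split immediately. A standalone plain-MPI sketch of that idiom (allZoneEmpty is faked from the rank parity here):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      bool allZoneEmpty = (rank % 2 == 0);             // stand-in for "this rank writes nothing"
      int  color        = allZoneEmpty ? 0 : 1;

      MPI_Comm fileComm = MPI_COMM_NULL;
      MPI_Comm_split(MPI_COMM_WORLD, color, rank, &fileComm);
      if (allZoneEmpty) MPI_Comm_free(&fileComm);      // non-writers drop their communicator

      if (!allZoneEmpty)
      {
        int writers = 0;
        MPI_Comm_size(fileComm, &writers);             // writers see only each other
        std::printf("rank %d is one of %d writers\n", rank, writers);
        MPI_Comm_free(&fileComm);
      }
      MPI_Finalize();
      return 0;
    }
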
  • XIOS/dev/dev_trunk_omp/src/node/file.hpp

    r1542 r1601  
    44/// XIOS headers /// 
    55#include "xios_spl.hpp" 
     6#include "mpi_std.hpp" 
    67#include "field.hpp" 
    78#include "data_output.hpp" 
     
    1213#include "attribute_enum_impl.hpp" 
    1314#include "context_client.hpp" 
    14 #include "mpi.hpp" 
     15 
    1516 
    1617namespace xios { 
     
    173174         int nbAxis, nbDomains; 
    174175         bool isOpen;          
    175          MPI_Comm fileComm; 
     176         ep_lib::MPI_Comm fileComm; 
    176177 
    177178      private: 
  • XIOS/dev/dev_trunk_omp/src/node/generate_rectilinear_domain.hpp

    r836 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CGenerateRectilinearDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/interpolate_axis.hpp

    r836 r1601  
    6262      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6363      static bool _dummyRegistered; 
     64      #pragma omp threadprivate(_dummyRegistered) 
    6465  }; // class CInterpolateAxis 
    6566 
  • XIOS/dev/dev_trunk_omp/src/node/interpolate_domain.hpp

    r1021 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CInterpolateDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/inverse_axis.hpp

    r836 r1601  
    5959      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6060      static bool _dummyRegistered; 
     61      #pragma omp threadprivate(_dummyRegistered) 
    6162 
    6263  }; // class CInverseAxis 
  • XIOS/dev/dev_trunk_omp/src/node/mesh.cpp

    r1542 r1601  
    66 
    77#include "mesh.hpp" 
     8using namespace ep_lib; 
    89#include <boost/functional/hash.hpp> 
    9 //#include <unordered_map> 
    1010 
    1111namespace xios { 
     
    3333  } 
    3434 
    35   std::map <StdString, CMesh> CMesh::meshList = std::map <StdString, CMesh>(); 
    36   std::map <StdString, vector<int> > CMesh::domainList = std::map <StdString, vector<int> >(); 
     35  std::map <StdString, CMesh> *CMesh::meshList_ptr = 0; 
     36  std::map <StdString, vector<int> > *CMesh::domainList_ptr = 0; 
    3737 
    3838///--------------------------------------------------------------- 
     
    4545  CMesh* CMesh::getMesh (StdString meshName, int nvertex) 
    4646  { 
    47     CMesh::domainList[meshName].push_back(nvertex); 
    48  
    49     if ( CMesh::meshList.begin() != CMesh::meshList.end() ) 
    50     { 
    51       for (std::map<StdString, CMesh>::iterator it=CMesh::meshList.begin(); it!=CMesh::meshList.end(); ++it) 
     47    if(CMesh::domainList_ptr == NULL) CMesh::domainList_ptr = new std::map <StdString, vector<int> >(); 
     48    if(CMesh::meshList_ptr == NULL)   CMesh::meshList_ptr   = new std::map <StdString, CMesh>(); 
     49 
     50    CMesh::domainList_ptr->at(meshName).push_back(nvertex); 
     51 
     52    if ( CMesh::meshList_ptr->begin() != CMesh::meshList_ptr->end() ) 
     53    { 
     54      for (std::map<StdString, CMesh>::iterator it=CMesh::meshList_ptr->begin(); it!=CMesh::meshList_ptr->end(); ++it) 
    5255      { 
    5356        if (it->first == meshName) 
    54           return &meshList[meshName]; 
     57          return &meshList_ptr->at(meshName); 
    5558        else 
    5659        { 
    5760          CMesh newMesh; 
    58           CMesh::meshList.insert( make_pair(meshName, newMesh) ); 
    59           return &meshList[meshName]; 
     61          CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 
     62          return &meshList_ptr->at(meshName); 
    6063        } 
    6164      } 
     
    6467    { 
    6568      CMesh newMesh; 
    66       CMesh::meshList.insert( make_pair(meshName, newMesh) ); 
    67       return &meshList[meshName]; 
     69      CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 
     70      return &meshList_ptr->at(meshName); 
    6871    } 
    6972  } 
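
In CMesh::getMesh the lookup changes from domainList[meshName].push_back(nvertex) to domainList_ptr->at(meshName).push_back(nvertex). std::map::operator[] inserts a default-constructed value when the key is new, while std::map::at() throws std::out_of_range instead, so the two calls differ on the first request for a given meshName; the changeset does not say whether that behaviour change is intended. A small illustration of the difference, unrelated to the XIOS data:

    #include <map>
    #include <string>
    #include <vector>
    #include <stdexcept>
    #include <cstdio>

    int main()
    {
      std::map<std::string, std::vector<int> > domains;

      domains["new_mesh"].push_back(4);                // inserts an empty vector, then appends

      try {
        domains.at("other_mesh").push_back(4);         // throws: "other_mesh" was never inserted
      } catch (const std::out_of_range&) {
        std::printf("at() throws when the key is absent\n");
      }
      return 0;
    }
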
  • XIOS/dev/dev_trunk_omp/src/node/mesh.hpp

    r1542 r1601  
    6060                      const CArray<double, 2>&, const CArray<double, 2>& ); 
    6161                         
    62       void createMeshEpsilon(const MPI_Comm&, 
     62      void createMeshEpsilon(const ep_lib::MPI_Comm&, 
    6363                             const CArray<double, 1>&, const CArray<double, 1>&, 
    6464                             const CArray<double, 2>&, const CArray<double, 2>& ); 
    6565 
    66       void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&, 
     66      void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 
    6767                              const CArray<double, 2>&, const CArray<double, 2>&, 
    6868                              CArray<int, 2>&); 
     
    8383      int nbFaces_; 
    8484 
    85       static std::map <StdString, CMesh> meshList; 
    86       static std::map <StdString, vector<int> > domainList; 
     85      static std::map <StdString, CMesh> *meshList_ptr; 
     86      #pragma omp threadprivate(meshList_ptr) 
     87      static std::map <StdString, vector<int> > *domainList_ptr; 
     88      #pragma omp threadprivate(domainList_ptr) 
    8789      CClientClientDHTSizet* pNodeGlobalIndex;                    // pointer to a map <nodeHash, nodeIdxGlo> 
    8890      CClientClientDHTSizet* pEdgeGlobalIndex;                    // pointer to a map <edgeHash, edgeIdxGlo> 
    89       void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    90       void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     91      void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     92      void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    9193      void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
    9294      void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
  • XIOS/dev/dev_trunk_omp/src/node/reduce_axis_to_axis.hpp

    r1301 r1601  
    5959      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6060      static bool _dummyRegistered; 
     61      #pragma omp threadprivate(_dummyRegistered) 
    6162  }; // class CReduceAxisToAxis 
    6263 
  • XIOS/dev/dev_trunk_omp/src/node/reduce_axis_to_scalar.hpp

    r888 r1601  
    5959      static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 
    6060      static bool _dummyRegistered; 
     61      #pragma omp threadprivate(_dummyRegistered) 
    6162  }; // class CReduceAxisToScalar 
    6263 
  • XIOS/dev/dev_trunk_omp/src/node/reduce_domain_to_axis.hpp

    r895 r1601  
    6060      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CReduceDomainToAxis 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/reduce_domain_to_scalar.hpp

    r976 r1601  
    6060      static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CReduceDomainToScalar 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/reduce_scalar_to_scalar.hpp

    r1314 r1601  
    5959      static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 
    6060      static bool _dummyRegistered; 
     61      #pragma omp threadprivate(_dummyRegistered) 
    6162  }; // class CReduceScalarToScalar 
    6263 
  • XIOS/dev/dev_trunk_omp/src/node/reorder_domain.hpp

    r1457 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CReorderDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/scalar.cpp

    r1314 r1601  
    2727   { /* Ne rien faire de plus */ } 
    2828 
    29    std::map<StdString, ETranformationType> CScalar::transformationMapList_ = std::map<StdString, ETranformationType>(); 
    30    bool CScalar::dummyTransformationMapList_ = CScalar::initializeTransformationMap(CScalar::transformationMapList_); 
     29 
     30   std::map<StdString, ETranformationType> *CScalar::transformationMapList_ptr = 0; 
     31 
    3132   bool CScalar::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 
    3233   { 
     
    3536     m["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 
    3637     m["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 
     38   } 
     39 
     40   bool CScalar::initializeTransformationMap() 
     41   { 
     42     CScalar::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 
     43     (*CScalar::transformationMapList_ptr)["reduce_axis"]   = TRANS_REDUCE_AXIS_TO_SCALAR; 
     44     (*CScalar::transformationMapList_ptr)["extract_axis"]  = TRANS_EXTRACT_AXIS_TO_SCALAR; 
     45     (*CScalar::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 
     46     (*CScalar::transformationMapList_ptr)["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 
    3747   } 
    3848 
     
    165175 
    166176        nodeElementName = node.getElementName(); 
    167         std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 
    168         it = transformationMapList_.find(nodeElementName); 
     177        if(CScalar::transformationMapList_ptr == 0) initializeTransformationMap(); 
     178        std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 
     179        it = transformationMapList_ptr->find(nodeElementName); 
    169180        if (ite != it) 
    170181        { 
  • XIOS/dev/dev_trunk_omp/src/node/scalar.hpp

    r1436 r1601  
    8383           TransMapTypes transformationMap_; 
    8484 
    85             void setTransformations(const TransMapTypes&); 
     85           void setTransformations(const TransMapTypes&); 
    8686 
    8787       private: 
    8888           static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 
    89            static std::map<StdString, ETranformationType> transformationMapList_; 
     89           static bool initializeTransformationMap(); 
     90           static std::map<StdString, ETranformationType> *transformationMapList_ptr; 
     91           #pragma omp threadprivate(transformationMapList_ptr) 
    9092           static bool dummyTransformationMapList_; 
     93           #pragma omp threadprivate(dummyTransformationMapList_) 
    9194 
    9295 
  • XIOS/dev/dev_trunk_omp/src/node/temporal_splitting.hpp

    r1275 r1601  
    5959      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6060      static bool _dummyRegistered; 
     61      #pragma omp threadprivate(_dummyRegistered) 
    6162  }; // class CTemporalSplitting 
    6263 
  • XIOS/dev/dev_trunk_omp/src/node/zoom_axis.hpp

    r836 r1601  
    6060      static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CZoomAxis 
    6364 
  • XIOS/dev/dev_trunk_omp/src/node/zoom_domain.hpp

    r836 r1601  
    6060      static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 
    6161      static bool _dummyRegistered; 
     62      #pragma omp threadprivate(_dummyRegistered) 
    6263  }; // class CZoomDomain 
    6364 
  • XIOS/dev/dev_trunk_omp/src/object_factory.cpp

    r501 r1601  
    55   /// ////////////////////// Définitions ////////////////////// /// 
    66 
    7    StdString CObjectFactory::CurrContext(""); 
     7   StdString *CObjectFactory::CurrContext_ptr = 0; 
    88 
    99   void CObjectFactory::SetCurrentContextId(const StdString & context) 
    10    { CObjectFactory::CurrContext = context; } 
     10   { 
     11     if(CObjectFactory::CurrContext_ptr == 0 ) CObjectFactory::CurrContext_ptr = new StdString; 
     12     CObjectFactory::CurrContext_ptr->assign(context);  
     13   } 
    1114 
    1215   StdString & CObjectFactory::GetCurrentContextId(void) 
    13    { return (CObjectFactory::CurrContext); } 
     16   {  
     17     return (*CObjectFactory::CurrContext_ptr);  
     18   } 
    1419 
    1520} // namespace xios 
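
object_factory.cpp turns the factory's current-context id into a threadprivate pointer that is allocated on first assignment, so each OpenMP thread can work in its own context. A minimal sketch of the same pattern (set_context and get_context are hypothetical names):

    #include <string>
    #include <cstdio>
    #include <omp.h>

    static std::string* current_context = nullptr;
    #pragma omp threadprivate(current_context)

    static void set_context(const std::string& id)
    {
      if (current_context == nullptr) current_context = new std::string;  // lazy, per thread
      current_context->assign(id);
    }

    static const std::string& get_context() { return *current_context; }

    int main()
    {
      #pragma omp parallel
      {
        set_context("ctx_" + std::to_string(omp_get_thread_num()));
        #pragma omp critical (_output)
        std::printf("thread %d works on %s\n", omp_get_thread_num(), get_context().c_str());
      }
      return 0;
    }
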
  • XIOS/dev/dev_trunk_omp/src/object_factory.hpp

    r1542 r1601  
    5858 
    5959         /// Propriétés statiques /// 
    60          static StdString CurrContext; 
     60         static StdString *CurrContext_ptr; 
     61         #pragma omp threadprivate(CurrContext_ptr) 
    6162 
    6263   }; // class CObjectFactory 
  • XIOS/dev/dev_trunk_omp/src/object_factory_impl.hpp

    r1542 r1601  
    1010       int CObjectFactory::GetObjectNum(void) 
    1111   { 
    12       if (CurrContext.size() == 0) 
     12      if (CurrContext_ptr->size() == 0) 
    1313         ERROR("CObjectFactory::GetObjectNum(void)", 
    1414               << "please define current context id !"); 
    15       return (U::AllVectObj[CObjectFactory::CurrContext].size()); 
     15      if(U::AllVectObj_ptr == NULL) return 0; 
     16      return (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].size(); 
    1617   } 
    1718 
     
    1920      int CObjectFactory::GetObjectIdNum(void) 
    2021   { 
    21       if (CurrContext.size() == 0) 
     22      if (CurrContext_ptr->size() == 0) 
    2223         ERROR("CObjectFactory::GetObjectIdNum(void)", 
    2324               << "please define current context id !"); 
    24       return (U::AllMapObj[CObjectFactory::CurrContext].size()); 
     25      if(U::AllMapObj_ptr == NULL) return 0; 
     26      return (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].size(); 
    2527   } 
    2628 
     
    2830      bool CObjectFactory::HasObject(const StdString & id) 
    2931   { 
    30       if (CurrContext.size() == 0) 
     32      if (CurrContext_ptr->size() == 0) 
    3133         ERROR("CObjectFactory::HasObject(const StdString & id)", 
    3234               << "[ id = " << id << " ] please define current context id !"); 
    33       return (U::AllMapObj[CObjectFactory::CurrContext].find(id) != 
    34               U::AllMapObj[CObjectFactory::CurrContext].end()); 
     35      if(U::AllMapObj_ptr  == NULL)  return false; 
     36      return ((*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].find(id) != 
     37              (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].end()); 
     38 
    3539   } 
    3640 
     
    3842      bool CObjectFactory::HasObject(const StdString & context, const StdString & id) 
    3943   { 
    40       if (U::AllMapObj.find(context) == U::AllMapObj.end()) return false ; 
    41       else return (U::AllMapObj[context].find(id) !=  U::AllMapObj[context].end()); 
     44      if(U::AllMapObj_ptr  == NULL) return false; 
     45 
     46      if (U::AllMapObj_ptr->find(context) == U::AllMapObj_ptr->end()) return false ; 
     47      else return ((*U::AllMapObj_ptr)[context].find(id) !=  (*U::AllMapObj_ptr)[context].end()); 
    4248   } 
    4349 
     
    4551      std::shared_ptr<U> CObjectFactory::GetObject(const U * const object) 
    4652   { 
    47       if (CurrContext.size() == 0) 
     53      if(U::AllVectObj_ptr == NULL) return (std::shared_ptr<U>()); 
     54      if (CurrContext_ptr->size() == 0) 
    4855         ERROR("CObjectFactory::GetObject(const U * const object)", 
    4956               << "please define current context id !"); 
    50       std::vector<std::shared_ptr<U> > & vect = 
    51                      U::AllVectObj[CObjectFactory::CurrContext]; 
     57      std::vector<std::shared_ptr<U> > & vect = (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr]; 
    5258 
    5359      typename std::vector<std::shared_ptr<U> >::const_iterator 
     
    7076      std::shared_ptr<U> CObjectFactory::GetObject(const StdString & id) 
    7177   { 
    72       if (CurrContext.size() == 0) 
     78      if(U::AllMapObj_ptr  == NULL) return (std::shared_ptr<U>()); 
     79      if (CurrContext_ptr->size() == 0) 
    7380         ERROR("CObjectFactory::GetObject(const StdString & id)", 
    7481               << "[ id = " << id << " ] please define current context id !"); 
     
    7784               << "[ id = " << id << ", U = " << U::GetName() << " ] " 
    7885               << "object was not found."); 
    79       return (U::AllMapObj[CObjectFactory::CurrContext][id]); 
     86      return (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr][id]; 
    8087   } 
    8188 
     
    8390      std::shared_ptr<U> CObjectFactory::GetObject(const StdString & context, const StdString & id) 
    8491   { 
     92      if(U::AllMapObj_ptr  == NULL) return (std::shared_ptr<U>()); 
     93 
    8594      if (!CObjectFactory::HasObject<U>(context,id)) 
    8695         ERROR("CObjectFactory::GetObject(const StdString & id)", 
    8796               << "[ id = " << id << ", U = " << U::GetName() <<", context = "<<context<< " ] " 
    8897               << "object was not found."); 
    89       return (U::AllMapObj[context][id]); 
     98      return (*U::AllMapObj_ptr)[context][id]; 
    9099   } 
    91100 
     
    93102   std::shared_ptr<U> CObjectFactory::CreateObject(const StdString& id) 
    94103   { 
    95       if (CurrContext.empty()) 
     104      if(U::AllVectObj_ptr == NULL) U::AllVectObj_ptr = new xios_map<StdString, std::vector<std::shared_ptr<U> > >; 
     105      if(U::AllMapObj_ptr  == NULL) U::AllMapObj_ptr  = new xios_map<StdString, xios_map<StdString, std::shared_ptr<U> > >; 
     106 
     107      if (CurrContext_ptr->empty()) 
    96108         ERROR("CObjectFactory::CreateObject(const StdString& id)", 
    97109               << "[ id = " << id << " ] please define current context id !"); 
     
    105117         std::shared_ptr<U> value(new U(id.empty() ? CObjectFactory::GenUId<U>() : id)); 
    106118 
    107          U::AllVectObj[CObjectFactory::CurrContext].insert(U::AllVectObj[CObjectFactory::CurrContext].end(), value); 
    108          U::AllMapObj[CObjectFactory::CurrContext].insert(std::make_pair(value->getId(), value)); 
     119         (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].insert((*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].end(), value); 
     120         (*U::AllMapObj_ptr) [*CObjectFactory::CurrContext_ptr].insert(std::make_pair(value->getId(), value)); 
    109121 
    110122         return value; 
     
    116128         CObjectFactory::GetObjectVector(const StdString & context) 
    117129   { 
    118       return (U::AllVectObj[context]); 
     130      return (*U::AllVectObj_ptr)[context]; 
    119131   } 
    120132 
     
    130142   { 
    131143      StdOStringStream oss; 
    132       oss << GetUIdBase<U>() << U::GenId[CObjectFactory::CurrContext]++; 
     144      if(U::GenId_ptr == NULL) U::GenId_ptr = new xios_map< StdString, long int >; 
     145      oss << GetUIdBase<U>() << (*U::GenId_ptr)[*CObjectFactory::CurrContext_ptr]++; 
    133146      return oss.str(); 
    134147   } 
  • XIOS/dev/dev_trunk_omp/src/object_template.hpp

    r1542 r1601  
    108108         static xios_map<StdString, 
    109109                xios_map<StdString, 
    110                 std::shared_ptr<DerivedType> > > AllMapObj; 
     110                std::shared_ptr<DerivedType> > > *AllMapObj_ptr; 
     111         #pragma omp threadprivate(AllMapObj_ptr) 
    111112         static xios_map<StdString, 
    112                 std::vector<std::shared_ptr<DerivedType> > > AllVectObj; 
     113                std::vector<std::shared_ptr<DerivedType> > > *AllVectObj_ptr; 
     114         #pragma omp threadprivate(AllVectObj_ptr) 
    113115 
    114          static xios_map< StdString, long int > GenId ; 
     116         static xios_map< StdString, long int > *GenId_ptr ; 
     117         #pragma omp threadprivate(GenId_ptr) 
    115118 
    116119   }; // class CObjectTemplate 
  • XIOS/dev/dev_trunk_omp/src/object_template_impl.hpp

    r1542 r1601  
    2424      xios_map<StdString, 
    2525      xios_map<StdString, 
    26       std::shared_ptr<T> > > CObjectTemplate<T>::AllMapObj; 
     26      std::shared_ptr<T> > > *CObjectTemplate<T>::AllMapObj_ptr = 0; 
    2727 
    2828   template <class T> 
    2929      xios_map<StdString, 
    30       std::vector<std::shared_ptr<T> > > CObjectTemplate<T>::AllVectObj; 
    31  
    32    template <class T> 
    33       xios_map<StdString,long int> CObjectTemplate<T>::GenId; 
     30      std::vector<std::shared_ptr<T> > > *CObjectTemplate<T>::AllVectObj_ptr = 0; 
     31 
     32   template <class T> 
     33      xios_map<StdString,long int> *CObjectTemplate<T>::GenId_ptr = 0; 
    3434 
    3535   template <class T> 
     
    6666         CObjectTemplate<T>::GetAllVectobject(const StdString & contextId) 
    6767   { 
    68       return (CObjectTemplate<T>::AllVectObj[contextId]); 
     68     return (CObjectTemplate<T>::AllVectObj_ptr->at(contextId)); 
    6969   } 
    7070 
     
    426426   const vector<T*> CObjectTemplate<T>::getAll() 
    427427   { 
    428      const vector< std::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(); 
     428     const vector< std::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(CObjectFactory::GetCurrentContextId()); 
    429429     vector<T*> vect; 
    430430 
  • XIOS/dev/dev_trunk_omp/src/parse_expr/yacc_parser.cpp

    r1158 r1601  
    8080} 
    8181 
    82   IFilterExprNode* parsed; 
    83   std::string globalInputText; 
    84   size_t globalReadOffset = 0; 
     82  static IFilterExprNode* parsed; 
     83  static std::string globalInputText; 
     84  static std::string *globalInputText_ptr = 0; 
     85  static size_t globalReadOffset = 0; 
     86  #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 
    8587 
    8688  int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 
    8789  { 
     90    if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 
    8891    size_t numBytesToRead = maxBytesToRead; 
    89     size_t bytesRemaining = globalInputText.length()-globalReadOffset; 
     92    size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset; 
    9093    size_t i; 
    9194    if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 
    92     for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i]; 
     95    for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i]; 
    9396    *numBytesRead = numBytesToRead; 
    9497    globalReadOffset += numBytesToRead; 
     
    20022005  IFilterExprNode* parseExpr(const string& strExpr) 
    20032006  { 
    2004     globalInputText = strExpr; 
    2005     globalReadOffset = 0; 
    2006     yyparse(); 
     2007    #pragma omp critical (_parser) 
     2008    { 
     2009      if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 
     2010      (*globalInputText_ptr).assign (strExpr); 
     2011      globalReadOffset = 0; 
     2012      yyparse(); 
     2013    } 
    20072014    return parsed; 
    20082015  } 
  • XIOS/dev/dev_trunk_omp/src/parse_expr/yacc_parser.yacc

    r1158 r1601  
    1515} 
    1616 
    17   IFilterExprNode* parsed; 
    18   std::string globalInputText; 
    19   size_t globalReadOffset = 0; 
     17  static IFilterExprNode* parsed; 
     18  static std::string globalInputText; 
     19  static std::string *globalInputText_ptr = 0; 
     20  static size_t globalReadOffset = 0; 
     21  #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 
    2022 
    2123  int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 
    2224  { 
     25    if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 
    2326    size_t numBytesToRead = maxBytesToRead; 
    24     size_t bytesRemaining = globalInputText.length()-globalReadOffset; 
     27    size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset; 
    2528    size_t i; 
    2629    if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 
    27     for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i]; 
     30    for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i]; 
    2831    *numBytesRead = numBytesToRead; 
    2932    globalReadOffset += numBytesToRead; 
     
    145148  IFilterExprNode* parseExpr(const string& strExpr) 
    146149  { 
    147     globalInputText = strExpr; 
    148     globalReadOffset = 0; 
    149     yyparse(); 
     150    #pragma omp critical (_parser) 
     151    { 
     152      if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 
     153      (*globalInputText_ptr).assign (strExpr); 
     154      globalReadOffset = 0; 
     155      yyparse(); 
     156    } 
    150157    return parsed; 
    151158  } 
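
    yacc_parser.cpp/.yacc make the lexer input state (globalInputText_ptr, globalReadOffset) thread-private and serialize the call to the generated yyparse() inside #pragma omp critical (_parser), since the generated parser is not reentrant. The sketch below reproduces that shape with a trivial stand-in parser; fakeParse is hypothetical and only plays the role of yyparse.

        #include <cstdio>
        #include <string>
        #include <omp.h>

        // Thread-private input state, mirroring globalInputText_ptr / globalReadOffset in the diff.
        static std::string* inputText = nullptr;
        static size_t readOffset = 0;
        #pragma omp threadprivate(inputText, readOffset)

        // Stand-in for the generated, non-reentrant yyparse(): it may touch hidden shared
        // state, so calls to it must not overlap. Here it just sums the digits of the input.
        static void fakeParse(int& result)
        {
          result = 0;
          while (readOffset < inputText->length())
            result += (*inputText)[readOffset++] - '0';
        }

        // Public entry point: prepare the thread-private input, then serialize the
        // non-reentrant call, as parseExpr() does with '#pragma omp critical (_parser)'.
        int parseExpr(const std::string& expr)
        {
          int result = 0;
          #pragma omp critical (_parser)
          {
            if (inputText == nullptr) inputText = new std::string;  // lazy thread-private allocation
            inputText->assign(expr);
            readOffset = 0;
            fakeParse(result);
          }
          return result;
        }

        int main()
        {
          #pragma omp parallel num_threads(4)
          {
            int r = parseExpr("1234");
            #pragma omp critical
            std::printf("thread %d parsed value %d\n", omp_get_thread_num(), r);
          }
          return 0;
        }
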
  • XIOS/dev/dev_trunk_omp/src/policy.cpp

    r855 r1601  
    1010#include "policy.hpp" 
    1111#include <cmath> 
     12using namespace ep_lib; 
    1213 
    1314namespace xios 
  • XIOS/dev/dev_trunk_omp/src/policy.hpp

    r855 r1601  
    3131{ 
    3232protected: 
    33   DivideAdaptiveComm(const MPI_Comm& mpiComm); 
     33  DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 
    3434 
    3535  void computeMPICommLevel(); 
     
    4141 
    4242protected: 
    43   const MPI_Comm& internalComm_; 
     43  const ep_lib::MPI_Comm& internalComm_; 
    4444  std::vector<std::vector<int> > groupParentsBegin_; 
    4545  std::vector<std::vector<int> > nbInGroupParents_; 
  • XIOS/dev/dev_trunk_omp/src/registry.cpp

    r696 r1601  
    44#include <fstream> 
    55#include <sstream> 
     6using namespace ep_lib; 
    67 
    78namespace xios 
     
    258259  void CRegistry::hierarchicalGatherRegistry(void) 
    259260  { 
    260     hierarchicalGatherRegistry(communicator) ; 
     261    //hierarchicalGatherRegistry(communicator) ; 
     262    gatherRegistry(communicator) ; 
    261263  } 
    262264 
  • XIOS/dev/dev_trunk_omp/src/registry.hpp

    r700 r1601  
    2323 
    2424/** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 
    25       CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
     25      CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
    2626 
    2727/** Copy constructor */ 
     
    106106 
    107107/** use internally for recursivity */ 
    108       void gatherRegistry(const MPI_Comm& comm) ; 
     108      void gatherRegistry(const ep_lib::MPI_Comm& comm) ; 
    109109 
    110110/** use internally for recursivity */ 
    111       void hierarchicalGatherRegistry(const MPI_Comm& comm) ; 
     111      void hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) ; 
    112112 
    113113 
     
    120120 
    121121/** MPI communicator used for broadcast and gather operation */ 
    122       MPI_Comm communicator ; 
     122      ep_lib::MPI_Comm communicator ; 
    123123  } ; 
    124124 
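
    registry.hpp, policy.hpp and the other headers above replace bare MPI handle types with ep_lib:: equivalents so that the endpoint-MPI layer can substitute its own handles when several OpenMP threads share one MPI process. The block below is not the actual XIOS ep_lib header; it only sketches, under that assumption, how such a namespace could fall back to the standard MPI types when the endpoint layer is disabled, so that declarations like CRegistry's ep_lib::MPI_Comm communicator compile against either backend.

        // Illustrative only -- not the actual XIOS ep_lib headers.
        // When the endpoint layer (_usingEP) is off, the ep_lib names are plain aliases of
        // the standard MPI handles, so code written against ep_lib:: compiles unchanged.
        #include <mpi.h>

        #ifndef _usingEP
        namespace ep_lib
        {
          using MPI_Comm    = ::MPI_Comm;
          using MPI_Request = ::MPI_Request;
          using MPI_Status  = ::MPI_Status;
        }
        #endif

        // A class holding its communicator through the wrapper type, as registry.hpp now does.
        class Registry
        {
        public:
          Registry(const ep_lib::MPI_Comm& comm = MPI_COMM_WORLD) : communicator(comm) {}
          int size() const { int s = 0; MPI_Comm_size(communicator, &s); return s; }
        private:
          ep_lib::MPI_Comm communicator;
        };

        int main(int argc, char** argv)
        {
          MPI_Init(&argc, &argv);
          Registry reg;               // defaults to MPI_COMM_WORLD through the wrapper type
          std::printf("communicator size: %d\n", reg.size());
          MPI_Finalize();
          return 0;
        }
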
  • XIOS/dev/dev_trunk_omp/src/server.cpp

    r1587 r1601  
    1515#include "event_scheduler.hpp" 
    1616#include "string_tools.hpp" 
     17using namespace ep_lib; 
    1718 
    1819namespace xios 
     
    4748    void CServer::initialize(void) 
    4849    { 
    49       int initialized ; 
    50       MPI_Initialized(&initialized) ; 
    51       if (initialized) is_MPI_Initialized=true ; 
    52       else is_MPI_Initialized=false ; 
     50      //int initialized ; 
     51      //MPI_Initialized(&initialized) ; 
     52      //if (initialized) is_MPI_Initialized=true ; 
     53      //else is_MPI_Initialized=false ; 
    5354      int rank ; 
    5455 
     
    5758      { 
    5859 
    59         if (!is_MPI_Initialized) 
    60         { 
    61           MPI_Init(NULL, NULL); 
    62         } 
     60        //if (!is_MPI_Initialized) 
     61        //{ 
     62        //  MPI_Init(NULL, NULL); 
     63        //} 
    6364        CTimer::get("XIOS").resume() ; 
    6465 
     
    152153            if (serverLevel==2) 
    153154            { 
     155              #pragma omp critical (_output) 
    154156              info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ; 
    155157              for (i=0; i<sndServerGlobalRanks.size(); i++) 
     
    188190              MPI_Comm_size(intraComm,&intraCommSize) ; 
    189191              MPI_Comm_rank(intraComm,&intraCommRank) ; 
    190               info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 
     192 
     193              MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 
     194              #pragma omp critical (_output) 
     195              { 
     196                info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 
    191197                       <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< clientLeader<<endl ; 
    192  
    193               MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 
     198              } 
     199               
    194200              interCommLeft.push_back(newComm) ; 
    195201            } 
     
    209215              MPI_Comm_size(intraComm, &intraCommSize) ; 
    210216              MPI_Comm_rank(intraComm, &intraCommRank) ; 
    211               info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 
     217               
     218              MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 
     219              #pragma omp critical (_output) 
     220              { 
     221                info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 
    212222                       <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< clientLeader<<endl ; 
    213               MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 
     223              } 
    214224              interCommLeft.push_back(newComm) ; 
    215225            } 
     
    221231            MPI_Comm_size(intraComm, &intraCommSize) ; 
    222232            MPI_Comm_rank(intraComm, &intraCommRank) ; 
    223             info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 
     233 
     234            MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ; 
     235            #pragma omp critical (_output) 
     236            { 
     237              info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 
    224238                <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< sndServerGlobalRanks[i]<<endl ; 
    225             MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ; 
     239            } 
    226240            interCommRight.push_back(newComm) ; 
    227241          } 
     
    234248          MPI_Comm_size(intraComm, &intraCommSize) ; 
    235249          MPI_Comm_rank(intraComm, &intraCommRank) ; 
    236           info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 
     250 
     251          MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ; 
     252          #pragma omp critical (_output) 
     253          { 
     254            info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 
    237255                   <<" intraCommRank :"<<intraCommRank<<"  clientLeader "<< clientLeader<<endl ; 
    238  
    239           MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ; 
     256          } 
     257 
    240258          interCommLeft.push_back(newComm) ; 
    241259        } 
     
    426444      { 
    427445        if (CXios::usingOasis) oasis_finalize(); 
    428         else MPI_Finalize() ; 
     446        //else MPI_Finalize() ; 
    429447      } 
     448 
    430449      report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl  ; 
    431450      report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl  ; 
     
    637656       { 
    638657         traceOff() ; 
    639          MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 
     658         MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ; 
    640659         traceOn() ; 
    641660         if (flag==true) 
    642661         { 
     662           #ifdef _usingMPI 
    643663           rank=status.MPI_SOURCE ; 
     664           #elif _usingEP 
     665           rank=status.ep_src; 
     666           #endif 
    644667           MPI_Get_count(&status,MPI_CHAR,&count) ; 
    645668           buffer=new char[count] ; 
     
    655678         if (flag==true) 
    656679         { 
     680           #ifdef _usingMPI 
    657681           rank=status.MPI_SOURCE ; 
     682           #elif _usingEP 
     683           rank=status.ep_src; 
     684           #endif 
    658685           MPI_Get_count(&status,MPI_CHAR,&count) ; 
    659686           recvContextMessage((void*)buffer,count) ; 
     
    740767         MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ; 
    741768         buffers.push_back(new char[counts.back()]) ; 
     769         MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&request) ; 
    742770         requests.push_back(request); 
    743          MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ; 
    744771         isEventRegistered.push_back(false); 
    745772         isEventQueued.push_back(false); 
     
    750777       { 
    751778         // (2) If context id is received, register an event 
    752          MPI_Test(&requests[ctxNb],&flag,&status) ; 
     779         if(!isEventRegistered[ctxNb]) MPI_Test(&requests[ctxNb],&flag,&status) ; 
    753780         if (flag==true && !isEventRegistered[ctxNb]) 
    754781         { 
     
    794821         MPI_Intercomm_merge(contextInterComm,1,&inter); 
    795822         MPI_Barrier(inter); 
    796          MPI_Comm_free(&inter); 
    797823         context->initServer(intraComm,contextInterComm); 
    798824         contextInterComms.push_back(contextInterComm); 
    799825 
     826         MPI_Comm_free(&inter); 
    800827       } 
    801828       // Secondary server: create communication channel with a primary server 
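
    Several of the server.cpp hunks above wrap the info(50) diagnostics in #pragma omp critical (_output): once multiple threads inside one process write to the shared log stream, unsynchronized output would interleave mid-line. A minimal sketch of the idea, with std::cout standing in for the XIOS info() stream and logIntercommCreated as a purely illustrative helper:

        #include <iostream>
        #include <omp.h>

        // A single named critical section serializes whole log records, so lines written
        // by different threads never interleave character by character.
        void logIntercommCreated(int rank, int intraCommSize, int intraCommRank, int clientLeader)
        {
          #pragma omp critical (_output)
          {
            std::cout << "intercommCreate::server " << rank
                      << " intraCommSize : " << intraCommSize
                      << " intraCommRank : " << intraCommRank
                      << " clientLeader " << clientLeader << std::endl;
          }
        }

        int main()
        {
          #pragma omp parallel num_threads(4)
          {
            int t = omp_get_thread_num();
            logIntercommCreated(/*rank*/ t, /*intraCommSize*/ 4, /*intraCommRank*/ t, /*clientLeader*/ 0);
          }
          return 0;
        }
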
  • XIOS/dev/dev_trunk_omp/src/server.hpp

    r1587 r1601  
    2626        static void registerContext(void* buff,int count, int leaderRank=0); 
    2727 
    28         static MPI_Comm intraComm; 
    29         static std::list<MPI_Comm> interCommLeft;           // interComm between server (primary, classical or secondary) and its client (client or primary server) 
    30         static std::list<MPI_Comm> interCommRight;          // interComm between primary server and secondary server (non-empty only for primary server pool) 
    31         static std::list<MPI_Comm> contextInterComms;  // list of context intercomms 
    32         static std::list<MPI_Comm> contextIntraComms;  // list of context intercomms (needed only in case of secondary servers) 
     28        static ep_lib::MPI_Comm intraComm; 
     29        static std::list<ep_lib::MPI_Comm> interCommLeft;           // interComm between server (primary, classical or secondary) and its client (client or primary server) 
     30        static std::list<ep_lib::MPI_Comm> interCommRight;          // interComm between primary server and secondary server (non-empty only for primary server pool) 
     31        static std::list<ep_lib::MPI_Comm> contextInterComms;  // list of context intercomms 
     32        static std::list<ep_lib::MPI_Comm> contextIntraComms;  // list of context intercomms (needed only in case of secondary servers) 
    3333        static CEventScheduler* eventScheduler; 
    3434 
  • XIOS/dev/dev_trunk_omp/src/timer.cpp

    r1158 r1601  
    66#include <sstream> 
    77#include "tracer.hpp" 
     8using namespace ep_lib; 
    89 
    910namespace xios 
    1011{ 
    11   std::map<std::string,CTimer> CTimer::allTimer; 
     12  std::map<std::string,CTimer> *CTimer::allTimer_ptr = 0; 
    1213   
    1314  CTimer::CTimer(const std::string& name_) : name(name_)  
     
    5455  CTimer& CTimer::get(const std::string name) 
    5556  { 
    56     std::map<std::string,CTimer>::iterator it = allTimer.find(name); 
    57     if (it == allTimer.end()) 
    58       it = allTimer.insert(std::make_pair(name, CTimer(name))).first; 
     57    if(allTimer_ptr == NULL) allTimer_ptr = new std::map<std::string,CTimer>; 
     58 
     59    std::map<std::string,CTimer>::iterator it = allTimer_ptr->find(name); 
     60 
     61    if (it == allTimer_ptr->end()) 
     62      it = allTimer_ptr->insert(std::make_pair(name, CTimer(name))).first; 
     63 
    5964    return it->second; 
    6065  } 
     
    6368  { 
    6469    std::ostringstream strOut ; 
    65     for(std::map<std::string,CTimer>::iterator it=allTimer.begin();it!=allTimer.end();++it) 
     70    if(allTimer_ptr == 0) allTimer_ptr = new std::map<std::string,CTimer>; 
     71 
     72    for(std::map<std::string,CTimer>::iterator it=allTimer_ptr->begin();it!=allTimer_ptr->end();++it) 
    6673      strOut<<"Timer : "<<it->first<<"    -->   cumulated time : "<<it->second.getCumulatedTime()<<std::endl ; 
    6774    return strOut.str() ; 
  • XIOS/dev/dev_trunk_omp/src/timer.hpp

    r1158 r1601  
    2020      void reset(void); 
    2121      double getCumulatedTime(void); 
    22       static std::map<std::string,CTimer> allTimer; 
     22      static std::map<std::string,CTimer> *allTimer_ptr; 
     23      #pragma omp threadprivate(allTimer_ptr) 
    2324      static double getTime(void); 
    2425      static CTimer& get(std::string name); 
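
    timer.cpp/timer.hpp apply the same thread-privatization to the registry of named timers: allTimer becomes allTimer_ptr, allocated the first time CTimer::get() or getAllCumulatedTime() runs on a given thread, so each thread accumulates its own timings without locking. A compact get-or-create sketch along those lines (Timer and getTimer are illustrative names):

        #include <chrono>
        #include <cstdio>
        #include <map>
        #include <string>
        #include <omp.h>

        struct Timer { double cumulated = 0.0; };

        // Thread-private registry of named timers, created lazily as in CTimer::get().
        static std::map<std::string, Timer>* allTimers = nullptr;
        #pragma omp threadprivate(allTimers)

        Timer& getTimer(const std::string& name)
        {
          if (allTimers == nullptr) allTimers = new std::map<std::string, Timer>;
          return (*allTimers)[name];   // inserts a fresh Timer on first use of this name
        }

        int main()
        {
          #pragma omp parallel num_threads(4)
          {
            auto t0 = std::chrono::steady_clock::now();
            volatile double x = 0.0;
            for (int i = 0; i < 1000000; ++i) x = x + 1.0;     // some per-thread work
            auto t1 = std::chrono::steady_clock::now();
            getTimer("work").cumulated += std::chrono::duration<double>(t1 - t0).count();

            #pragma omp critical
            std::printf("thread %d : work = %g s\n", omp_get_thread_num(), getTimer("work").cumulated);
          }
          return 0;
        }
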
  • XIOS/dev/dev_trunk_omp/src/transformation/Functions/reduction.cpp

    r979 r1601  
    99 
    1010CReductionAlgorithm::CallBackMap* CReductionAlgorithm::reductionCreationCallBacks_ = 0; 
    11 std::map<StdString,EReductionType> CReductionAlgorithm::ReductionOperations = std::map<StdString,EReductionType>(); 
     11std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr = 0; 
     12 
    1213bool CReductionAlgorithm::initReductionOperation(std::map<StdString,EReductionType>& m) 
    1314{ 
     
    2930} 
    3031 
    31 bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(CReductionAlgorithm::ReductionOperations); 
     32bool CReductionAlgorithm::initReductionOperation() 
     33{ 
     34  CReductionAlgorithm::ReductionOperations_ptr = new std::map<StdString,EReductionType>(); 
     35  // So so stupid way to intialize operation but it works ... 
     36  (*CReductionAlgorithm::ReductionOperations_ptr)["sum"] = TRANS_REDUCE_SUM; 
     37  CSumReductionAlgorithm::registerTrans(); 
     38 
     39  (*CReductionAlgorithm::ReductionOperations_ptr)["min"] = TRANS_REDUCE_MIN; 
     40  CMinReductionAlgorithm::registerTrans(); 
     41 
     42  (*CReductionAlgorithm::ReductionOperations_ptr)["max"] = TRANS_REDUCE_MAX; 
     43  CMaxReductionAlgorithm::registerTrans(); 
     44 
     45  (*CReductionAlgorithm::ReductionOperations_ptr)["extract"] = TRANS_REDUCE_EXTRACT; 
     46  CExtractReductionAlgorithm::registerTrans(); 
     47 
     48  (*CReductionAlgorithm::ReductionOperations_ptr)["average"] = TRANS_REDUCE_AVERAGE; 
     49  CAverageReductionAlgorithm::registerTrans(); 
     50} 
     51 
    3252 
    3353CReductionAlgorithm* CReductionAlgorithm::createOperation(EReductionType reduceType) 
  • XIOS/dev/dev_trunk_omp/src/transformation/Functions/reduction.hpp

    r1260 r1601  
    2323{ 
    2424public: 
    25   static std::map<StdString,EReductionType> ReductionOperations; 
    26  
     25  static std::map<StdString,EReductionType> *ReductionOperations_ptr; 
     26  #pragma omp threadprivate(ReductionOperations_ptr) 
    2727public: 
    2828  CReductionAlgorithm() {} 
     
    6161  typedef std::map<EReductionType, CreateOperationCallBack> CallBackMap; 
    6262  static CallBackMap* reductionCreationCallBacks_; 
     63  #pragma omp threadprivate(reductionCreationCallBacks_) 
    6364 
    6465  static bool registerOperation(EReductionType reduceType, CreateOperationCallBack createFn); 
     
    6768protected: 
    6869  static bool initReductionOperation(std::map<StdString,EReductionType>& m); 
     70  static bool initReductionOperation(); 
    6971  static bool _dummyInit; 
     72  #pragma omp threadprivate(_dummyInit) 
    7073}; 
    7174 
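
    reduction.cpp/.hpp drop the statically initialized ReductionOperations map (and the _dummyInit trick) in favour of a thread-private ReductionOperations_ptr filled by an explicit initReductionOperation(); as the transformation hunks further down show, every constructor now guards the lookup with if (ReductionOperations_ptr == 0) before calling createOperation. A self-contained sketch of that guard-lookup-create flow, with placeholder operation classes rather than the XIOS algorithms:

        #include <cstdio>
        #include <map>
        #include <stdexcept>
        #include <string>
        #include <omp.h>

        enum EReductionType { REDUCE_SUM, REDUCE_MAX };

        struct ReductionAlgorithm
        {
          virtual double apply(double a, double b) const = 0;
          virtual ~ReductionAlgorithm() {}
        };
        struct SumReduction : ReductionAlgorithm { double apply(double a, double b) const { return a + b; } };
        struct MaxReduction : ReductionAlgorithm { double apply(double a, double b) const { return a > b ? a : b; } };

        // Thread-private name -> type table, filled on demand (ReductionOperations_ptr in the diff).
        static std::map<std::string, EReductionType>* reductionOps = nullptr;
        #pragma omp threadprivate(reductionOps)

        void initReductionOperations()
        {
          reductionOps = new std::map<std::string, EReductionType>;
          (*reductionOps)["sum"] = REDUCE_SUM;
          (*reductionOps)["max"] = REDUCE_MAX;
        }

        ReductionAlgorithm* createOperation(EReductionType t)
        {
          if (t == REDUCE_SUM) return new SumReduction;
          return new MaxReduction;
        }

        // What the transformation constructors now do: guard, look up, create.
        ReductionAlgorithm* makeReduction(const std::string& op)
        {
          if (reductionOps == nullptr) initReductionOperations();      // lazy per-thread init
          std::map<std::string, EReductionType>::iterator it = reductionOps->find(op);
          if (it == reductionOps->end()) throw std::runtime_error("Operation '" + op + "' not found");
          return createOperation(it->second);
        }

        int main()
        {
          #pragma omp parallel num_threads(2)
          {
            ReductionAlgorithm* r = makeReduction("sum");
            #pragma omp critical
            std::printf("thread %d : sum(2,3) = %g\n", omp_get_thread_num(), r->apply(2.0, 3.0));
            delete r;
          }
          return 0;
        }
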
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_extract_domain.cpp

    r1260 r1601  
    1313#include "grid.hpp" 
    1414#include "grid_transformation_factory_impl.hpp" 
    15 #include "reduction.hpp" 
    1615 
    1716namespace xios { 
     
    6261 
    6362  pos_ = algo->position; 
    64   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     63  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     64  { 
     65    CReductionAlgorithm::initReductionOperation(); 
     66  } 
     67  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    6568} 
    6669 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_extract_domain.hpp

    r1260 r1601  
    1313#include "transformation.hpp" 
    1414 
     15#include "reduction.hpp" 
    1516namespace xios { 
    1617 
     
    2526  Extract a domain to an axis 
    2627*/ 
    27 class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation 
     28class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation, public CReductionAlgorithm 
    2829{ 
    2930public: 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_inverse.cpp

    r1542 r1601  
    1515#include "inverse_axis.hpp" 
    1616#include "client_client_dht_template.hpp" 
     17using namespace ep_lib; 
    1718 
    1819namespace xios { 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_axis.cpp

    r1314 r1601  
    1212#include "grid.hpp" 
    1313#include "grid_transformation_factory_impl.hpp" 
    14 #include "reduction.hpp" 
    1514 
    1615namespace xios { 
     
    6867 
    6968  } 
     69  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     70  { 
     71    CReductionAlgorithm::initReductionOperation(); 
     72  } 
    7073 
    71   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     74  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    7275} 
    7376 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_axis.hpp

    r1314 r1601  
    1212#include "axis_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     14#include "reduction.hpp" 
    1415 
    1516namespace xios { 
     
    2324  Reduce a axis to an axis 
    2425*/ 
    25 class CAxisAlgorithmReduceAxis : public CAxisAlgorithmTransformation 
     26class CAxisAlgorithmReduceAxis : public CAxisAlgorithmTransformation, public CReductionAlgorithm 
    2627{ 
    2728public: 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_domain.cpp

    r1299 r1601  
    1313#include "grid.hpp" 
    1414#include "grid_transformation_factory_impl.hpp" 
    15 #include "reduction.hpp" 
     15 
    1616 
    1717namespace xios { 
     
    7070 
    7171  dir_ = (CReduceDomainToAxis::direction_attr::iDir == algo->direction)  ? iDir : jDir; 
    72   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     72  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     73  { 
     74    CReductionAlgorithm::initReductionOperation(); 
     75  } 
     76  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    7377  local = algo->local ; 
    7478} 
  • XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_domain.hpp

    r1299 r1601  
    1212#include "axis_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     14#include "reduction.hpp" 
    1415 
    1516namespace xios { 
     
    2425  Reduce a domain to an axis 
    2526*/ 
    26 class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation 
     27class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation, public CReductionAlgorithm 
    2728{ 
    2829public: 
     
    4647    jDir = 2 
    4748  }; 
    48    
     49 
    4950  ReduceDirection dir_; 
    5051  bool local ; 
  • XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_expand.cpp

    r1553 r1601  
    161161  else domainDestination->domain_ref.setValue(domainDstRef); 
    162162 
    163    
    164163  // Here are attributes of source need tranfering 
    165164  int niGloSrc = domainSource->ni_glo; 
  • XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.cpp

    r1542 r1601  
    2020#include "interpolate_domain.hpp" 
    2121#include "grid.hpp" 
     22using namespace ep_lib; 
    2223 
    2324namespace xios { 
     
    406407  CContextClient* client=context->client; 
    407408 
    408   MPI_Comm poleComme(MPI_COMM_NULL); 
    409   MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 
    410   if (MPI_COMM_NULL != poleComme) 
     409  ep_lib::MPI_Comm poleComme = MPI_COMM_NULL; 
     410  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 
     411  if (poleComme!=MPI_COMM_NULL) 
    411412  { 
    412413    int nbClientPole; 
    413     MPI_Comm_size(poleComme, &nbClientPole); 
     414    ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 
    414415 
    415416    std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, 
     
    422423    std::vector<int> recvCount(nbClientPole,0); 
    423424    std::vector<int> displ(nbClientPole,0); 
    424     MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 
    425  
     425    ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 
    426426    displ[0]=0; 
    427427    for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ; 
     
    445445 
    446446    // Gather all index and weight for pole 
    447     MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 
    448     MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 
     447    ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 
     448    ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 
    449449 
    450450    std::map<int,double> recvTemp; 
     
    593593 
    594594 
    595   MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 
     595  ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 
    596596 
    597597  int* sendIndexDestBuff = new int [sendBuffSize]; 
     
    599599  double* sendWeightBuff = new double [sendBuffSize]; 
    600600 
    601   std::vector<MPI_Request> sendRequest; 
     601  std::vector<ep_lib::MPI_Request> sendRequest(3*globalIndexInterpSendToClient.size()); 
    602602 
    603603  int sendOffSet = 0, l = 0; 
     604  int position = 0; 
    604605  for (itMap = itbMap; itMap != iteMap; ++itMap) 
    605606  { 
     
    620621    } 
    621622 
    622     sendRequest.push_back(MPI_Request()); 
    623     MPI_Isend(sendIndexDestBuff + sendOffSet, 
     623    ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet, 
    624624             k, 
    625625             MPI_INT, 
     
    627627             MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 
    628628             client->intraComm, 
    629              &sendRequest.back()); 
    630     sendRequest.push_back(MPI_Request()); 
    631     MPI_Isend(sendIndexSrcBuff + sendOffSet, 
     629             &sendRequest[position++]); 
     630    ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet, 
    632631             k, 
    633632             MPI_INT, 
     
    635634             MPI_DOMAIN_INTERPOLATION_SRC_INDEX, 
    636635             client->intraComm, 
    637              &sendRequest.back()); 
    638     sendRequest.push_back(MPI_Request()); 
    639     MPI_Isend(sendWeightBuff + sendOffSet, 
     636             &sendRequest[position++]); 
     637    ep_lib::MPI_Isend(sendWeightBuff + sendOffSet, 
    640638             k, 
    641639             MPI_DOUBLE, 
     
    643641             MPI_DOMAIN_INTERPOLATION_WEIGHT, 
    644642             client->intraComm, 
    645              &sendRequest.back()); 
     643             &sendRequest[position++]); 
    646644    sendOffSet += k; 
    647645  } 
     
    655653  while (receivedSize < recvBuffSize) 
    656654  { 
    657     MPI_Status recvStatus; 
    658     MPI_Recv((recvIndexDestBuff + receivedSize), 
     655    ep_lib::MPI_Status recvStatus; 
     656    ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize), 
    659657             recvBuffSize, 
    660658             MPI_INT, 
    661              MPI_ANY_SOURCE, 
     659             -2, 
    662660             MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 
    663661             client->intraComm, 
     
    665663 
    666664    int countBuff = 0; 
    667     MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 
     665    ep_lib::MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 
     666    #ifdef _usingMPI 
    668667    clientSrcRank = recvStatus.MPI_SOURCE; 
    669  
    670     MPI_Recv((recvIndexSrcBuff + receivedSize), 
     668    #elif _usingEP 
     669    clientSrcRank = recvStatus.ep_src; 
     670    #endif 
     671 
     672    ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 
    671673             recvBuffSize, 
    672674             MPI_INT, 
     
    676678             &recvStatus); 
    677679 
    678     MPI_Recv((recvWeightBuff + receivedSize), 
     680    ep_lib::MPI_Recv((recvWeightBuff + receivedSize), 
    679681             recvBuffSize, 
    680682             MPI_DOUBLE, 
     
    692694  } 
    693695 
    694   std::vector<MPI_Status> requestStatus(sendRequest.size()); 
    695   MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 
     696  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 
     697  ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]); 
    696698 
    697699  delete [] sendIndexDestBuff; 
     
    706708  
    707709/*! Redefined some functions of CONetCDF4 to make use of them */ 
    708 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm) 
     710CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm) 
    709711  : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {} 
     712   
     713CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, bool exist, const ep_lib::MPI_Comm comm) 
     714  : CNc4DataOutput(NULL, filename, exist, false, true, comm, false, true) {} 
     715   
    710716int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name,  
    711717                                                                const StdSize size) 
     
    785791  } 
    786792 
    787   MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    788   MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     793  ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     794  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    789795   
    790796  if (0 == globalNbWeight) 
     
    800806  std::vector<StdSize> count(1, localNbWeight); 
    801807   
    802   WriteNetCdf netCdfWriter(filename, client->intraComm);   
    803  
    804   // Define some dimensions 
    805   netCdfWriter.addDimensionWrite("n_src", n_src); 
    806   netCdfWriter.addDimensionWrite("n_dst", n_dst); 
    807   netCdfWriter.addDimensionWrite("n_weight", globalNbWeight); 
    808    
    809   std::vector<StdString> dims(1,"n_weight"); 
    810  
    811   // Add some variables 
    812   netCdfWriter.addVariableWrite("src_idx", NC_INT, dims); 
    813   netCdfWriter.addVariableWrite("dst_idx", NC_INT, dims); 
    814   netCdfWriter.addVariableWrite("weight", NC_DOUBLE, dims); 
    815  
    816   // End of definition 
    817   netCdfWriter.endDefinition(); 
    818  
    819   // // Write variables 
    820   if (0 != localNbWeight) 
    821   { 
    822     netCdfWriter.writeDataIndex(src_idx, "src_idx", false, 0, &start, &count); 
    823     netCdfWriter.writeDataIndex(dst_idx, "dst_idx", false, 0, &start, &count); 
    824     netCdfWriter.writeDataIndex(weights, "weight", false, 0, &start, &count); 
    825   } 
    826  
    827   netCdfWriter.closeFile(); 
     808  int my_rank_loc = client->intraComm->ep_comm_ptr->size_rank_info[1].first; 
     809  int my_rank = client->intraComm->ep_comm_ptr->size_rank_info[0].first; 
     810   
     811   
     812  
     813  WriteNetCdf *netCdfWriter; 
     814 
     815  MPI_Barrier_local(client->intraComm); 
     816   
     817  if(my_rank_loc==0) 
     818  { 
     819    info(100)<<"rank "<< my_rank <<" create weight info file"<< std::endl; 
     820     
     821    WriteNetCdf my_writer(filename, client->intraComm);   
     822    info(100)<<"rank "<< my_rank <<" file created"<< std::endl; 
     823    netCdfWriter = &my_writer;  
     824   
     825    // Define some dimensions 
     826    netCdfWriter->addDimensionWrite("n_src", n_src); 
     827    netCdfWriter->addDimensionWrite("n_dst", n_dst); 
     828    netCdfWriter->addDimensionWrite("n_weight", globalNbWeight); 
     829    info(100)<<"rank "<< my_rank <<" addDimensionWrite : n_src, n_dst, n_weight"<< std::endl; 
     830   
     831    std::vector<StdString> dims(1,"n_weight"); 
     832 
     833    // Add some variables 
     834    netCdfWriter->addVariableWrite("src_idx", NC_INT, dims); 
     835    netCdfWriter->addVariableWrite("dst_idx", NC_INT, dims); 
     836    netCdfWriter->addVariableWrite("weight", NC_DOUBLE, dims); 
     837     
     838    info(100)<<"rank "<< my_rank <<" addVariableWrite : src_idx, dst_idx, weight"<< std::endl; 
     839 
     840    // End of definition 
     841    netCdfWriter->endDefinition(); 
     842    info(100)<<"rank "<< my_rank <<" endDefinition"<< std::endl; 
     843   
     844    netCdfWriter->closeFile(); 
     845    info(100)<<"rank "<< my_rank <<" file closed"<< std::endl; 
     846  } 
     847   
     848  MPI_Barrier_local(client->intraComm); 
     849   
     850  #pragma omp critical (write_weight_data) 
     851  { 
     852    // open file 
     853    info(100)<<"rank "<< my_rank <<" writing in weight info file"<< std::endl; 
     854     
     855    WriteNetCdf my_writer(filename, true, client->intraComm);   
     856    info(100)<<"rank "<< my_rank <<" file opened"<< std::endl; 
     857    netCdfWriter = &my_writer;  
     858     
     859    // // Write variables 
     860    if (0 != localNbWeight) 
     861    { 
     862      netCdfWriter->writeDataIndex(src_idx, "src_idx", false, 0, &start, &count); 
     863      netCdfWriter->writeDataIndex(dst_idx, "dst_idx", false, 0, &start, &count); 
     864      netCdfWriter->writeDataIndex(weights, "weight", false, 0, &start, &count); 
     865       
     866      info(100)<<"rank "<< my_rank <<" WriteDataIndex : src_idx, dst_idx, weight"<< std::endl; 
     867    } 
     868     
     869    netCdfWriter->closeFile(); 
     870    info(100)<<"rank "<< my_rank <<" file closed"<< std::endl; 
     871     
     872  } 
     873   
     874  MPI_Barrier_local(client->intraComm); 
     875   
     876 
    828877} 
    829878 
     
    855904  } 
    856905                   
    857   nc_open(filename.c_str(),NC_NOWRITE, &ncid) ; 
    858   nc_inq_dimid(ncid,"n_weight",&weightDimId) ; 
    859   nc_inq_dimlen(ncid,weightDimId,&nbWeightGlo) ; 
    860  
    861   size_t nbWeight ; 
    862   size_t start ; 
    863   size_t div = nbWeightGlo/clientSize ; 
    864   size_t mod = nbWeightGlo%clientSize ; 
    865   if (clientRank < mod) 
    866   { 
    867     nbWeight=div+1 ; 
    868     start=clientRank*(div+1) ; 
    869   } 
    870   else 
    871   { 
    872     nbWeight=div ; 
    873     start= mod * (div+1) + (clientRank-mod) * div ; 
    874   } 
    875  
    876   double* weight=new double[nbWeight] ; 
    877   int weightId ; 
    878   nc_inq_varid (ncid, "weight", &weightId) ; 
    879   nc_get_vara_double(ncid, weightId, &start, &nbWeight, weight) ; 
    880  
    881   long* srcIndex=new long[nbWeight] ; 
    882   int srcIndexId ; 
    883   nc_inq_varid (ncid, "src_idx", &srcIndexId) ; 
    884   nc_get_vara_long(ncid, srcIndexId, &start, &nbWeight, srcIndex) ; 
    885  
    886   long* dstIndex=new long[nbWeight] ; 
    887   int dstIndexId ; 
    888   nc_inq_varid (ncid, "dst_idx", &dstIndexId) ; 
    889   nc_get_vara_long(ncid, dstIndexId, &start, &nbWeight, dstIndex) ; 
    890  
    891   int indexOffset=0 ; 
    892   if (fortranConvention) indexOffset=1 ; 
    893     for(size_t ind=0; ind<nbWeight;++ind) 
    894       interpMapValue[dstIndex[ind]-indexOffset].push_back(make_pair(srcIndex[ind]-indexOffset,weight[ind])); 
    895  } 
     906  #pragma omp critical (read_weight_data) 
     907  { 
     908    nc_open(filename.c_str(),NC_NOWRITE, &ncid) ; 
     909    nc_inq_dimid(ncid,"n_weight",&weightDimId) ; 
     910    nc_inq_dimlen(ncid,weightDimId,&nbWeightGlo) ; 
     911 
     912    size_t nbWeight ; 
     913    size_t start ; 
     914    size_t div = nbWeightGlo/clientSize ; 
     915    size_t mod = nbWeightGlo%clientSize ; 
     916    if (clientRank < mod) 
     917    { 
     918      nbWeight=div+1 ; 
     919      start=clientRank*(div+1) ; 
     920    } 
     921    else 
     922    { 
     923      nbWeight=div ; 
     924      start= mod * (div+1) + (clientRank-mod) * div ; 
     925    } 
     926 
     927    double* weight=new double[nbWeight] ; 
     928    int weightId ; 
     929    nc_inq_varid (ncid, "weight", &weightId) ; 
     930    nc_get_vara_double(ncid, weightId, &start, &nbWeight, weight) ; 
     931 
     932    long* srcIndex=new long[nbWeight] ; 
     933    int srcIndexId ; 
     934    nc_inq_varid (ncid, "src_idx", &srcIndexId) ; 
     935    nc_get_vara_long(ncid, srcIndexId, &start, &nbWeight, srcIndex) ; 
     936 
     937    long* dstIndex=new long[nbWeight] ; 
     938    int dstIndexId ; 
     939    nc_inq_varid (ncid, "dst_idx", &dstIndexId) ; 
     940    nc_get_vara_long(ncid, dstIndexId, &start, &nbWeight, dstIndex) ; 
     941 
     942    int indexOffset=0 ; 
     943    if (fortranConvention) indexOffset=1 ; 
     944      for(size_t ind=0; ind<nbWeight;++ind) 
     945        interpMapValue[dstIndex[ind]-indexOffset].push_back(make_pair(srcIndex[ind]-indexOffset,weight[ind])); 
     946  } 
     947} 
    896948 
    897949void CDomainAlgorithmInterpolate::apply(const std::vector<std::pair<int,double> >& localIndex, 
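
    In the weight-writing code above, only the first thread of each process (my_rank_loc == 0) creates the NetCDF file and defines its dimensions and variables; after an MPI_Barrier_local, every thread reopens the file and writes its own slice inside #pragma omp critical (write_weight_data). The sketch below mimics that create-then-append choreography with a plain text file and an OpenMP barrier standing in for the local barrier and the NetCDF writer; the file name and contents are illustrative only.

        #include <cstdio>
        #include <fstream>
        #include <omp.h>

        int main()
        {
          const char* filename = "weights_demo.txt";

          #pragma omp parallel num_threads(4)
          {
            int threadRank = omp_get_thread_num();   // plays the role of my_rank_loc

            // Phase 1: one thread per process creates the file and writes the "definitions".
            if (threadRank == 0)
            {
              std::ofstream f(filename, std::ios::trunc);
              f << "# weight entries follow\n";
            }

            // Stand-in for MPI_Barrier_local: no thread may append before the file exists.
            #pragma omp barrier

            // Phase 2: every thread appends its own block of data, one at a time.
            #pragma omp critical (write_weight_data)
            {
              std::ofstream f(filename, std::ios::app);
              f << "weights written by thread " << threadRank << "\n";
            }

            #pragma omp barrier   // all appends finished before anyone reads the file back
          }

          std::printf("wrote %s\n", filename);
          return 0;
        }
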
  • XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.hpp

    r1480 r1601  
    99#ifndef __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 
    1010#define __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 
    11  
     11#include "mpi_std.hpp" 
    1212#include "domain_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     
    7070  { 
    7171  public: 
    72     WriteNetCdf(const StdString& filename, const MPI_Comm comm); 
     72    WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm); 
     73    WriteNetCdf(const StdString& filename, bool exist, const ep_lib::MPI_Comm comm); 
    7374    int addDimensionWrite(const StdString& name, const StdSize size = UNLIMITED_DIM); 
    7475    int addVariableWrite(const StdString& name, nc_type type, 
  • XIOS/dev/dev_trunk_omp/src/transformation/generic_algorithm_transformation.cpp

    r1542 r1601  
    3737  int nbLocalIndex = localIndex.size();    
    3838  double defaultValue = std::numeric_limits<double>::quiet_NaN(); 
    39      
     39 
    4040  if (ignoreMissingValue) 
    4141  { 
    4242    if (firstPass) dataOut=defaultValue ; 
    43      
     43  
    4444    for (int idx = 0; idx < nbLocalIndex; ++idx) 
    4545    { 
     
    131131      { 
    132132        distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ; 
    133         MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
     133        ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
    134134     
    135135      } 
     
    137137      { 
    138138        distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ; 
    139         MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
     139        ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
    140140      } 
    141141      else //it's a scalar 
     
    230230  int sendValue = (computeGlobalIndexOnProc) ? 1 : 0; 
    231231  int recvValue = 0; 
    232   MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 
     232  ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 
    233233  computeGlobalIndexOnProc = (0 < recvValue); 
    234234 
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp

    r1542 r1601  
    500500    sendRankSizeMap[itIndex->first] = sendSize; 
    501501  } 
    502   MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 
     502  ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 
    503503 
    504504  displ[0]=0 ; 
     
    507507  int* recvRankBuff=new int[recvSize]; 
    508508  int* recvSizeBuff=new int[recvSize]; 
    509   MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 
    510   MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 
     509  ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 
     510  ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 
    511511  for (int i = 0; i < nbClient; ++i) 
    512512  { 
     
    520520 
    521521  // Sending global index of grid source to corresponding process as well as the corresponding mask 
    522   std::vector<MPI_Request> requests; 
    523   std::vector<MPI_Status> status; 
     522  std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2); 
     523  std::vector<ep_lib::MPI_Status> status; 
    524524  std::unordered_map<int, unsigned char* > recvMaskDst; 
    525525  std::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 
     526  int requests_position = 0; 
    526527  for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv) 
    527528  { 
     
    531532    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 
    532533 
    533     requests.push_back(MPI_Request()); 
    534     MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 
    535     requests.push_back(MPI_Request()); 
    536     MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 
     534    ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 
     535    ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 
    537536  } 
    538537 
     
    569568 
    570569    // Send global index source and mask 
    571     requests.push_back(MPI_Request()); 
    572     MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 
    573     requests.push_back(MPI_Request()); 
    574     MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 
     570    ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 
     571    ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 
    575572  } 
    576573 
    577574  status.resize(requests.size()); 
    578   MPI_Waitall(requests.size(), &requests[0], &status[0]); 
     575  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 
    579576 
    580577  // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 
    581   std::vector<MPI_Request>().swap(requests); 
    582   std::vector<MPI_Status>().swap(status); 
     578  requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size()); 
     579  requests_position = 0; 
     580  std::vector<ep_lib::MPI_Status>().swap(status); 
    583581  // Okie, on destination side, we will wait for information of masked index of source 
    584582  for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) 
     
    587585    int recvSize = itSend->second; 
    588586 
    589     requests.push_back(MPI_Request()); 
    590     MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
     587    ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
    591588  } 
    592589 
     
    624621 
    625622    // Okie, now inform the destination which source index are masked 
    626     requests.push_back(MPI_Request()); 
    627     MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
     623    ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
    628624  } 
    629625  status.resize(requests.size()); 
    630   MPI_Waitall(requests.size(), &requests[0], &status[0]); 
     626  ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 
    631627 
    632628  // Cool, now we can fill in local index of grid destination (counted for masked index) 
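
    The grid_transformation.cpp hunks replace the grow-as-you-go std::vector<MPI_Request> with a vector sized up front and filled through a requests_position counter before MPI_Waitall, so the vector is never reallocated while requests are in flight. A reduced version of that exchange using standard MPI calls (the changeset itself goes through the ep_lib:: wrappers):

        #include <mpi.h>
        #include <cstdio>
        #include <vector>

        int main(int argc, char** argv)
        {
          MPI_Init(&argc, &argv);
          int rank = 0, size = 0;
          MPI_Comm_rank(MPI_COMM_WORLD, &rank);
          MPI_Comm_size(MPI_COMM_WORLD, &size);

          // One message to and from every other rank: size the request vector once,
          // then fill it through a running position instead of push_back.
          std::vector<int> sendBuf(size, rank), recvBuf(size, -1);
          std::vector<MPI_Request> requests(2 * (size > 1 ? size - 1 : 0));
          int position = 0;

          for (int r = 0; r < size; ++r)
            if (r != rank)
              MPI_Irecv(&recvBuf[r], 1, MPI_INT, r, 46, MPI_COMM_WORLD, &requests[position++]);
          for (int r = 0; r < size; ++r)
            if (r != rank)
              MPI_Isend(&sendBuf[r], 1, MPI_INT, r, 46, MPI_COMM_WORLD, &requests[position++]);

          if (position > 0)
          {
            std::vector<MPI_Status> status(position);
            MPI_Waitall(position, &requests[0], &status[0]);
          }

          std::printf("rank %d exchanged values with %d peers\n", rank, position / 2);
          MPI_Finalize();
          return 0;
        }
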
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.hpp

    r978 r1601  
    1212#include <map> 
    1313#include <vector> 
     14#include "mpi_std.hpp" 
    1415#include "generic_algorithm_transformation.hpp" 
    1516#include "transformation_enum.hpp" 
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_factory_impl.hpp

    r933 r1601  
    5757  typedef std::map<ETranformationType, CreateTransformationCallBack> CallBackMap; 
    5858  static CallBackMap* transformationCreationCallBacks_; 
     59  #pragma omp threadprivate(transformationCreationCallBacks_) 
    5960  static bool registerTransformation(ETranformationType transType, CreateTransformationCallBack createFn); 
    6061  static bool unregisterTransformation(ETranformationType transType); 
    6162  static bool initializeTransformation_; 
     63  #pragma omp threadprivate(initializeTransformation_) 
    6264}; 
    6365 
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_selector.cpp

    r1558 r1601  
    1010#include "grid.hpp" 
    1111#include "algo_types.hpp" 
     12using namespace ep_lib; 
    1213 
    1314namespace xios { 
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_selector.hpp

    r1275 r1601  
    1212#include <map> 
    1313#include <vector> 
     14#include "mpi_std.hpp" 
    1415#include "generic_algorithm_transformation.hpp" 
    1516#include "transformation_enum.hpp" 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_extract_axis.cpp

    r1260 r1601  
    1414#include "grid_transformation_factory_impl.hpp" 
    1515 
    16 #include "reduction.hpp" 
     16 
    1717 
    1818namespace xios { 
     
    4949  StdString op = "extract"; 
    5050  pos_ = algo->position; 
    51   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     51  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     52  { 
     53    CReductionAlgorithm::initReductionOperation(); 
     54  } 
     55  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    5256} 
    5357 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_extract_axis.hpp

    r1260 r1601  
    1212#include "scalar_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     14#include "reduction.hpp" 
    1415 
    1516namespace xios { 
     
    2425  Extract a scalar from an axis 
    2526*/ 
    26 class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation 
     27class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation, public CReductionAlgorithm 
    2728{ 
    2829public: 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_axis.cpp

    r1297 r1601  
    1313#include "grid.hpp" 
    1414#include "grid_transformation_factory_impl.hpp" 
    15 #include "reduction.hpp" 
    1615 
    17 #include "reduction.hpp" 
     16 
    1817 
    1918namespace xios { 
     
    7574  } 
    7675   
    77   if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 
     76  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     77  { 
     78    CReductionAlgorithm::initReductionOperation(); 
     79  } 
     80   
     81  if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op)) 
    7882    ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)", 
    7983       << "Operation '" << op << "' not found. Please make sure to use a supported one" 
     
    8185       << "Scalar destination " << scalarDestination->getId()); 
    8286 
    83   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     87  reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 
    8488} 
    8589 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_axis.hpp

    r1260 r1601  
    1212#include "scalar_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     14#include "reduction.hpp" 
    1415 
    1516namespace xios { 
     
    2425  Reducing an axis to a scalar 
    2526*/ 
    26 class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation 
     27class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation, public CReductionAlgorithm 
    2728{ 
    2829public: 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_domain.cpp

    r1396 r1601  
    1414#include "grid_transformation_factory_impl.hpp" 
    1515 
    16 #include "reduction.hpp" 
    1716 
    1817namespace xios { 
     
    7170  } 
    7271   
    73   if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 
     72  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     73  { 
     74    CReductionAlgorithm::initReductionOperation(); 
     75  } 
     76  if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 
    7477    ERROR("CScalarAlgorithmReduceDomain::CScalarAlgorithmReduceDomain(CDomain* domainDestination, CDomain* domainSource, CReduceDomainToScalar* algo)", 
    7578       << "Operation '" << op << "' not found. Please make sure to use a supported one" 
     
    7780       << "Scalar destination " << scalarDestination->getId()); 
    7881 
    79   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     82  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    8083  local = algo->local ; 
    8184} 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_domain.hpp

    r1313 r1601  
    1212#include "scalar_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
     14#include "reduction.hpp" 
    1415 
    1516namespace xios { 
     
    2425  Reducing an DOMAIN to a scalar 
    2526*/ 
    26 class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation 
     27class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation, public CReductionAlgorithm 
    2728{ 
    2829public: 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_scalar.cpp

    r1314 r1601  
    99#include "grid.hpp" 
    1010#include "grid_transformation_factory_impl.hpp" 
    11 #include "reduction.hpp" 
     11 
    1212 
    1313 
     
    7070 
    7171  } 
     72 
     73  if(CReductionAlgorithm::ReductionOperations_ptr == 0)  
     74  { 
     75    CReductionAlgorithm::initReductionOperation(); 
     76  } 
    7277   
    73   if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 
     78  if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 
    7479    ERROR("CScalarAlgorithmReduceScalar::CScalarAlgorithmReduceScalar(CScalar* scalarDestination, CScalar* scalarSource, CReduceScalarToScalar* algo)", 
    7580       << "Operation '" << op << "' not found. Please make sure to use a supported one" 
     
    7782       << "Scalar destination " << scalarDestination->getId()); 
    7883 
    79   reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 
     84  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 
    8085} 
    8186 
  • XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_scalar.hpp

    r1314 r1601  
    88#include "scalar_algorithm_transformation.hpp" 
    99#include "transformation.hpp" 
     10#include "reduction.hpp" 
    1011 
    1112namespace xios { 
     
    1920  Reducing an scalar to a scalar 
    2021*/ 
    21 class CScalarAlgorithmReduceScalar : public CScalarAlgorithmTransformation 
     22class CScalarAlgorithmReduceScalar : public CScalarAlgorithmTransformation, public CReductionAlgorithm 
    2223{ 
    2324public: 
  • XIOS/dev/dev_trunk_omp/src/type/type.hpp

    r1478 r1601  
    9595    const CType_ref& operator = (CType<T>& val) const ; 
    9696    const CType_ref& operator = (const CType_ref& val) const; 
    97     operator T&() const;     
     97    operator T&() const; 
    9898 
    9999    inline virtual CBaseType* clone(void) const   { return _clone(); } 
  • XIOS/dev/dev_trunk_omp/src/type/type_impl.hpp

    r1478 r1601  
    8888  { 
    8989    this->checkEmpty(); 
    90    return *ptrValue ; 
     90    return *ptrValue ; 
    9191  } 
    9292 
     
    129129   CType<T>::operator const T&() const 
    130130   { 
    131     this->checkEmpty(); 
    132     return *ptrValue ; 
     131     this->checkEmpty(); 
     132     return *ptrValue ; 
    133133   } 
    134134 
  • XIOS/dev/dev_trunk_omp/src/xios.hpp

    r591 r1601  
    55 
    66/// XIOS headers /// 
    7 #include "nc4_data_output.hpp" 
     7#include "data_output.hpp" 
     8 
    89 
    910 
    1011using namespace xios; 
    11 using namespace xios::xml; 
    12 using namespace xios::func; 
     12 
     13 
    1314 
    1415#endif //__XIOS__ 
  • XIOS/dev/dev_trunk_omp/src/xios_server.f90

    r1158 r1601  
    33  IMPLICIT NONE 
    44  INCLUDE "mpif.h" 
    5   INTEGER :: ierr 
    6    
     5  INTEGER :: ierr, provided 
     6    
     7    CALL MPI_Init_thread(3, provided, ierr) 
    78    CALL xios_init_server 
     9    CALL MPI_Finalize(ierr) 
     10  
    811 
    912  END PROGRAM server_main 
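
    The standalone server now initializes MPI itself via MPI_Init_thread with a requested support level of 3, i.e. MPI_THREAD_MULTIPLE in common MPI implementations, and calls MPI_Finalize after xios_init_server, matching the MPI_Init/MPI_Finalize calls commented out in server.cpp above. For reference, the equivalent pattern in C++, including the check of the granted level that the Fortran stub omits (run_server is a hypothetical stand-in for xios_init_server):

        #include <mpi.h>
        #include <cstdio>

        // Hypothetical server entry point standing in for xios_init_server.
        void run_server() { /* ... serve clients ... */ }

        int main(int argc, char** argv)
        {
          int provided = MPI_THREAD_SINGLE;
          MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

          // With several OpenMP threads issuing MPI calls, anything short of
          // MPI_THREAD_MULTIPLE is not enough.
          if (provided < MPI_THREAD_MULTIPLE)
          {
            std::fprintf(stderr, "MPI library provides thread level %d only\n", provided);
            MPI_Abort(MPI_COMM_WORLD, 1);
          }

          run_server();

          MPI_Finalize();
          return 0;
        }
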