Timestamp: 02/17/17 17:55:37 (7 years ago)
Author: yushan
Message: ep_lib namespace specified when netcdf involved
Location: XIOS/dev/branch_yushan/src
Files: 50 edited
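
Most of the edits below apply one pattern: MPI communicator, request and status types are qualified with ep_lib:: so that XIOS uses its endpoint-MPI wrapper types internally, while the raw handle stored in the wrapper (comm.mpi_comm) is unwrapped whenever NetCDF or another external library needs a genuine MPI communicator. A minimal, self-contained sketch of that pattern follows; the ep_lib stub here is hypothetical and only mirrors what the call sites in this changeset suggest (a wrapper MPI_Comm exposing its raw handle as mpi_comm), not the real extern/src_ep_dev implementation.

#include <cstdio>
#include <mpi.h>

namespace ep_lib
{
  // Hypothetical stand-in for the endpoint wrapper: XIOS declares its
  // communicators as ep_lib::MPI_Comm and recovers the raw handle with
  // static_cast<MPI_Comm>(comm.mpi_comm) when calling NetCDF.
  struct MPI_Comm
  {
    ::MPI_Comm mpi_comm;                         // underlying raw communicator
    explicit operator ::MPI_Comm() const { return mpi_comm; }
  };
}

// Internal routines are written against the wrapper type; external libraries
// (NetCDF in the diffs, plain MPI here) receive the unwrapped handle.
static int rank_of(const ep_lib::MPI_Comm& comm)
{
  int rank = 0;
  MPI_Comm_rank(static_cast< ::MPI_Comm >(comm), &rank);
  return rank;
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  ep_lib::MPI_Comm world;                        // wrapped communicator
  world.mpi_comm = MPI_COMM_WORLD;
  std::printf("rank %d\n", rank_of(world));
  MPI_Finalize();
  return 0;
}
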

  • XIOS/dev/branch_yushan/src/buffer_server.hpp

    r717 r1053  
    44#include "xios_spl.hpp" 
    55#include "buffer.hpp" 
    6 #include "mpi.hpp" 
     6#include "mpi_std.hpp" 
    77#include "cxios.hpp" 
    88 
  • XIOS/dev/branch_yushan/src/client.cpp

    r1037 r1053  
    3131      else is_MPI_Initialized=false ; 
    3232       
    33       //return; 
    34  
    3533// don't use OASIS 
    3634      if (!CXios::usingOasis) 
  • XIOS/dev/branch_yushan/src/client.hpp

    r1037 r1053  
    1616        static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm); 
    1717        static void finalize(void); 
    18         static void registerContext(const string& id, MPI_Comm contextComm); 
     18        static void registerContext(const string& id, ep_lib::MPI_Comm contextComm); 
    1919 
    2020        static MPI_Comm intraComm; 
  • XIOS/dev/branch_yushan/src/client_client_dht_template.hpp

    r941 r1053  
    1313#include "xios_spl.hpp" 
    1414#include "array_new.hpp" 
    15 #include "mpi.hpp" 
     15#include "mpi_std.hpp" 
    1616#include "policy.hpp" 
    1717#include <boost/unordered_map.hpp> 
     
    4040  public: 
    4141    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap, 
    42                              const MPI_Comm& clientIntraComm); 
     42                             const ep_lib::MPI_Comm& clientIntraComm); 
    4343 
    4444    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    45                              const MPI_Comm& clientIntraComm); 
     45                             const ep_lib::MPI_Comm& clientIntraComm); 
    4646 
    4747    void computeIndexInfoMapping(const CArray<size_t,1>& indices); 
     
    5555 
    5656  protected: 
    57     CClientClientDHTTemplate(const MPI_Comm& clientIntraComm); 
     57    CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm); 
    5858 
    5959  protected: 
     
    6262    // Redistribute index and info among clients 
    6363    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap, 
    64                                  const MPI_Comm& intraCommLevel, 
     64                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6565                                 int level); 
    6666 
    6767    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    68                                  const MPI_Comm& intraCommLevel, 
     68                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6969                                 int level); 
    7070 
     
    7373 
    7474    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    75                                       const MPI_Comm& intraCommLevel, 
     75                                      const ep_lib::MPI_Comm& intraCommLevel, 
    7676                                      int level); 
    7777 
     
    8585    // Send information to clients 
    8686    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    87                            const MPI_Comm& clientIntraComm, 
    88                            std::vector<MPI_Request>& requestSendInfo); 
     87                           const ep_lib::MPI_Comm& clientIntraComm, 
     88                           std::vector<ep_lib::MPI_Request>& requestSendInfo); 
    8989 
    9090    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    91                             const MPI_Comm& clientIntraComm, 
    92                             std::vector<MPI_Request>& requestRecvInfo); 
     91                            const ep_lib::MPI_Comm& clientIntraComm, 
     92                            std::vector<ep_lib::MPI_Request>& requestRecvInfo); 
    9393 
    9494    // Send global index to clients 
    9595    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    96                             const MPI_Comm& clientIntraComm, 
    97                             std::vector<MPI_Request>& requestSendIndexGlobal); 
     96                            const ep_lib::MPI_Comm& clientIntraComm, 
     97                            std::vector<ep_lib::MPI_Request>& requestSendIndexGlobal); 
    9898 
    9999    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    100                              const MPI_Comm& clientIntraComm, 
    101                              std::vector<MPI_Request>& requestRecvIndex); 
     100                             const ep_lib::MPI_Comm& clientIntraComm, 
     101                             std::vector<ep_lib::MPI_Request>& requestRecvIndex); 
    102102 
    103103    void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements, 
  • XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp

    r1037 r1053  
    1818{ 
    1919template<typename T, typename H> 
    20 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm) 
     20CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm) 
    2121  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    2222{ 
     
    3838template<typename T, typename H> 
    3939CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap, 
    40                                                         const MPI_Comm& clientIntraComm) 
     40                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    4141  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    4242{ 
     
    6868template<typename T, typename H> 
    6969CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap, 
    70                                                         const MPI_Comm& clientIntraComm) 
     70                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    7171  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    7272{ 
     
    104104template<typename T, typename H> 
    105105void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    106                                                                  const MPI_Comm& commLevel, 
     106                                                                 const ep_lib::MPI_Comm& commLevel, 
    107107                                                                 int level) 
    108108{ 
     
    178178    recvIndexBuff = new unsigned long[recvNbIndexCount]; 
    179179 
    180   std::vector<MPI_Request> request; 
     180  std::vector<ep_lib::MPI_Request> request; 
    181181  std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 
    182182                             iteRecvIndex = recvRankClient.end(), 
     
    199199    sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
    200200 
    201   std::vector<MPI_Status> status(request.size()); 
     201  std::vector<ep_lib::MPI_Status> status(request.size()); 
    202202 
    203203  //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size()); 
     
    259259  } 
    260260 
    261   std::vector<MPI_Request> requestOnReturn; 
     261  std::vector<ep_lib::MPI_Request> requestOnReturn; 
    262262  currentIndex = 0; 
    263263  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
     
    310310  } 
    311311 
    312   std::vector<MPI_Status> statusOnReturn(requestOnReturn.size()); 
     312  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 
    313313  //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size()); 
    314314  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
     
    380380template<typename T, typename H> 
    381381void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, 
    382                                                             const MPI_Comm& commLevel, 
     382                                                            const ep_lib::MPI_Comm& commLevel, 
    383383                                                            int level) 
    384384{ 
     
    465465  // it will send a message to the correct clients. 
    466466  // Contents of the message are index and its corresponding informatioin 
    467   std::vector<MPI_Request> request; 
     467  std::vector<ep_lib::MPI_Request> request; 
    468468  int currentIndex = 0; 
    469469  int nbRecvClient = recvRankClient.size(); 
     
    504504 
    505505  //printf("check 8 OK. clientRank = %d\n", clientRank); 
    506   std::vector<MPI_Status> status(request.size()); 
     506  std::vector<ep_lib::MPI_Status> status(request.size()); 
    507507 
    508508  MPI_Waitall(request.size(), &request[0], &status[0]); 
     
    564564template<typename T, typename H> 
    565565void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    566                                                        const MPI_Comm& clientIntraComm, 
    567                                                        std::vector<MPI_Request>& requestSendIndex) 
    568 { 
    569   MPI_Request request; 
     566                                                       const ep_lib::MPI_Comm& clientIntraComm, 
     567                                                       std::vector<ep_lib::MPI_Request>& requestSendIndex) 
     568{ 
     569  ep_lib::MPI_Request request; 
    570570  requestSendIndex.push_back(request); 
    571571 
     
    583583template<typename T, typename H> 
    584584void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    585                                                          const MPI_Comm& clientIntraComm, 
    586                                                          std::vector<MPI_Request>& requestRecvIndex) 
    587 { 
    588   MPI_Request request; 
     585                                                         const ep_lib::MPI_Comm& clientIntraComm, 
     586                                                         std::vector<ep_lib::MPI_Request>& requestRecvIndex) 
     587{ 
     588  ep_lib::MPI_Request request; 
    589589  requestRecvIndex.push_back(request); 
    590590 
     
    603603template<typename T, typename H> 
    604604void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    605                                                       const MPI_Comm& clientIntraComm, 
    606                                                       std::vector<MPI_Request>& requestSendInfo) 
    607 { 
    608   MPI_Request request; 
     605                                                      const ep_lib::MPI_Comm& clientIntraComm, 
     606                                                      std::vector<ep_lib::MPI_Request>& requestSendInfo) 
     607{ 
     608  ep_lib::MPI_Request request; 
    609609  requestSendInfo.push_back(request); 
    610610  //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) ); 
     
    623623template<typename T, typename H> 
    624624void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    625                                                         const MPI_Comm& clientIntraComm, 
    626                                                         std::vector<MPI_Request>& requestRecvInfo) 
    627 { 
    628   MPI_Request request; 
     625                                                        const ep_lib::MPI_Comm& clientIntraComm, 
     626                                                        std::vector<ep_lib::MPI_Request>& requestRecvInfo) 
     627{ 
     628  ep_lib::MPI_Request request; 
    629629  requestRecvInfo.push_back(request); 
    630630 
     
    699699{ 
    700700  recvNbElements.resize(recvNbRank.size()); 
    701   std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
    702   std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
     701  std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
     702  std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
    703703 
    704704  int nRequest = 0; 
     
    751751  std::vector<int> recvBuff(recvBuffSize*2,0); 
    752752 
    753   std::vector<MPI_Request> request(sendBuffSize+recvBuffSize); 
    754   std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
     753  std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize); 
     754  std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
    755755 
    756756  int nRequest = 0; 
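
The DHT routines above post one non-blocking send or receive per peer, keep the requests in a std::vector, and complete them together with a status vector of the same length; the changeset only renames the types to ep_lib::MPI_Request / ep_lib::MPI_Status. A plain-MPI sketch of that bookkeeping, with illustrative names (exchange, peers, sendBuf, recvBuf) that are not part of XIOS:

#include <vector>
#include <mpi.h>

// One receive and one send per peer; requests are collected in a vector and
// waited on together with a matching status vector (the shape used by
// sendIndexToClients / recvIndexFromClients above).
void exchange(const std::vector<int>& peers, std::vector<int>& sendBuf,
              std::vector<int>& recvBuf, MPI_Comm comm)
{
  std::vector<MPI_Request> requests;
  for (std::size_t i = 0; i < peers.size(); ++i)
  {
    requests.push_back(MPI_Request());
    MPI_Irecv(&recvBuf[i], 1, MPI_INT, peers[i], 0, comm, &requests.back());
    requests.push_back(MPI_Request());
    MPI_Isend(&sendBuf[i], 1, MPI_INT, peers[i], 0, comm, &requests.back());
  }
  std::vector<MPI_Status> status(requests.size());
  if (!requests.empty())
    MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  std::vector<int> peers(1, rank), sendBuf(1, rank), recvBuf(1, -1);
  exchange(peers, sendBuf, recvBuf, MPI_COMM_WORLD);   // self-exchange demo
  MPI_Finalize();
  return 0;
}
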
  • XIOS/dev/branch_yushan/src/client_server_mapping.hpp

    r1037 r1053  
    4141 
    4242    static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 
    43                                                      MPI_Comm& clientIntraComm, 
     43                                                     ep_lib::MPI_Comm& clientIntraComm, 
    4444                                                     const std::vector<int>& connectedServerRank); 
    4545 
  • XIOS/dev/branch_yushan/src/client_server_mapping_distributed.hpp

    r835 r1053  
    3535    /** Default constructor */ 
    3636    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    37                                     const MPI_Comm& clientIntraComm, 
     37                                    const ep_lib::MPI_Comm& clientIntraComm, 
    3838                                    bool isDataDistributed = true); 
    3939 
  • XIOS/dev/branch_yushan/src/context_client.cpp

    r1037 r1053  
    2020    \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode) 
    2121    */ 
    22     CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer) 
     22    CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer) 
    2323     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4) 
    2424    { 
     
    163163      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++) 
    164164      { 
    165         retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 
     165        CBufferOut* m_buf = (*itBuffer)->getBuffer(*itSize); 
     166        //retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 
     167        //int m_size = retBuffer.size(); 
     168        //retBuffer.resize(m_size+1); 
     169        //m_size = retBuffer.size(); 
     170        retBuffer.push_back(m_buf); 
    166171      } 
    167172      return retBuffer; 
  • XIOS/dev/branch_yushan/src/context_client.hpp

    r1037 r1053  
    3131    public: 
    3232      // Contructor 
    33       CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0); 
     33      CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0); 
    3434 
    3535      // Send event to server 
     
    6666      int serverSize; //!< Size of server group 
    6767 
    68       MPI_Comm interComm; //!< Communicator of server group 
     68      ep_lib::MPI_Comm interComm; //!< Communicator of server group 
    6969 
    70       MPI_Comm intraComm; //!< Communicator of client group 
     70      ep_lib::MPI_Comm intraComm; //!< Communicator of client group 
    7171 
    7272      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers 
  • XIOS/dev/branch_yushan/src/context_server.cpp

    r1037 r1053  
    1010#include "file.hpp" 
    1111#include "grid.hpp" 
    12 #include "mpi.hpp" 
     12#include "mpi_std.hpp" 
    1313#include "tracer.hpp" 
    1414#include "timer.hpp" 
     
    2323{ 
    2424 
    25   CContextServer::CContextServer(CContext* parent,MPI_Comm intraComm_,MPI_Comm interComm_) 
     25  CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_) 
    2626  { 
    2727    context=parent; 
     
    7171    int count; 
    7272    char * addr; 
    73     MPI_Status status; 
     73    ep_lib::MPI_Status status; 
    7474    map<int,CServerBuffer*>::iterator it; 
    7575 
     
    101101            { 
    102102              addr=(char*)it->second->getBuffer(count); 
    103               MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
     103              ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
    104104              bufferRequest[rank]=addr; 
    105105              //printf("find message, i-receiving to buffer %p, rank = %d, commSize = %d\n", addr, rank, commSize); 
     
    113113  void CContextServer::checkPendingRequest(void) 
    114114  { 
    115     map<int,MPI_Request>::iterator it; 
     115    map<int,ep_lib::MPI_Request>::iterator it; 
    116116    list<int> recvRequest; 
    117117    list<int>::iterator itRecv; 
     
    119119    int flag; 
    120120    int count; 
    121     MPI_Status status; 
     121    ep_lib::MPI_Status status; 
    122122 
    123123    //printf("enter checkPendingRequest\n"); 
  • XIOS/dev/branch_yushan/src/context_server.hpp

    r1037 r1053  
    1414    public: 
    1515 
    16     CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; 
     16    CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ; 
    1717    bool eventLoop(void) ; 
    1818    void listen(void) ; 
     
    2525    bool hasFinished(void); 
    2626 
    27     MPI_Comm intraComm ; 
     27    ep_lib::MPI_Comm intraComm ; 
    2828    int intraCommSize ; 
    2929    int intraCommRank ; 
    3030 
    31     MPI_Comm interComm ; 
     31    ep_lib::MPI_Comm interComm ; 
    3232    int commSize ; 
    3333 
    3434    map<int,CServerBuffer*> buffers ; 
    35     map<int,MPI_Request> pendingRequest ; 
     35    map<int,ep_lib::MPI_Request> pendingRequest ; 
    3636    map<int,char*> bufferRequest ; 
    3737 
  • XIOS/dev/branch_yushan/src/cxios.cpp

    r1037 r1053  
    7979    MPI_Info info; 
    8080    MPI_Comm *ep_comm; 
    81     MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);  
     81    MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);  // servers should reach here too. 
    8282       
    8383    globalComm = ep_comm[0]; 
  • XIOS/dev/branch_yushan/src/cxios.hpp

    r1037 r1053  
    1919    public: 
    2020     static void initialize(void) ; 
    21      static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ; 
     21     static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 
    2222     static void initServerSide(void) ; 
    2323     static void clientFinalize(void) ; 
  • XIOS/dev/branch_yushan/src/dht_auto_indexing.cpp

    r1037 r1053  
    2222 
    2323  CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 
    24                                      const MPI_Comm& clientIntraComm) 
     24                                     const ep_lib::MPI_Comm& clientIntraComm) 
    2525    : CClientClientDHTTemplate<size_t>(clientIntraComm) 
    2626  { 
     
    5858  */ 
    5959  CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 
    60                                      const MPI_Comm& clientIntraComm) 
     60                                     const ep_lib::MPI_Comm& clientIntraComm) 
    6161    : CClientClientDHTTemplate<size_t>(clientIntraComm) 
    6262  { 
  • XIOS/dev/branch_yushan/src/dht_auto_indexing.hpp

    r1037 r1053  
    2828 
    2929    CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 
    30                      const MPI_Comm& clientIntraComm); 
     30                     const ep_lib::MPI_Comm& clientIntraComm); 
    3131 
    3232    CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 
    33                      const MPI_Comm& clientIntraComm); 
     33                     const ep_lib::MPI_Comm& clientIntraComm); 
    3434 
    3535    size_t getNbIndexesGlobal() const; 
  • XIOS/dev/branch_yushan/src/filter/filter.cpp

    r1037 r1053  
    1414    CDataPacketPtr outputPacket = engine->apply(data); 
    1515    if (outputPacket) 
     16    { 
     17      printf("filter/filter.cpp : deliverOuput(outputPacket)\n"); 
    1618      deliverOuput(outputPacket); 
     19      printf("filter/filter.cpp : deliverOuput(outputPacket) OKOK\n"); 
     20    } 
    1721  } 
    1822} // namespace xios 
  • XIOS/dev/branch_yushan/src/filter/input_pin.cpp

    r1037 r1053  
    3333      // Unregister before calling onInputReady in case the filter registers again 
    3434      gc.unregisterFilter(this, packet->timestamp); 
     35      printf("filter/input_pin.cpp : onInputReady\n"); 
    3536      onInputReady(it->second.packets); 
     37      printf("filter/input_pin.cpp : onInputReady OKOK\n"); 
    3638      inputs.erase(it); 
    3739    } 
  • XIOS/dev/branch_yushan/src/filter/output_pin.cpp

    r1037 r1053  
    2222    for (it = outputs.begin(), itEnd = outputs.end(); it != itEnd; ++it) 
    2323    { 
     24      printf("filter/output_pin.cpp : setInput\n"); 
    2425      it->first->setInput(it->second, packet); 
     26      printf("filter/output_pin.cpp : setInput OKOK\n"); 
    2527    } 
    2628  } 
  • XIOS/dev/branch_yushan/src/filter/source_filter.cpp

    r1037 r1053  
    2929    grid->inputField(data, packet->data); 
    3030 
     31    printf("filter/source_filter.cpp : deliverOuput(packet) \n"); 
    3132    deliverOuput(packet); 
     33    printf("filter/source_filter.cpp : deliverOuput(packet) OKOK\n"); 
    3234  } 
    3335 
  • XIOS/dev/branch_yushan/src/filter/spatial_transform_filter.cpp

    r1037 r1053  
    150150 
    151151      idxSendBuff = 0; 
    152       std::vector<MPI_Request> sendRecvRequest; 
     152      std::vector<ep_lib::MPI_Request> sendRecvRequest; 
    153153      for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 
    154154      { 
     
    160160          sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 
    161161        } 
    162         sendRecvRequest.push_back(MPI_Request()); 
     162        sendRecvRequest.push_back(ep_lib::MPI_Request()); 
    163163        MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 
    164164      } 
     
    178178        int srcRank = itRecv->first; 
    179179        int countSize = itRecv->second.size(); 
    180         sendRecvRequest.push_back(MPI_Request()); 
     180        sendRecvRequest.push_back(ep_lib::MPI_Request()); 
    181181        MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 
    182182        currentBuff += countSize; 
    183183      } 
    184       std::vector<MPI_Status> status(sendRecvRequest.size()); 
     184      std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size()); 
    185185      MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 
    186186 
  • XIOS/dev/branch_yushan/src/interface/c/icdata.cpp

    r1037 r1053  
    2323#include "context.hpp" 
    2424#include "context_client.hpp" 
    25 #include "mpi.hpp" 
     25#include "mpi_std.hpp" 
    2626#include "timer.hpp" 
    2727#include "array_new.hpp" 
     
    5454   { 
    5555      std::string str; 
    56       MPI_Comm local_comm; 
    57       MPI_Comm return_comm; 
     56      ep_lib::MPI_Comm local_comm; 
     57      ep_lib::MPI_Comm return_comm; 
    5858       
    59       fc_comm_map.clear(); 
     59      ep_lib::fc_comm_map.clear(); 
    6060 
    6161      if (!cstr2string(client_id, len_client_id, str)) return; 
     
    6363      int initialized; 
    6464      MPI_Initialized(&initialized); 
    65       if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 
     65      //if (initialized) local_comm.mpi_comm = MPI_Comm_f2c(*f_local_comm); 
     66      if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm)); 
    6667      else local_comm = MPI_COMM_NULL; 
    6768       
     
    6970 
    7071      CXios::initClientSide(str, local_comm, return_comm); 
    71       *f_return_comm = MPI_Comm_c2f(return_comm); 
     72 
     73      *f_return_comm = ep_lib::EP_Comm_c2f(return_comm); 
    7274 
    7375      printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm); 
     
    8082   { 
    8183     std::string str; 
    82      MPI_Comm comm; 
     84     ep_lib::MPI_Comm comm; 
    8385 
    8486     if (!cstr2string(context_id, len_context_id, str)) return; 
    8587     CTimer::get("XIOS").resume(); 
    8688     CTimer::get("XIOS init context").resume(); 
    87      comm=MPI_Comm_f2c(*f_comm); 
     89     comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm)); 
    8890     
    8991     CClient::registerContext(str, comm); 
    9092      
    91      //printf("client register context OK\n"); 
     93     printf("icdata.cpp: client register context OK\n"); 
    9294      
    9395     CTimer::get("XIOS init context").suspend(); 
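
The icdata.cpp edits above replace the standard Fortran-handle conversions (MPI_Comm_f2c / MPI_Comm_c2f) with ep_lib::EP_Comm_f2c / EP_Comm_c2f, whose signatures are only inferred here from the call sites. For reference, this is the standard round-trip in plain MPI; comm_from_fortran / comm_to_fortran are illustrative names, not XIOS functions:

#include <mpi.h>

// Convert a Fortran communicator handle (MPI_Fint) received from the Fortran
// bindings into a C communicator, and back again for the return value.
MPI_Comm comm_from_fortran(MPI_Fint f_comm) { return MPI_Comm_f2c(f_comm); }
MPI_Fint comm_to_fortran(MPI_Comm comm)     { return MPI_Comm_c2f(comm); }

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  MPI_Fint f = comm_to_fortran(MPI_COMM_WORLD);   // what the Fortran side holds
  MPI_Comm  c = comm_from_fortran(f);             // round-trip back to C
  int result = 0;
  MPI_Comm_compare(c, MPI_COMM_WORLD, &result);   // expect MPI_IDENT
  MPI_Finalize();
  return 0;
}
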
  • XIOS/dev/branch_yushan/src/interface/c/oasis_cinterface.cpp

    r1037 r1053  
    2626     
    2727    fxios_oasis_get_localcomm(&f_comm) ; 
    28     comm=MPI_Comm_f2c(f_comm) ; 
     28    //comm=MPI_Comm_f2c(f_comm) ; 
    2929  } 
    3030  
     
    3434     
    3535    fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 
    36     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     36    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    3737  } 
    3838  
     
    4242     
    4343    fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 
    44     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     44    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    4545  } 
    4646} 
  • XIOS/dev/branch_yushan/src/interface/fortran/idata.F90

    r1037 r1053  
    476476       
    477477      !print*, "in fortran, world_f = ", MPI_COMM_WORLD  
     478 
    478479      print*, "in fortran, f_return_comm = ", f_return_comm  
    479480 
  • XIOS/dev/branch_yushan/src/io/inetcdf4.cpp

    r948 r1053  
    1818    } 
    1919    mpi = comm && !multifile; 
     20    MPI_Info m_info; 
    2021 
    2122    // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO 
    2223    // even if Parallel NetCDF ends up being used. 
    2324    if (mpi) 
    24       CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp); 
     25      CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp); 
    2526    else 
    2627      CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); 
  • XIOS/dev/branch_yushan/src/io/inetcdf4.hpp

    r802 r1053  
    77#include "array_new.hpp" 
    88 
    9 #include "mpi.hpp" 
     9#include "mpi_std.hpp" 
    1010#include "netcdf.hpp" 
    1111 
  • XIOS/dev/branch_yushan/src/io/nc4_data_output.cpp

    r1037 r1053  
    2626      CNc4DataOutput::CNc4DataOutput 
    2727         (const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 
    28           MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
     28          ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
    2929            : SuperClass() 
    3030            , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) 
     
    450450      StdString domainName = domain->name; 
    451451      domain->assignMesh(domainName, domain->nvertex); 
    452       domain->mesh->createMeshEpsilon(server->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 
     452      domain->mesh->createMeshEpsilon(static_cast<MPI_Comm>(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 
    453453 
    454454      StdString node_x = domainName + "_node_x"; 
  • XIOS/dev/branch_yushan/src/io/nc4_data_output.hpp

    r887 r1053  
    2727               (const StdString & filename, bool exist, bool useClassicFormat, 
    2828                bool useCFConvention, 
    29                 MPI_Comm comm_file, bool multifile, bool isCollective = true, 
     29                ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 
    3030                const StdString& timeCounterName = "time_counter"); 
    3131 
     
    116116 
    117117            /// Propriétés privées /// 
    118             MPI_Comm comm_file; 
     118            ep_lib::MPI_Comm comm_file; 
    119119            const StdString filename; 
    120120            std::map<Time, StdSize> timeToRecordCache; 
  • XIOS/dev/branch_yushan/src/io/netCdfInterface.hpp

    r1037 r1053  
    1616#endif 
    1717 
    18 #include "mpi.hpp" 
    19 //#include <mpi.h> 
     18#include "mpi_std.hpp" 
    2019#include "netcdf.hpp" 
    2120 
  • XIOS/dev/branch_yushan/src/io/netcdf.hpp

    r685 r1053  
    11#ifndef __XIOS_NETCDF_HPP__ 
    22#define __XIOS_NETCDF_HPP__ 
    3 #include "mpi.hpp" 
     3#include "mpi_std.hpp" 
    44#define MPI_INCLUDED 
    55#include <netcdf.h> 
     
    1818extern "C" 
    1919{ 
    20 include <netcdf_par.h> 
     20  #include <netcdf_par.h> 
    2121} 
    2222#  endif 
     
    3030namespace xios 
    3131{ 
    32   inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp) 
     32  inline int nc_create_par(const char *path, int cmode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 
    3333  { 
    3434#if defined(USING_NETCDF_PAR) 
    35     return ::nc_create_par(path, cmode, comm, info, ncidp) ; 
     35    return ::nc_create_par(path, cmode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 
    3636#else 
    3737    ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
     
    4141  } 
    4242 
    43   inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp) 
     43  inline int nc_open_par(const char *path, int mode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 
    4444  { 
    4545#if defined(USING_NETCDF_PAR) 
    46     return ::nc_open_par(path, mode, comm, info, ncidp) ; 
     46    return ::nc_open_par(path, mode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 
    4747#else 
    4848    ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
  • XIOS/dev/branch_yushan/src/io/onetcdf4.cpp

    r1037 r1053  
    33#include "onetcdf4.hpp" 
    44#include "group_template.hpp" 
    5 //#include "mpi_std.hpp" 
    65#include "netcdf.hpp" 
    76#include "netCdfInterface.hpp" 
     
    1211      /// ////////////////////// Définitions ////////////////////// /// 
    1312 
    14       CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 
    15                                                         bool useCFConvention, 
    16                            const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     13      CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,  
     14                           const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    1715        : path() 
    1816        , wmpi(false) 
     
    3230 
    3331      void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,  
    34                                  const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     32                                 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    3533      { 
    3634         this->useClassicFormat = useClassicFormat; 
     
    5856         { 
    5957            if (wmpi) 
    60                CNetCdfInterface::createPar(filename, mode, *comm, info_null, this->ncidp); 
     58            { 
     59               CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 
     60               printf("creating file with createPar\n"); 
     61            } 
    6162            else 
     63            { 
    6264               CNetCdfInterface::create(filename, mode, this->ncidp); 
     65               printf("creating file with create\n");   
     66            }   
     67                
    6368 
    6469            this->appendMode = false; 
     
    6873            mode |= NC_WRITE; 
    6974            if (wmpi) 
    70                CNetCdfInterface::openPar(filename, mode, *comm, info_null, this->ncidp); 
     75            { 
     76               CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 
     77               printf("opening file with openPar\n"); 
     78            } 
    7179            else 
     80            { 
    7281               CNetCdfInterface::open(filename, mode, this->ncidp); 
     82               printf("opening file with open\n"); 
     83            } 
    7384 
    7485            this->appendMode = true; 
  • XIOS/dev/branch_yushan/src/io/onetcdf4.hpp

    r1037 r1053  
    77#include "data_output.hpp" 
    88#include "array_new.hpp" 
    9 #include "mpi.hpp" 
    10 //#include <mpi.h> 
     9#include "mpi_std.hpp" 
    1110#include "netcdf.hpp" 
    1211 
     
    2928            CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 
    3029                          bool useCFConvention = true, 
    31                       const MPI_Comm* comm = NULL, bool multifile = true, 
     30                      const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 
    3231                      const StdString& timeCounterName = "time_counter"); 
    3332 
     
    3837            /// Initialisation /// 
    3938            void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 
    40                             const MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
     39                            const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
    4140            void close(void); 
    4241            void sync(void); 
  • XIOS/dev/branch_yushan/src/mpi.hpp

    r1037 r1053  
    1212 
    1313#ifdef _usingEP 
    14   #include "../extern/src_ep/ep_lib.hpp" 
     14  #include "../extern/src_ep_dev/ep_lib.hpp" 
    1515  using namespace ep_lib; 
    1616#elif _usingMPI 
  • XIOS/dev/branch_yushan/src/node/axis.cpp

    r1037 r1053  
    742742      CContextServer* server = CContext::getCurrent()->server; 
    743743      axis->numberWrittenIndexes_ = axis->indexesToWrite.size(); 
    744       MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
    745       MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
     744      ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
     745      ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
    746746      axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_; 
    747747    } 
  • XIOS/dev/branch_yushan/src/node/context.cpp

    r1037 r1053  
    236236 
    237237   //! Initialize client side 
    238    void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
     238   void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
    239239   { 
    240240     hasClient=true; 
     
    248248     registryOut->setPath(getId()) ; 
    249249 
    250      MPI_Comm intraCommServer, interCommServer; 
     250     ep_lib::MPI_Comm intraCommServer, interCommServer; 
    251251     if (cxtServer) // Attached mode 
    252252     { 
     
    311311 
    312312   //! Initialize server 
    313    void CContext::initServer(MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
     313   void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
    314314   { 
    315315     hasServer=true; 
     
    323323     registryOut->setPath(getId()) ; 
    324324 
    325      MPI_Comm intraCommClient, interCommClient; 
     325     ep_lib::MPI_Comm intraCommClient, interCommClient; 
    326326     if (cxtClient) // Attached mode 
    327327     { 
     
    369369          closeAllFile(); 
    370370          registryOut->hierarchicalGatherRegistry() ; 
     371          //registryOut->gatherRegistry() ; 
    371372          if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 
    372373        } 
    373374 
    374         for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
     375        for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    375376          MPI_Comm_free(&(*it)); 
    376377        comms.clear(); 
  • XIOS/dev/branch_yushan/src/node/context.hpp

    r1037 r1053  
    8888      public : 
    8989         // Initialize server or client 
    90          void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0); 
    91          void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0); 
     90         void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 
     91         void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 
    9292         bool isInitialized(void); 
    9393 
     
    229229         StdString idServer_; 
    230230         CGarbageCollector garbageCollector; 
    231          std::list<MPI_Comm> comms; //!< Communicators allocated internally 
     231         std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 
    232232 
    233233      public: // Some function maybe removed in the near future 
  • XIOS/dev/branch_yushan/src/node/domain.cpp

    r1037 r1053  
    475475   { 
    476476          CContext* context = CContext::getCurrent(); 
    477       CContextClient* client = context->client; 
     477    CContextClient* client = context->client; 
    478478          lon_g.resize(ni_glo) ; 
    479479          lat_g.resize(nj_glo) ; 
  • XIOS/dev/branch_yushan/src/node/field_impl.hpp

    r1037 r1053  
    2020    if (clientSourceFilter) 
    2121    { 
     22      printf("file_impl.hpp : clientSourceFilter->streamData\n"); 
    2223      clientSourceFilter->streamData(CContext::getCurrent()->getCalendar()->getCurrentDate(), _data); 
     24      printf("file_impl.hpp : clientSourceFilter->streamData OKOK\n"); 
    2325    } 
    2426    else if (!field_ref.isEmpty() || !content.empty()) 
     27    { 
    2528      ERROR("void CField::setData(const CArray<double, N>& _data)", 
    2629            << "Impossible to receive data from the model for a field [ id = " << getId() << " ] with a reference or an arithmetic operation."); 
     30    } 
    2731  } 
    2832 
  • XIOS/dev/branch_yushan/src/node/file.cpp

    r1037 r1053  
    564564 
    565565      if (isOpen) data_out->closeFile(); 
    566       if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 
    567       else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 
     566      if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective)); 
     567      else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective, time_counter_name)); 
    568568      isOpen = true; 
    569569    } 
  • XIOS/dev/branch_yushan/src/node/file.hpp

    r1037 r1053  
    159159         bool isOpen; 
    160160         bool allDomainEmpty; 
    161          MPI_Comm fileComm; 
     161         ep_lib::MPI_Comm fileComm; 
    162162 
    163163      private : 
  • XIOS/dev/branch_yushan/src/node/mesh.cpp

    r1037 r1053  
    493493 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 
    494494 */ 
    495   void CMesh::createMeshEpsilon(const MPI_Comm& comm, 
     495  void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm, 
    496496                                const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 
    497497                                const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) 
     
    15341534   */ 
    15351535 
    1536   void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 
     1536  void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 
    15371537                               const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
    15381538                               CArray<int, 2>& nghbFaces) 
     
    16901690   */ 
    16911691 
    1692   void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 
     1692  void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 
    16931693                               const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
    16941694                               CArray<int, 2>& nghbFaces) 
     
    18711871   */ 
    18721872 
    1873   void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm, 
     1873  void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm, 
    18741874                                 const CArray<int, 1>& face_idx, 
    18751875                                 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
  • XIOS/dev/branch_yushan/src/node/mesh.hpp

    r931 r1053  
    6060                      const CArray<double, 2>&, const CArray<double, 2>& ); 
    6161                         
    62       void createMeshEpsilon(const MPI_Comm&, 
     62      void createMeshEpsilon(const ep_lib::MPI_Comm&, 
    6363                             const CArray<double, 1>&, const CArray<double, 1>&, 
    6464                             const CArray<double, 2>&, const CArray<double, 2>& ); 
    6565 
    66       void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&, 
     66      void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 
    6767                              const CArray<double, 2>&, const CArray<double, 2>&, 
    6868                              CArray<int, 2>&); 
     
    8484      CClientClientDHTSizet* pNodeGlobalIndex;                    // pointer to a map <nodeHash, nodeIdxGlo> 
    8585      CClientClientDHTSizet* pEdgeGlobalIndex;                    // pointer to a map <edgeHash, edgeIdxGlo> 
    86       void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    87       void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     86      void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     87      void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    8888      void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
    8989      void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
  • XIOS/dev/branch_yushan/src/policy.hpp

    r855 r1053  
    3131{ 
    3232protected: 
    33   DivideAdaptiveComm(const MPI_Comm& mpiComm); 
     33  DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 
    3434 
    3535  void computeMPICommLevel(); 
     
    4141 
    4242protected: 
    43   const MPI_Comm& internalComm_; 
     43  const ep_lib::MPI_Comm& internalComm_; 
    4444  std::vector<std::vector<int> > groupParentsBegin_; 
    4545  std::vector<std::vector<int> > nbInGroupParents_; 
  • XIOS/dev/branch_yushan/src/registry.cpp

    r1037 r1053  
    11#include "registry.hpp" 
    22#include "type.hpp" 
    3 #include <mpi.hpp> 
    43#include <fstream> 
    54#include <sstream> 
     
    261260  void CRegistry::hierarchicalGatherRegistry(void) 
    262261  { 
    263     //hierarchicalGatherRegistry(communicator) ; 
     262    hierarchicalGatherRegistry(communicator) ; 
    264263  } 
    265264 
     
    288287      if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ; 
    289288      else color=1 ; 
     289       
    290290      MPI_Comm_split(comm,color,mpiRank,&commDown) ; 
     291       
    291292      if (color==0) gatherRegistry(commDown) ; 
     293      printf("gatherRegistry OKOK\n"); 
    292294      MPI_Comm_free(&commDown) ;     
    293295    } 
  • XIOS/dev/branch_yushan/src/registry.hpp

    r1037 r1053  
    2828/** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 
    2929      
    30       CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
     30      CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
    3131       
    3232      
     
    127127 
    128128/** MPI communicator used for broadcast and gather operation */ 
    129       MPI_Comm communicator ; 
     129      ep_lib::MPI_Comm communicator ; 
    130130  } ; 
    131131 
  • XIOS/dev/branch_yushan/src/test/test_client.f90

    r1037 r1053  
    4242 
    4343  CALL MPI_COMM_RANK(comm,rank,ierr) 
    44   print*, "test_client MPI_COMM_RANK OK" 
     44  print*, "test_client MPI_COMM_RANK OK", rank 
    4545  CALL MPI_COMM_SIZE(comm,size,ierr) 
     46  print*, "test_client MPI_COMM_SIZE OK", size 
    4647   
    4748 
     
    138139  PRINT*,"field field_A is active ? ",xios_field_is_active("field_A") 
    139140  !DO ts=1,24*10 
    140   DO ts=1,24 
     141  DO ts=1,6 
    141142    CALL xios_update_calendar(ts) 
    142143    print*, "xios_update_calendar OK, ts = ", ts 
  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp

    r1037 r1053  
    173173 
    174174  // Sending global index of grid source to corresponding process as well as the corresponding mask 
    175   std::vector<MPI_Request> requests; 
    176   std::vector<MPI_Status> status; 
     175  std::vector<ep_lib::MPI_Request> requests; 
     176  std::vector<ep_lib::MPI_Status> status; 
    177177  boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 
    178178  boost::unordered_map<int, double* > sendValueToDest; 
     
    184184    sendValueToDest[recvRank] = new double [recvSize]; 
    185185 
    186     requests.push_back(MPI_Request()); 
     186    requests.push_back(ep_lib::MPI_Request()); 
    187187    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 
    188188  } 
     
    206206 
    207207    // Send global index source and mask 
    208     requests.push_back(MPI_Request()); 
     208    requests.push_back(ep_lib::MPI_Request()); 
    209209    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 
    210210  } 
     
    215215  //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank); 
    216216 
    217   std::vector<MPI_Request>().swap(requests); 
    218   std::vector<MPI_Status>().swap(status); 
     217  std::vector<ep_lib::MPI_Request>().swap(requests); 
     218  std::vector<ep_lib::MPI_Status>().swap(status); 
    219219 
    220220  // Okie, on destination side, we will wait for information of masked index of source 
     
    224224    int recvSize = itSend->second; 
    225225 
    226     requests.push_back(MPI_Request()); 
     226    requests.push_back(ep_lib::MPI_Request()); 
    227227    MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 
    228228  } 
     
    242242    } 
    243243    // Okie, now inform the destination which source index are masked 
    244     requests.push_back(MPI_Request()); 
     244    requests.push_back(ep_lib::MPI_Request()); 
    245245    MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 
    246246  } 
  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp

    r933 r1053  
    1212#include "axis_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
    14  
     14#ifdef _usingEP 
     15#include "ep_declaration.hpp" 
     16#endif 
     17    
    1518namespace xios { 
    1619 
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp

    r1037 r1053  
    371371  CContextClient* client=context->client; 
    372372 
    373   MPI_Comm poleComme(MPI_COMM_NULL); 
    374   MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 
     373  ep_lib::MPI_Comm poleComme(MPI_COMM_NULL); 
     374  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 
    375375  if (MPI_COMM_NULL != poleComme) 
    376376  { 
    377377    int nbClientPole; 
    378     MPI_Comm_size(poleComme, &nbClientPole); 
     378    ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 
    379379 
    380380    std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, 
     
    541541  double* sendWeightBuff = new double [sendBuffSize]; 
    542542 
    543   std::vector<MPI_Request> sendRequest; 
     543  std::vector<ep_lib::MPI_Request> sendRequest; 
    544544 
    545545  int sendOffSet = 0, l = 0; 
     
    562562    } 
    563563 
    564     sendRequest.push_back(MPI_Request()); 
     564    sendRequest.push_back(ep_lib::MPI_Request()); 
    565565    MPI_Isend(sendIndexDestBuff + sendOffSet, 
    566566             k, 
     
    570570             client->intraComm, 
    571571             &sendRequest.back()); 
    572     sendRequest.push_back(MPI_Request()); 
     572    sendRequest.push_back(ep_lib::MPI_Request()); 
    573573    MPI_Isend(sendIndexSrcBuff + sendOffSet, 
    574574             k, 
     
    578578             client->intraComm, 
    579579             &sendRequest.back()); 
    580     sendRequest.push_back(MPI_Request()); 
     580    sendRequest.push_back(ep_lib::MPI_Request()); 
    581581    MPI_Isend(sendWeightBuff + sendOffSet, 
    582582             k, 
     
    597597  while (receivedSize < recvBuffSize) 
    598598  { 
    599     MPI_Status recvStatus; 
     599    ep_lib::MPI_Status recvStatus; 
    600600    MPI_Recv((recvIndexDestBuff + receivedSize), 
    601601             recvBuffSize, 
     
    637637  } 
    638638 
    639   std::vector<MPI_Status> requestStatus(sendRequest.size()); 
    640   MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 
     639  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 
     640  ep_lib::MPI_Status stat_ignore; 
     641  MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore); 
     642  //MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 
    641643 
    642644  delete [] sendIndexDestBuff; 
     
    724726 
    725727  MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    726   MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     728  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    727729   
    728730  std::vector<StdSize> start(1, startIndex - localNbWeight); 
    729731  std::vector<StdSize> count(1, localNbWeight); 
    730732 
    731   WriteNetCdf netCdfWriter(filename, client->intraComm); 
     733  WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm)); 
    732734 
    733735  // netCdfWriter = CONetCDF4(filename, false, false, true, client->intraComm, false); 
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp

    r1037 r1053  
    1313#include "transformation.hpp" 
    1414#include "nc4_data_output.hpp" 
     15#ifdef _usingEP 
     16#include "ep_declaration.hpp" 
     17#endif 
    1518 
    1619namespace xios { 
  • XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp

    r1037 r1053  
    475475 
    476476  // Sending global index of grid source to corresponding process as well as the corresponding mask 
    477   std::vector<MPI_Request> requests; 
    478   std::vector<MPI_Status> status; 
     477  std::vector<ep_lib::MPI_Request> requests; 
     478  std::vector<ep_lib::MPI_Status> status; 
    479479  boost::unordered_map<int, unsigned char* > recvMaskDst; 
    480480  boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 
     
    486486    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 
    487487 
    488     requests.push_back(MPI_Request()); 
     488    requests.push_back(ep_lib::MPI_Request()); 
    489489    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 
    490     requests.push_back(MPI_Request()); 
     490    requests.push_back(ep_lib::MPI_Request()); 
    491491    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 
    492492  } 
     
    524524 
    525525    // Send global index source and mask 
    526     requests.push_back(MPI_Request()); 
     526    requests.push_back(ep_lib::MPI_Request()); 
    527527    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 
    528     requests.push_back(MPI_Request()); 
     528    requests.push_back(ep_lib::MPI_Request()); 
    529529    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 
    530530  } 
     
    536536 
    537537  // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 
    538   std::vector<MPI_Request>().swap(requests); 
    539   std::vector<MPI_Status>().swap(status); 
     538  std::vector<ep_lib::MPI_Request>().swap(requests); 
     539  std::vector<ep_lib::MPI_Status>().swap(status); 
    540540  // Okie, on destination side, we will wait for information of masked index of source 
    541541  for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) 
     
    544544    int recvSize = itSend->second; 
    545545 
    546     requests.push_back(MPI_Request()); 
     546    requests.push_back(ep_lib::MPI_Request()); 
    547547    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
    548548  } 
     
    581581 
    582582    // Okie, now inform the destination which source index are masked 
    583     requests.push_back(MPI_Request()); 
     583    requests.push_back(ep_lib::MPI_Request()); 
    584584    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
    585585  } 
Note: See TracChangeset for help on using the changeset viewer.