Changeset 1661

Timestamp: 05/15/19 17:19:08
Author: yushan
Message: MARK: branch merged with trunk @1660. Tested test_complete and test_remap on ADA with IntelMPI, using _usingEP/_usingMPI as the switch.
Location: XIOS/dev/dev_trunk_omp
Files: 29 edited

  • XIOS/dev/dev_trunk_omp/bld.cfg

    r1646 r1661  
    5252bld::target test_remap.exe 
    5353bld::target test_remap_omp.exe 
    54 #bld::target test_complete.exe 
     54bld::target test_complete.exe 
    5555bld::target test_complete_omp.exe 
    5656#bld::target test_client.exe 
  • XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_routing.cpp

    r1646 r1661  
    153153        for (int i = 0; i < nbSource; i++) 
    154154        { 
     155                #ifdef _usingEP 
    155156                MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest++]); 
     157                #endif 
     158                 
     159                #ifdef _usingMPI 
     160                MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest++]); 
     161                #endif 
    156162        } 
    157163        MPI_Barrier(communicator); 
     
    171177        for (int i = 0; i < nbSource; i++) 
    172178        { 
     179#ifdef _usingEP 
    173180                MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest]); 
     181#endif 
     182#ifdef _usingMPI 
     183                MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]); 
     184#endif 
    174185                indexRequest++; 
    175186        } 
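    The hunk above shows how this changeset handles wildcard receives under the _usingEP/_usingMPI switch: the endpoint (EP) layer takes -2 as its "any source" value, while plain MPI takes MPI_ANY_SOURCE. A minimal sketch of how the two branches could be folded into one helper; the -2 value is taken from this diff, the helper itself is illustrative and not part of the changeset:

      #include "mpi.hpp"   // selects ep_lib or <mpi.h>; see the mpi.hpp hunk below

      // Illustrative helper, not XIOS code: wildcard source rank for the active
      // transport. The -2 value mirrors the _usingEP branch in the hunk above.
      static inline int anySourceRank()
      {
      #ifdef _usingEP
        return -2;              // wildcard source accepted by the EP layer in this diff
      #else
        return MPI_ANY_SOURCE;  // standard MPI wildcard
      #endif
      }

      // usage: MPI_Irecv(&sourceRank[i], 1, MPI_INT, anySourceRank(), 0, communicator, &request[indexRequest++]);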
  • XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.cpp

    r1646 r1661  
    1212using namespace std; 
    1313 
    14 //map<string,CTimer*> CTimer::allTimer; 
    1514map<string,CTimer*> *CTimer::allTimer_ptr = 0; 
    1615 
     
    6160        map<string,CTimer*>::iterator it; 
    6261        if(allTimer_ptr == 0) allTimer_ptr = new map<string,CTimer*>; 
    63         //it=allTimer.find(name); 
    6462        it=allTimer_ptr->find(name); 
    65         //if (it==allTimer.end()) it=allTimer.insert(pair<string,CTimer*>(name,new CTimer(name))).first; 
    6663        if (it==allTimer_ptr->end()) it=allTimer_ptr->insert(pair<string,CTimer*>(name,new CTimer(name))).first; 
    6764        return *(it->second); 
  • XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.hpp

    r1602 r1661  
    2626    double getCumulatedTime(void); 
    2727    void print(void); 
    28     //static map<string,CTimer*> allTimer; 
    2928    static map<string,CTimer*> *allTimer_ptr; 
    3029    #pragma omp threadprivate(allTimer_ptr) 
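    The timerRemap changes above replace the static timer registry with a heap-allocated map behind a threadprivate pointer, so each OpenMP thread lazily builds its own registry. A simplified sketch of the pattern, reduced to the registry logic (the real CTimer also carries timing state):

      #include <map>
      #include <string>

      class CTimer
      {
        public:
          explicit CTimer(const std::string& name) : name_(name) {}
          static CTimer& get(const std::string& name);
        private:
          std::string name_;
          // one registry instance per OpenMP thread, created on first use
          static std::map<std::string, CTimer*>* allTimer_ptr;
          #pragma omp threadprivate(allTimer_ptr)
      };

      std::map<std::string, CTimer*>* CTimer::allTimer_ptr = 0;

      CTimer& CTimer::get(const std::string& name)
      {
        if (allTimer_ptr == 0) allTimer_ptr = new std::map<std::string, CTimer*>;
        std::map<std::string, CTimer*>::iterator it = allTimer_ptr->find(name);
        if (it == allTimer_ptr->end())
          it = allTimer_ptr->insert(std::make_pair(name, new CTimer(name))).first;
        return *(it->second);
      }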
  • XIOS/dev/dev_trunk_omp/inputs/COMPLETE/context_atmosphere.xml

    r1650 r1661  
    33<context id="atmosphere"> 
    44 
    5   <field_definition level="1" enabled=".TRUE." default_value="9.96921e+36"> 
     5  <field_definition level="1" enabled=".FALSE." default_value="9.96921e+36"> 
    66    <field id="field_A_atm"  name="field_A_atm_origin" operation="average" freq_op="1ts" grid_ref="grid_A_atm" /> 
    77    <field id="field_A_atm_zoom"  name="field_A_atm" operation="average" freq_op="1ts" field_ref="field_A_atm" grid_ref="grid_A_atm_zoom" /> 
  • XIOS/dev/dev_trunk_omp/inputs/COMPLETE/iodef.xml

    r1646 r1661  
    1919 
    2020        <variable_group id="parameters" > 
    21           <variable id="info_level" type="int">50</variable> 
     21          <variable id="info_level" type="int">100</variable> 
    2222          <variable id="print_file" type="bool">true</variable> 
    2323        </variable_group> 
  • XIOS/dev/dev_trunk_omp/src/attribute_enum.hpp

    r1646 r1661  
    1414namespace xios 
    1515{ 
    16       /// ////////////////////// Déclarations ////////////////////// /// 
     16      /// ////////////////////// Declarations ////////////////////// /// 
    1717        /*! 
    1818        \class CAttributeEnum 
  • XIOS/dev/dev_trunk_omp/src/calendar_util.cpp

    r1630 r1661  
    11#include "calendar_util.hpp" 
    2 #include "calendar.hpp" 
    32 
    43namespace xios 
  • XIOS/dev/dev_trunk_omp/src/client_client_dht_template_impl.hpp

    r1601 r1661  
    1717  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    1818{ 
    19   ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
     19  MPI_Comm_size(clientIntraComm, &nbClient_); 
    2020  this->computeMPICommLevel(); 
    2121  int nbLvl = this->getNbLevel(); 
     
    3737  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    3838{ 
    39   ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
     39  MPI_Comm_size(clientIntraComm, &nbClient_); 
    4040  this->computeMPICommLevel(); 
    4141  int nbLvl = this->getNbLevel(); 
     
    6262  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    6363{ 
    64   ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 
     64  MPI_Comm_size(clientIntraComm, &nbClient_); 
    6565  this->computeMPICommLevel(); 
    6666  int nbLvl = this->getNbLevel(); 
     
    9999{ 
    100100  int clientRank; 
    101   ep_lib::MPI_Comm_rank(commLevel,&clientRank); 
     101  MPI_Comm_rank(commLevel,&clientRank); 
    102102  int groupRankBegin = this->getGroupBegin()[level]; 
    103103  int nbClient = this->getNbInGroup()[level]; 
     
    200200 
    201201  std::vector<ep_lib::MPI_Status> status(request.size()); 
    202   ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 
     202  MPI_Waitall(request.size(), &request[0], &status[0]); 
    203203 
    204204  CArray<size_t,1>* tmpGlobalIndex; 
     
    324324 
    325325  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 
    326   ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
     326  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
    327327 
    328328  Index2VectorInfoTypeMap indexToInfoMapping; 
     
    394394{ 
    395395  int clientRank; 
    396   ep_lib::MPI_Comm_rank(commLevel,&clientRank); 
     396  MPI_Comm_rank(commLevel,&clientRank); 
    397397  computeSendRecvRank(level, clientRank); 
    398398 
     
    508508 
    509509  std::vector<ep_lib::MPI_Status> status(request.size()); 
    510   ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 
     510  MPI_Waitall(request.size(), &request[0], &status[0]); 
    511511 
    512512  Index2VectorInfoTypeMap indexToInfoMapping; 
     
    566566  ep_lib::MPI_Request request; 
    567567  requestSendIndex.push_back(request); 
    568   ep_lib::MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, 
     568  MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG, 
    569569            clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back())); 
    570570} 
     
    601601  ep_lib::MPI_Request request; 
    602602  requestRecvIndex.push_back(request); 
    603   ep_lib::MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG, 
     603  MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG, 
    604604            clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back())); 
    605605} 
     
    637637  requestSendInfo.push_back(request); 
    638638 
    639   ep_lib::MPI_Isend(info, infoSize, MPI_CHAR, 
     639  MPI_Isend(info, infoSize, MPI_CHAR, 
    640640            clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back())); 
    641641} 
     
    674674  requestRecvInfo.push_back(request); 
    675675 
    676   ep_lib::MPI_Irecv(info, infoSize, MPI_CHAR, 
     676  MPI_Irecv(info, infoSize, MPI_CHAR, 
    677677            clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back())); 
    678678} 
     
    767767  for (int idx = 0; idx < recvNbRank.size(); ++idx) 
    768768  { 
    769     ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT, 
     769    MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT, 
    770770              recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
    771771    ++nRequest; 
     
    774774  for (int idx = 0; idx < sendNbRank.size(); ++idx) 
    775775  { 
    776     ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT, 
     776    MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT, 
    777777              sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 
    778778    ++nRequest; 
    779779  } 
    780780 
    781   ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 
     781  MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 
    782782} 
    783783 
     
    811811  for (int idx = 0; idx < recvBuffSize; ++idx) 
    812812  { 
    813     ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
     813    MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 
    814814              recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]); 
    815815  } 
     
    824824  for (int idx = 0; idx < sendBuffSize; ++idx) 
    825825  { 
    826     ep_lib::MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
     826    MPI_Isend(&sendBuff[idx*2], 2, MPI_INT, 
    827827              sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest++]); 
    828828  } 
    829829 
    830   ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 
     830  MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 
    831831  int nbRecvRank = 0, nbRecvElements = 0; 
    832832  recvNbRank.clear(); 
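    The DHT exchanges above all follow the same nonblocking scheme: post the receives, post the sends, then complete everything with one MPI_Waitall. A minimal, self-contained sketch of that scheme with a generic tag and illustrative names (the XIOS MPI_DHT_* tags and buffers are not reproduced here):

      #include <mpi.h>
      #include <vector>

      // Post all receives, then all sends, then wait on every request at once,
      // as in the exchanges above. sendNb must hold one value per send rank.
      void exchangeCounts(MPI_Comm comm,
                          const std::vector<int>& sendRank, std::vector<int>& sendNb,
                          const std::vector<int>& recvRank, std::vector<int>& recvNb)
      {
        recvNb.resize(recvRank.size());
        std::vector<MPI_Request> request(sendRank.size() + recvRank.size());
        std::vector<MPI_Status>  status(request.size());
        int nRequest = 0;

        for (std::size_t idx = 0; idx < recvRank.size(); ++idx)
          MPI_Irecv(&recvNb[idx], 1, MPI_INT, recvRank[idx], 0, comm, &request[nRequest++]);

        for (std::size_t idx = 0; idx < sendRank.size(); ++idx)
          MPI_Isend(&sendNb[idx], 1, MPI_INT, sendRank[idx], 0, comm, &request[nRequest++]);

        if (nRequest > 0) MPI_Waitall(nRequest, &request[0], &status[0]);
      }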
  • XIOS/dev/dev_trunk_omp/src/cxios.cpp

    r1646 r1661  
    5454    #pragma omp critical 
    5555    { 
    56       //std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl; 
    5756      parseFile(rootFile); 
    5857      std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsed rootfile"<<std::endl; 
  • XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.cpp

    r1646 r1661  
    7373  } 
    7474 
    75  
    76  
    77  
    78  
    7975  CSpatialTemporalFilter::CSpatialTemporalFilter(CGarbageCollector& gc, CSpatialTransformFilterEngine* engine, CGridTransformation* gridTransformation, double outputValue, size_t inputSlotsCount) 
    8076    : CSpatialTransformFilter(gc, engine, outputValue, inputSlotsCount), record(0) 
  • XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.cpp

    r1646 r1661  
    568568    info(200)<<"start nc_inq_grpname_full"<<std::endl; 
    569569  } 
     570 
    570571  if (NC_NOERR != status) 
    571572  { 
  • XIOS/dev/dev_trunk_omp/src/mpi.hpp

    r1650 r1661  
    1414  #include "ep_lib.hpp" 
    1515  #include "ep_declaration.hpp" 
    16   //using namespace ep_lib; 
    1716#elif _usingMPI 
    1817  #include <mpi.h> 
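    The mpi.hpp hunk above is what lets the rest of the changeset call MPI_* routines unqualified: the header pulls in either the endpoint wrappers or plain MPI at compile time. Sketch of the switch as shown (that ep_lib.hpp/ep_declaration.hpp expose the unqualified MPI_* names is an assumption; the mechanism is not visible in this diff):

      #ifdef _usingEP
        #include "ep_lib.hpp"          // endpoint (EP) MPI wrappers
        #include "ep_declaration.hpp"
      #elif _usingMPI
        #include <mpi.h>               // standard MPI
      #endif

    Either branch is selected at build time, for example by defining _usingEP or _usingMPI on the compile line (illustrative; the actual definitions live in the XIOS build configuration).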
  • XIOS/dev/dev_trunk_omp/src/node/axis.cpp

    r1646 r1661  
    149149     \return the number of indexes written by each server 
    150150   */ 
    151    int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     151   int CAxis::getNumberWrittenIndexes(MPI_Comm writtenCom) 
    152152   TRY 
    153153   { 
    154154     int writtenSize; 
    155      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     155     MPI_Comm_size(writtenCom, &writtenSize); 
    156156     return numberWrittenIndexes_[writtenSize]; 
    157157   } 
     
    162162     \return the total number of indexes written by the servers 
    163163   */ 
    164    int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     164   int CAxis::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 
    165165   TRY 
    166166   { 
    167167     int writtenSize; 
    168      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     168     MPI_Comm_size(writtenCom, &writtenSize); 
    169169     return totalNumberWrittenIndexes_[writtenSize]; 
    170170   } 
     
    175175     \return the offset of indexes written by each server 
    176176   */ 
    177    int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     177   int CAxis::getOffsetWrittenIndexes(MPI_Comm writtenCom) 
    178178   TRY 
    179179   { 
    180180     int writtenSize; 
    181      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     181     MPI_Comm_size(writtenCom, &writtenSize); 
    182182     return offsetWrittenIndexes_[writtenSize]; 
    183183   } 
    184184   CATCH_DUMP_ATTR 
    185185 
    186    CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 
     186   CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 
    187187   TRY 
    188188   { 
    189189     int writtenSize; 
    190      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     190     MPI_Comm_size(writtenCom, &writtenSize); 
    191191     return compressedIndexToWriteOnServer[writtenSize]; 
    192192   } 
     
    787787  CATCH_DUMP_ATTR 
    788788 
    789   void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 
     789  void CAxis::computeWrittenCompressedIndex(MPI_Comm writtenComm) 
    790790  TRY 
    791791  { 
    792792    int writtenCommSize; 
    793     ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 
     793    MPI_Comm_size(writtenComm, &writtenCommSize); 
    794794    if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 
    795795      return; 
     
    869869      { 
    870870              
    871         ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    872         ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     871        MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     872        MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    873873        offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 
    874874      } 
  • XIOS/dev/dev_trunk_omp/src/node/axis.hpp

    r1646 r1661  
    182182         static std::map<StdString, ETranformationType> *transformationMapList_ptr; 
    183183         #pragma omp threadprivate(transformationMapList_ptr) 
    184          //static bool dummyTransformationMapList_; 
    185184 
    186185         DECLARE_REF_FUNC(Axis,axis) 
  • XIOS/dev/dev_trunk_omp/src/node/context.cpp

    r1646 r1661  
    287287       else 
    288288       { 
    289          ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 
     289         MPI_Comm_dup(intraComm, &intraCommServer); 
    290290         comms.push_back(intraCommServer); 
    291          ep_lib::MPI_Comm_dup(interComm, &interCommServer); 
     291         MPI_Comm_dup(interComm, &interCommServer); 
    292292         comms.push_back(interCommServer); 
    293293       } 
     
    312312     { 
    313313       clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 
    314        ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 
     314       MPI_Comm_dup(intraComm, &intraCommServer); 
    315315       comms.push_back(intraCommServer); 
    316        ep_lib::MPI_Comm_dup(interComm, &interCommServer); 
     316       MPI_Comm_dup(interComm, &interCommServer); 
    317317       comms.push_back(interCommServer); 
    318318       serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); 
     
    413413     else 
    414414     { 
    415        ep_lib::MPI_Comm_dup(intraComm, &intraCommClient); 
     415       MPI_Comm_dup(intraComm, &intraCommClient); 
    416416       comms.push_back(intraCommClient); 
    417        ep_lib::MPI_Comm_dup(interComm, &interCommClient); 
     417       MPI_Comm_dup(interComm, &interCommClient); 
    418418       comms.push_back(interCommClient); 
    419419     } 
     
    506506         //! Free internally allocated communicators 
    507507         for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    508            ep_lib::MPI_Comm_free(&(*it)); 
     508           MPI_Comm_free(&(*it)); 
    509509         comms.clear(); 
    510510 
     
    554554         //! Free internally allocated communicators 
    555555         for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    556            ep_lib::MPI_Comm_free(&(*it)); 
     556           MPI_Comm_free(&(*it)); 
    557557         comms.clear(); 
    558558 
     
    573573   { 
    574574     for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    575        ep_lib::MPI_Comm_free(&(*it)); 
     575       MPI_Comm_free(&(*it)); 
    576576     comms.clear(); 
    577577   } 
  • XIOS/dev/dev_trunk_omp/src/node/context.hpp

    r1646 r1661  
    145145         void distributeFileOverMemoryBandwith() ; 
    146146          
    147  
    148147         // Send context close definition 
    149148         void sendCloseDefinition(void); 
  • XIOS/dev/dev_trunk_omp/src/node/domain.cpp

    r1646 r1661  
    116116     \return the number of indexes written by each server 
    117117   */ 
    118    int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     118   int CDomain::getNumberWrittenIndexes(MPI_Comm writtenCom) 
    119119   TRY 
    120120   { 
    121121     int writtenSize; 
    122      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     122     MPI_Comm_size(writtenCom, &writtenSize); 
    123123     return numberWrittenIndexes_[writtenSize]; 
    124124   } 
     
    129129     \return the total number of indexes written by the servers 
    130130   */ 
    131    int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     131   int CDomain::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 
    132132   TRY 
    133133   { 
    134134     int writtenSize; 
    135      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     135     MPI_Comm_size(writtenCom, &writtenSize); 
    136136     return totalNumberWrittenIndexes_[writtenSize]; 
    137137   } 
     
    142142     \return the offset of indexes written by each server 
    143143   */ 
    144    int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 
     144   int CDomain::getOffsetWrittenIndexes(MPI_Comm writtenCom) 
    145145   TRY 
    146146   { 
    147147     int writtenSize; 
    148      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     148     MPI_Comm_size(writtenCom, &writtenSize); 
    149149     return offsetWrittenIndexes_[writtenSize]; 
    150150   } 
    151151   CATCH_DUMP_ATTR 
    152152 
    153    CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 
     153   CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 
    154154   TRY 
    155155   { 
    156156     int writtenSize; 
    157      ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 
     157     MPI_Comm_size(writtenCom, &writtenSize); 
    158158     return compressedIndexToWriteOnServer[writtenSize]; 
    159159   } 
     
    707707     int v ; 
    708708     v=ibegin ; 
    709      ep_lib::MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 
     709     MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 
    710710     v=jbegin ; 
    711      ep_lib::MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 
     711     MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 
    712712     v=ni ; 
    713      ep_lib::MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 
     713     MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 
    714714     v=nj ; 
    715      ep_lib::MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 
    716  
    717      ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 
    718      ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 
     715     MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 
     716 
     717     MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 
     718     MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 
    719719 
    720720      delete[] ibegin_g ; 
     
    19491949          displs[0] = 0; 
    19501950          int localCount = connectedServerRank_[nbServer].size() ; 
    1951           ep_lib::MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 
     1951          MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 
    19521952          for (int i = 0; i < clientSize-1; ++i) 
    19531953          { 
     
    19551955          } 
    19561956          std::vector<int> allConnectedServers(displs[clientSize-1]+counts[clientSize-1]); 
    1957  
    1958           ep_lib::MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 
    1959  
     1957          MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 
    19601958 
    19611959          if ((allConnectedServers.size() != nbServer) && (rank == 0)) 
     
    20222020   CATCH_DUMP_ATTR 
    20232021 
    2024   void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 
     2022  void CDomain::computeWrittenCompressedIndex(MPI_Comm writtenComm) 
    20252023  TRY 
    20262024  { 
    20272025    int writtenCommSize; 
    2028     ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 
     2026    MPI_Comm_size(writtenComm, &writtenCommSize); 
    20292027    if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 
    20302028      return; 
     
    20832081      { 
    20842082              
    2085         ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    2086         ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     2083        MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
     2084        MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 
    20872085        offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 
    20882086      } 
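    The Allreduce/Scan pair at the end of the axis.cpp and domain.cpp hunks computes, on the written communicator, the global number of written indexes and each rank's exclusive offset. A minimal sketch of the idiom with illustrative names:

      #include <mpi.h>

      // Global count via all-reduce; exclusive offset via inclusive scan minus the local count.
      void writtenIndexLayout(MPI_Comm comm, int nbWrittenLocal,
                              int& nbWrittenTotal, int& offset)
      {
        MPI_Allreduce(&nbWrittenLocal, &nbWrittenTotal, 1, MPI_INT, MPI_SUM, comm);
        MPI_Scan(&nbWrittenLocal, &offset, 1, MPI_INT, MPI_SUM, comm);
        offset -= nbWrittenLocal;   // MPI_Scan is inclusive, so subtract the local count
      }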
  • XIOS/dev/dev_trunk_omp/src/node/file.cpp

    r1646 r1661  
    307307 
    308308      int color = allZoneEmpty ? 0 : 1; 
    309       ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 
    310       if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm); 
     309      MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 
     310      if (allZoneEmpty) MPI_Comm_free(&fileComm); 
    311311    } 
    312312    CATCH_DUMP_ATTR 
     
    557557         { 
    558558            int commSize, commRank; 
    559             ep_lib::MPI_Comm_size(fileComm, &commSize); 
    560             ep_lib::MPI_Comm_rank(fileComm, &commRank); 
     559            MPI_Comm_size(fileComm, &commSize); 
     560            MPI_Comm_rank(fileComm, &commRank); 
    561561 
    562562            if (server->intraCommSize > 1) 
     
    683683      { 
    684684        int commSize, commRank; 
    685         ep_lib::MPI_Comm_size(readComm, &commSize); 
    686         ep_lib::MPI_Comm_rank(readComm, &commRank); 
     685        MPI_Comm_size(readComm, &commSize); 
     686        MPI_Comm_rank(readComm, &commRank); 
    687687 
    688688        if (server->intraCommSize > 1) 
     
    728728        isOpen = false; 
    729729       } 
    730       //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 
     730     #ifdef _usingMPI  
     731     if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 
     732     #endif 
    731733   } 
    732734   CATCH_DUMP_ATTR 
  • XIOS/dev/dev_trunk_omp/src/node/grid.cpp

    r1646 r1661  
    697697     CContext* context = CContext::getCurrent(); 
    698698 
    699      CContextClient* client = context->client;  // Here it's not important which contextClient to recuperate 
     699     CContextClient* client = context->client; 
    700700     int rank = client->clientRank; 
    701701 
     
    856856         displs[0] = 0; 
    857857         int localCount = connectedServerRank_[receiverSize].size() ; 
    858          ep_lib::MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 
    859           
     858         MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 
    860859         for (int i = 0; i < client->clientSize-1; ++i) 
    861860         { 
     
    863862         } 
    864863         std::vector<int> allConnectedServers(displs[client->clientSize-1]+counts[client->clientSize-1]); 
    865  
    866          ep_lib::MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 
    867  
     864         MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 
    868865 
    869866         if ((allConnectedServers.size() != receiverSize) && (client->clientRank == 0)) 
  • XIOS/dev/dev_trunk_omp/src/object_factory_impl.hpp

    r1628 r1661  
    141141      if(U::AllVectObj_ptr) 
    142142      { 
    143         //const std::vector<std::shared_ptr<U> > temp; 
    144143        return (*U::AllVectObj_ptr)[context]; 
    145         //return std::vector<std::shared_ptr<U> > (0); 
    146144      } 
    147145       
  • XIOS/dev/dev_trunk_omp/src/policy.cpp

    r1646 r1661  
    1616namespace xios 
    1717{ 
    18 ///*! 
    19 //  Calculate MPI communicator for each level of hierarchy. 
    20 //  \param[in] mpiCommRoot MPI communicator of the level 0 (usually communicator of all clients) 
    21 //  \param[in] levels number of level in hierarchy 
    22 //*/ 
    23 //void DivideCommByTwo::computeMPICommLevel(const MPI_Comm& mpiCommRoot, int levels) 
    24 //{ 
    25 //  int nbProc; 
    26 //  MPI_Comm_size(mpiCommRoot,&nbProc); 
    27 //  if (levels > nbProc) levels = std::log10(nbProc) * 3.3219; // log2(x) = log2(10) * log10(x); stupid C++98 
    28 //  else if (1 > levels) levels = 1; 
    29 // 
    30 //  commLevel_.push_back(mpiCommRoot); 
    31 //  divideMPICommLevel(mpiCommRoot, levels); 
    32 //} 
    33 // 
    34 ///*! 
    35 //  Divide each MPI communicator into sub-communicator. Recursive function 
    36 //  \param [in] mpiCommLevel MPI communicator of current level 
    37 //  \param [in] level current level 
    38 //*/ 
    39 //void DivideCommByTwo::divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level) 
    40 //{ 
    41 //  int clientRank; 
    42 //  MPI_Comm_rank(mpiCommLevel,&clientRank); 
    43 // 
    44 //   --level; 
    45 //  if (0 < level) 
    46 //  { 
    47 //   int color = clientRank % 2; 
    48 //   commLevel_.push_back(MPI_Comm()); 
    49 //   MPI_Comm_split(mpiCommLevel, color, 0, &(commLevel_.back())); 
    50 //   divideMPICommLevel(commLevel_.back(), level); 
    51 //  } 
    52 //} 
     18 
    5319 
    5420DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm) 
  • XIOS/dev/dev_trunk_omp/src/policy.hpp

    r1601 r1661  
    1616namespace xios 
    1717{ 
    18 //class DivideCommByTwo 
    19 //{ 
    20 //protected: 
    21 //  void computeMPICommLevel(const MPI_Comm& mpiCommRoot, int levels); 
    22 // 
    23 //protected: 
    24 //  std::vector<MPI_Comm> commLevel_; 
    25 //private: 
    26 //  // Divide MPI communicator on each level recursively 
    27 //  void divideMPICommLevel(const MPI_Comm& mpiCommLevel, int level); 
    28 //}; 
     18 
    2919 
    3020class DivideAdaptiveComm 
     
    4939  std::vector<int> nbInGroup_; //! Number of process in each group 
    5040  bool computed_; 
    51 //  std::vector<std::vector<int> > child_; /*!< List of child rank for each level */ 
    52 //  std::vector<int> nbChild_;         /*!< Number of child for each level */ 
     41 
     42 
    5343}; 
    5444 
  • XIOS/dev/dev_trunk_omp/src/server.cpp

    r1646 r1661  
    5050    void CServer::initialize(void) 
    5151    { 
    52       //int initialized ; 
    53       //MPI_Initialized(&initialized) ; 
    54       //if (initialized) is_MPI_Initialized=true ; 
    55       //else is_MPI_Initialized=false ; 
    5652      int rank ; 
    5753 
     
    6056      { 
    6157 
    62         //if (!is_MPI_Initialized) 
    63         //{ 
    64         //  MPI_Init(NULL, NULL); 
    65         //} 
    6658        CTimer::get("XIOS").resume() ; 
    6759 
  • XIOS/dev/dev_trunk_omp/src/test/test_complete_omp.f90

    r1650 r1661  
    1414  TYPE(xios_duration)  :: dtime 
    1515  TYPE(xios_context) :: ctx_hdl 
    16   INTEGER,PARAMETER :: ni_glo=1000 
    17   INTEGER,PARAMETER :: nj_glo=1000 
     16  INTEGER,PARAMETER :: ni_glo=100 
     17  INTEGER,PARAMETER :: nj_glo=100 
    1818  INTEGER,PARAMETER :: llm=5 
    1919  DOUBLE PRECISION  :: lval(llm)=1 
  • XIOS/dev/dev_trunk_omp/src/tracer.cpp

    r501 r1661  
    11#include "tracer.hpp" 
    2 #ifdef VTRACE 
     2 
     3#if defined(VTRACE) 
     4 
    35#include <vt_user.h> 
     6 
     7#elif defined(SCOREP) 
     8 
     9#include <scorep/SCOREP_User.h> 
     10 
     11#elif defined(ITAC) 
     12 
     13#include <VT.h> 
     14 
    415#endif 
     16 
    517#include <string> 
     18#include <map> 
     19#include <iostream> 
    620 
    721namespace xios 
    822{ 
    923  using namespace std ; 
     24 
     25  std::map<std::string,int> regionId ; 
     26  int count=0 ; 
    1027   
    1128  void traceOn(void) 
    1229  { 
    13 #ifdef VTRACE 
     30#if defined(VTRACE) 
    1431    VT_ON() ; 
     32#elif defined(SCOREP) 
     33    SCOREP_RECORDING_ON() ; 
     34#elif defined(ITAC) 
     35    VT_traceon() ; 
    1536#endif 
    1637  } 
     
    1839  void traceOff(void)  
    1940  { 
    20 #ifdef VTRACE 
     41#if defined(VTRACE) 
    2142    VT_OFF() ; 
     43#elif defined(SCOREP) 
     44    SCOREP_RECORDING_OFF() 
     45#elif defined(ITAC) 
     46    VT_traceoff()   ;   
    2247#endif 
    2348  } 
     
    2550  void traceBegin(const string& name) 
    2651  { 
    27 #ifdef VTRACE 
     52#if defined(VTRACE) 
    2853    VT_USER_START(name.c_str()) ; 
     54#elif defined(SCOREP) 
     55    SCOREP_USER_REGION_BY_NAME_BEGIN(name.c_str(),SCOREP_USER_REGION_TYPE_COMMON) 
     56 
     57#elif defined(ITAC) 
     58    int classhandle ; 
     59    auto it = regionId.find(name); 
     60    if (it==regionId.end()) 
     61    { 
     62      classhandle=count ; 
     63      count++ ; 
     64      VT_symdef (classhandle, name.c_str(), "XIOS") ; 
     65      regionId[name]=classhandle; 
     66    } 
     67    else classhandle = it->second ; 
     68    VT_begin(classhandle) ; 
     69    cout<<"VT_begin "<<name<<"  "<<classhandle<<endl ; 
     70 
    2971#endif 
     72 
    3073  } 
    3174   
    3275  void traceEnd(const string& name) 
    3376  { 
    34 #ifdef VTRACE 
     77#if defined (VTRACE) 
    3578    VT_USER_END(name.c_str()) ; 
     79#elif defined(SCOREP) 
     80    SCOREP_USER_REGION_BY_NAME_END(name.c_str()) 
     81#elif defined(ITAC) 
     82    int classhandle ; 
     83    auto it = regionId.find(name); 
     84    if (it==regionId.end()) 
     85    { 
     86      return ; 
     87      VT_classdef (name.c_str(), &classhandle) ; 
     88      regionId[name]=classhandle; 
     89    } 
     90    else classhandle = it->second ; 
     91    VT_end(classhandle) ;     
     92    cout<<"VT_end "<<name<<"  "<<classhandle<<endl ; 
     93 
    3694#endif 
    3795  } 
    3896   
     97    
    3998//  void marker(const string& name,const string& text) ; 
    4099   
  • XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.cpp

    r1646 r1661  
    438438 
    439439  ep_lib::MPI_Comm poleComme = MPI_COMM_NULL; 
    440   ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 
     440  MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 
    441441  if (poleComme!=MPI_COMM_NULL) 
    442442  { 
    443443    int nbClientPole; 
    444     ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 
     444    MPI_Comm_size(poleComme, &nbClientPole); 
    445445 
    446446    std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, 
     
    453453    std::vector<int> recvCount(nbClientPole,0); 
    454454    std::vector<int> displ(nbClientPole,0); 
    455     ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 
     455    MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 
     456 
    456457    displ[0]=0; 
    457458    for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ; 
     
    475476 
    476477    // Gather all index and weight for pole 
    477     ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 
    478     ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 
     478    MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 
     479    MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 
    479480 
    480481    std::map<int,double> recvTemp; 
     
    633634 
    634635 
    635   ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 
     636  MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 
    636637 
    637638  int* sendIndexDestBuff = new int [sendBuffSize]; 
     
    661662    } 
    662663 
    663     ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet, 
     664    MPI_Isend(sendIndexDestBuff + sendOffSet, 
    664665             k, 
    665666             MPI_INT, 
     
    668669             client->intraComm, 
    669670             &sendRequest[position++]); 
    670     ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet, 
     671    MPI_Isend(sendIndexSrcBuff + sendOffSet, 
    671672             k, 
    672673             MPI_INT, 
     
    675676             client->intraComm, 
    676677             &sendRequest[position++]); 
    677     ep_lib::MPI_Isend(sendWeightBuff + sendOffSet, 
     678    MPI_Isend(sendWeightBuff + sendOffSet, 
    678679             k, 
    679680             MPI_DOUBLE, 
     
    694695  { 
    695696    ep_lib::MPI_Status recvStatus; 
    696     ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize), 
     697    MPI_Recv((recvIndexDestBuff + receivedSize), 
    697698             recvBuffSize, 
    698699             MPI_INT, 
     
    710711    #endif 
    711712 
    712     ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 
     713    MPI_Recv((recvIndexSrcBuff + receivedSize), 
    713714             recvBuffSize, 
    714715             MPI_INT, 
     
    718719             &recvStatus); 
    719720 
    720     ep_lib::MPI_Recv((recvWeightBuff + receivedSize), 
     721    MPI_Recv((recvWeightBuff + receivedSize), 
    721722             recvBuffSize, 
    722723             MPI_DOUBLE, 
     
    735736 
    736737  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 
    737   ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]); 
     738  MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]); 
    738739 
    739740  delete [] sendIndexDestBuff; 
     
    843844  } 
    844845 
    845   ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    846   ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     846  MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     847  MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    847848   
    848849  if (0 == globalNbWeight) 
  • XIOS/dev/dev_trunk_omp/src/transformation/generic_algorithm_transformation.cpp

    r1646 r1661  
    136136      { 
    137137        distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ; 
    138         ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
     138        MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
    139139     
    140140      } 
     
    142142      { 
    143143        distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ; 
    144         ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
     144        MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 
    145145      } 
    146146      else //it's a scalar 
     
    238238  int sendValue = (computeGlobalIndexOnProc) ? 1 : 0; 
    239239  int recvValue = 0; 
    240   ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 
     240  MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 
    241241  computeGlobalIndexOnProc = (0 < recvValue); 
    242242 
  • XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp

    r1646 r1661  
    514514    sendRankSizeMap[itIndex->first] = sendSize; 
    515515  } 
    516   ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 
     516  MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 
    517517 
    518518  displ[0]=0 ; 
     
    521521  int* recvRankBuff=new int[recvSize]; 
    522522  int* recvSizeBuff=new int[recvSize]; 
    523   ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 
    524   ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 
     523  MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 
     524  MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 
    525525  for (int i = 0; i < nbClient; ++i) 
    526526  { 
     
    546546    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 
    547547 
    548     ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 
    549     ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 
     548    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 
     549    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 
    550550  } 
    551551 
     
    582582 
    583583    // Send global index source and mask 
    584     ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 
    585     ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 
     584    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 
     585    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 
    586586  } 
    587587 
     
    599599    int recvSize = itSend->second; 
    600600 
    601     ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
     601    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
    602602  } 
    603603 
     
    635635 
    636636    // Okie, now inform the destination which source index are masked 
    637     ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
     637    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 
    638638  } 
    639639  status.resize(requests.size()); 
    640   ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 
     640  MPI_Waitall(requests.size(), &requests[0], &status[0]); 
    641641 
    642642  // Cool, now we can fill in local index of grid destination (counted for masked index) 