Changeset 1053


Timestamp: 02/17/17 17:55:37
Author: yushan
Message: ep_lib namespace specified when netcdf is involved

Location: XIOS/dev/branch_yushan
Files: 89 edited

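Most of the source edits below apply one pattern: MPI handle types in the remap and netcdf-facing code are qualified with the ep_lib namespace, and the endpoint declarations are pulled in only when the endpoint layer is compiled in (the new -D_usingEP flag added in the arch files). A minimal sketch of that pattern, modelled on the Mapper and CParallelTree edits in this changeset (the class name below is illustrative, not part of the changeset):

    #include "mpi.hpp"

    #ifdef _usingEP
    #include "ep_declaration.hpp"   // endpoint (EP) wrapper types such as ep_lib::MPI_Comm
    #endif

    namespace sphereRemap
    {
      // Hypothetical example class, showing the communicator declaration style
      // used throughout this changeset.
      class CExample
      {
      public:
        CExample(ep_lib::MPI_Comm comm = MPI_COMM_WORLD) : communicator(comm) {}

      private:
        ep_lib::MPI_Comm communicator;   // EP-qualified communicator handle
      };
    }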
  • XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.env

    r395 r1053
    -export HDF5_INC_DIR=$HOME/hdf5/include
    -export HDF5_LIB_DIR=$HOME/hdf5/lib
    +module unload netcdf
    +module unload hdf5
    +

    -export NETCDF_INC_DIR=$HOME/netcdf4/include
    -export NETCDF_LIB_DIR=$HOME/netcdf4/lib
    +export HDF5_INC_DIR=$HOME/lib/hdf5/include
    +export HDF5_LIB_DIR=$HOME/lib/hdf5/lib

    +export NETCDF_INC_DIR=$HOME/lib/netcdf/include
    +export NETCDF_LIB_DIR=$HOME/lib/netcdf/lib
    +
  • XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.fcm

    r591 r1053
     ################################################################################

    -%CCOMPILER      mpicc
    -%FCOMPILER      mpif90
    -%LINKER         mpif90
    +%CCOMPILER      mpicc -fopenmp -D_openmpi -D_usingEP
    +%FCOMPILER      mpif90 -fopenmp
    +%LINKER         mpif90 -fopenmp -D_openmpi -D_usingEP

     %BASE_CFLAGS    -ansi -w
  • XIOS/dev/branch_yushan/arch/arch-GCC_LINUX.path

    r475 r1053
    -NETCDF_INCDIR="-I $NETCDF_INC_DIR"
    -NETCDF_LIBDIR="-L $NETCDF_LIB_DIR"
    -NETCDF_LIB="-lnetcdff -lnetcdf"
    +NETCDF_INCDIR="-I $NETCDF_INC_DIR -I $HOME/lib/netcdf_f/include"
    +NETCDF_LIBDIR="-L $NETCDF_LIB_DIR -L $HOME/lib/netcdf_f/lib"
    +NETCDF_LIB=" -lnetcdf"

     MPI_INCDIR=""
    …
     HDF5_INCDIR="-I $HDF5_INC_DIR"
     HDF5_LIBDIR="-L $HDF5_LIB_DIR"
    -HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz"
    +HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz -ldl -lcurl"

     OASIS_INCDIR="-I$PWD/../../oasis3-mct/BLD/build/lib/psmile.MPI1"
  • XIOS/dev/branch_yushan/arch/arch-X64_CURIE.fcm

    r1002 r1053
     ################################################################################

    -%CCOMPILER      mpicc
    -%FCOMPILER      mpif90
    +%CCOMPILER      mpicc -openmp -D_openmpi -D_usingEP
    +%FCOMPILER      mpif90 -openmp -D_openmpi -D_usingEP
     %LINKER         mpif90  -nofor-main

  • XIOS/dev/branch_yushan/extern/remap/src/mapper.cpp

    r923 r1053

     #include "mapper.hpp"
    +

     namespace sphereRemap {
  • XIOS/dev/branch_yushan/extern/remap/src/mapper.hpp

    r844 r1053
     #include "parallel_tree.hpp"
     #include "mpi.hpp"
    +
    +#ifdef _usingEP
    +#include "ep_declaration.hpp"
    +#endif

     namespace sphereRemap {
    …
     {
     public:
    -       Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
    +       Mapper(ep_lib::MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
            ~Mapper();
            void setVerbosity(verbosity v) {verbose=v ;}
    …

            CParallelTree sstree;
    -       MPI_Comm communicator ;
    +      ep_lib::MPI_Comm communicator ;
            std::vector<Elt>  sourceElements ;
            std::vector<Node> sourceMesh ;
  • XIOS/dev/branch_yushan/extern/remap/src/mpi_routing.cpp

    r694 r1053
     #include "timerRemap.hpp"
     #include <iostream>
    +#ifdef _usingEP
    +#include "ep_declaration.hpp"
    +#endif

     namespace sphereRemap {
    …
             CTimer::get("CMPIRouting::init(reduce_scatter)").print();

    -        MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
    -        MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
    +        MPI_Info info_null;
    +
    +        MPI_Alloc_mem(nbTarget *sizeof(int), info_null, &targetRank);
    +        MPI_Alloc_mem(nbSource *sizeof(int), info_null, &sourceRank);

             targetRankToIndex = new int[mpiSize];
  • XIOS/dev/branch_yushan/extern/remap/src/parallel_tree.hpp

    r694 r1053
     #include "mpi_cascade.hpp"
     #include "mpi.hpp"
    +#ifdef _usingEP
    +#include "ep_declaration.hpp"
    +#endif

     namespace sphereRemap {
    …
     {
     public:
    -        CParallelTree(MPI_Comm comm);
    +        CParallelTree(ep_lib::MPI_Comm comm);
             ~CParallelTree();

    …
             vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
             CMPICascade cascade;
    -  MPI_Comm communicator ;
    +  ep_lib::MPI_Comm communicator ;

     };
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_alltoall.cpp

    r1037 r1053
       {
         ::MPI_Aint typesize, llb;
    -    ::MPI_Type_get_extent(sendtype, &llb, &typesize);
    +    ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(sendtype), &llb, &typesize);

         for(int i=0; i<comm.ep_comm_ptr->size_rank_info[0].second; i++)
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_declaration.cpp

    r1037 r1053
     #undef MPI_COMM_NULL

    +#undef MPI_STATUS_IGNORE
    +//#undef MPI_INFO_NULL
    +#undef MPI_REQUEST_NULL
    +
    +#ifdef _openmpi
    +//#undef MPI_Fint
    +#endif
    +
     // _STD defined in ep_type.cpp

    …
     extern ::MPI_Comm MPI_COMM_NULL_STD;

    +extern ::MPI_Status MPI_STATUS_IGNORE_STD;
    +//extern ::MPI_Info MPI_INFO_NULL_STD;
    +extern ::MPI_Request MPI_REQUEST_NULL_STD;
    +
     ep_lib::MPI_Datatype MPI_INT = MPI_INT_STD;
     ep_lib::MPI_Datatype MPI_FLOAT = MPI_FLOAT_STD;
    …
     ep_lib::MPI_Comm MPI_COMM_NULL(MPI_COMM_NULL_STD);

    +//ep_lib::MPI_Info MPI_INFO_NULL(MPI_INFO_NULL_STD);
    +ep_lib::MPI_Request MPI_REQUEST_NULL(MPI_REQUEST_NULL_STD);
    +
    +//ep_lib::MPI_Status MPI_STATUS_IGNORE_STD = MPI_STATUS_IGNORE_STD;
    +
     //ep_lib::MPI_Comm EP_COMM_WORLD;
     //ep_lib::MPI_Comm EP_COMM_NULL;
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_declaration.hpp

    r1037 r1053
     extern ::MPI_Comm MPI_COMM_NULL_STD;

    +extern ::MPI_Status MPI_STATUS_IGNORE_STD;
    +//extern ::MPI_Info MPI_INFO_NULL_STD;
    +extern ::MPI_Request MPI_REQUEST_NULL_STD;
    +
     #undef MPI_INT
     #undef MPI_FLOAT
    …
     #undef MPI_COMM_WORLD
     #undef MPI_COMM_NULL
    +
    +//#undef MPI_INFO_NULL
    +#undef MPI_REQUEST_NULL
    +
    +#ifdef _openmpi
    +//#undef MPI_Fint
    +#endif
    +
    +#undef MPI_STATUS_IGNORE

     extern ep_lib::MPI_Datatype MPI_INT;
    …
     extern ep_lib::MPI_Comm MPI_COMM_NULL;

    -
    -
    +extern ep_lib::MPI_Status MPI_STATUS_IGNORE;
    +//extern ep_lib::MPI_Info MPI_INFO_NULL;
    +extern ep_lib::MPI_Request MPI_REQUEST_NULL;

     #endif // EP_DECLARATION_HPP_INCLUDED
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_exscan.cpp

    r1037 r1053
         if(!comm.is_ep)
         {
    -      ::MPI_Exscan(sendbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype),
    +      ::MPI_Exscan(const_cast<void*>(sendbuf), recvbuf, count, static_cast< ::MPI_Datatype>(datatype),
                        static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
           return 0;
    …
         if(ep_rank_loc == 0)
         {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
           ::MPI_Exscan(local_sum, mpi_scan_recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
         }
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_finalize.cpp

    r1037 r1053
         if(id == 0)
         {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
           ::MPI_Finalize();
         }
    …
         {
           ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);
    -
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
           ::MPI_Abort(mpi_comm, errorcode);
         }
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_fortran.cpp

    r1037 r1053
     #include <map>
     #include <utility>
    +
    +#ifdef _intelmpi
     #undef MPI_Comm_f2c(comm)
     #undef MPI_Comm_c2f(comm)
    +#endif

    +#ifdef _openmpi
    +//#undef MPI_Fint
    +#endif

     namespace ep_lib
     {

    +  int EP_Comm_c2f(MPI_Comm comm)
    +  {
    +    Debug("MPI_Comm_c2f");
    +    int fint;
    +    #ifdef _intelmpi
    +    fint = (::MPI_Fint)(comm.mpi_comm);
    +    #elif _openmpi
    +    fint = ::MPI_Comm_c2f(static_cast< ::MPI_Comm>(comm.mpi_comm));
    +    #endif
    +    std::map<std::pair<int, int>, MPI_Comm  > ::iterator it;
    +
    +    it = fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
    +    if(it == fc_comm_map.end())
    +    {
    +      fc_comm_map.insert(std::make_pair( std::make_pair( fint, omp_get_thread_num()) , comm));
    +      printf("MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm);
    +    }
    +
    +
    +    return fint;
    +
    +  }
    +
    +  MPI_Comm EP_Comm_f2c(int comm)
    +  {
    +    Debug("MPI_Comm_f2c");
    +
    +
    +    std::map<std::pair<int, int>, MPI_Comm  > ::iterator it;
    +
    +    it = fc_comm_map.find(std::make_pair(comm, omp_get_thread_num()));
    +    if(it != fc_comm_map.end())
    +    {
    +      MPI_Comm comm_ptr;
    +      comm_ptr = it->second;
    +      printf("MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr);
    +      return comm_ptr;
    +    }
    +    else
    +    {
    +      MPI_Comm return_comm;
    +      return_comm.mpi_comm = ::MPI_Comm_f2c(comm);
    +      return return_comm;
    +    }
    +  }
    +
    +  #ifdef _intelmpi

       MPI_Fint MPI_Comm_c2f(MPI_Comm comm)
    …

       }
    +
    +


    …
       }

    +
    +
    +  #elif _openmpi
    +
    +  int MPI_Comm_c2f(MPI_Comm comm)
    +  {
    +    Debug("MPI_Comm_c2f");
    +    int fint;
    +    fint = ::MPI_Comm_c2f(static_cast< ::MPI_Comm>(comm.mpi_comm));
    +
    +    std::map<std::pair<int, int>, MPI_Comm  > ::iterator it;
    +
    +    it = fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
    +    if(it == fc_comm_map.end())
    +    {
    +      fc_comm_map.insert(std::make_pair( std::make_pair( fint, omp_get_thread_num()) , comm));
    +      printf("MAP insert: %d, %d, %p\n", fint, omp_get_thread_num(), &comm);
    +    }
    +
    +    return fint;
    +
    +  }
    +
    +  ep_lib::MPI_Comm MPI_Comm_f2c(MPI_Fint comm)
    +  {
    +    Debug("MPI_Comm_f2c");
    +
    +
    +    std::map<std::pair<int, int>, MPI_Comm  > ::iterator it;
    +
    +    it = fc_comm_map.find(std::make_pair(comm, omp_get_thread_num()));
    +    if(it != fc_comm_map.end())
    +    {
    +      MPI_Comm comm_ptr;
    +      comm_ptr = it->second;
    +      printf("MAP find: %d, %d, %p\n", it->first.first, it->first.second, &comm_ptr);
    +      return comm_ptr;
    +    }
    +    else
    +    {
    +      MPI_Comm return_comm;
    +      return_comm.mpi_comm = (::MPI_Comm)(comm);
    +      return return_comm;
    +    }
    +  }
    +  #endif
    +
     }

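The new EP_Comm_c2f / EP_Comm_f2c helpers above keep a per-thread cache so a Fortran integer handle can be turned back into the full EP communicator object. A simplified sketch of that caching scheme, using a stand-in struct instead of the real ep_lib::MPI_Comm (all names below are illustrative, not from the changeset):

    #include <map>
    #include <utility>
    #include <omp.h>

    // Stand-in for ep_lib::MPI_Comm; the real type also carries ep_comm_ptr, rank maps, etc.
    struct EpComm { int mpi_comm; };

    // (Fortran handle, OpenMP thread id) -> EP communicator, shared by all threads.
    static std::map<std::pair<int, int>, EpComm> fc_comm_map;

    int ep_comm_c2f(const EpComm &comm, int fint)
    {
      std::pair<int, int> key(fint, omp_get_thread_num());
      if (fc_comm_map.find(key) == fc_comm_map.end())
        fc_comm_map.insert(std::make_pair(key, comm));   // remember it for later f2c calls
      return fint;                                       // the Fortran side only sees the integer
    }

    EpComm ep_comm_f2c(int fint)
    {
      std::map<std::pair<int, int>, EpComm>::iterator it =
          fc_comm_map.find(std::make_pair(fint, omp_get_thread_num()));
      if (it != fc_comm_map.end()) return it->second;    // cached EP communicator
      EpComm fresh;                                      // otherwise fall back to a bare handle
      fresh.mpi_comm = fint;
      return fresh;
    }

Keying the map on the OpenMP thread id as well as the Fortran handle is what lets several endpoint threads inside one MPI process each recover their own communicator object.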
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_free.cpp

    r1037 r1053
      int MPI_Comm_free(MPI_Comm *comm)
      {
    -    //return 0;
    -
    -    if(comm == NULL) return 0;
    +    //if(comm == NULL) return 0;

        MPI_Barrier(*comm);
    …
        if(! comm->is_ep)
        {
    -      if(comm->mpi_comm)
    +      if(comm->mpi_comm != MPI_COMM_NULL_STD)
          {
            ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->mpi_comm);
    …
        else
        {
    -      if(! comm->mpi_comm) return 0;
    +      if(comm->mpi_comm == MPI_COMM_NULL_STD ) return 0;

          int ep_rank, ep_rank_loc, mpi_rank;
    …

          #pragma omp critical (memory_free)
    -      if(comm->is_intercomm && comm->ep_comm_ptr->intercomm)
    +      if(comm->is_intercomm && comm->ep_comm_ptr->intercomm != NULL)
          {
            if(comm->ep_comm_ptr->intercomm->local_rank_map) comm->ep_comm_ptr->intercomm->local_rank_map->clear();
    …

     /*
    -        if(comm->my_buffer)
    +        if(comm->my_buffer != NULL)
            {
    -          if(comm->my_buffer->buf_int) delete[] comm->my_buffer->buf_int; Debug("buf_int freed\n");
    -          if(comm->my_buffer->buf_float) delete[] comm->my_buffer->buf_float; Debug("buf_float freed\n");
    -          if(comm->my_buffer->buf_double) delete[] comm->my_buffer->buf_double; Debug("buf_double freed\n");
    -          if(comm->my_buffer->buf_long) delete[] comm->my_buffer->buf_long; Debug("buf_long freed\n");
    -          if(comm->my_buffer->buf_ulong) delete[] comm->my_buffer->buf_ulong; Debug("buf_ulong freed\n");
    -          if(comm->my_buffer->buf_char) delete[] comm->my_buffer->buf_char; Debug("buf_char freed\n");
    +          if(comm->my_buffer->buf_int != NULL) delete[] comm->my_buffer->buf_int; Debug("buf_int freed\n");
    +          if(comm->my_buffer->buf_float != NULL) delete[] comm->my_buffer->buf_float; Debug("buf_float freed\n");
    +          if(comm->my_buffer->buf_double != NULL) delete[] comm->my_buffer->buf_double; Debug("buf_double freed\n");
    +          if(comm->my_buffer->buf_long != NULL) delete[] comm->my_buffer->buf_long; Debug("buf_long freed\n");
    +          if(comm->my_buffer->buf_ulong != NULL) delete[] comm->my_buffer->buf_ulong; Debug("buf_ulong freed\n");
    +          if(comm->my_buffer->buf_char != NULL) delete[] comm->my_buffer->buf_char; Debug("buf_char freed\n");
            }
     */
    -        if(comm->ep_barrier)
    +        if(comm->ep_barrier != NULL)
            {
              comm->ep_barrier->~OMPbarrier();
    …


    -        if(comm->rank_map)
    +        if(comm->rank_map != NULL)
            {
              comm->rank_map->clear();
    …


    -        if(comm->is_intercomm && comm->ep_comm_ptr->intercomm->mpi_inter_comm)
    +        if(comm->is_intercomm && comm->ep_comm_ptr->intercomm->mpi_inter_comm != MPI_COMM_NULL_STD)
            {
              ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->ep_comm_ptr->intercomm->mpi_inter_comm);

              ::MPI_Comm_free(&mpi_comm);
    -          comm->ep_comm_ptr->intercomm->mpi_inter_comm = NULL;
    +          //comm->ep_comm_ptr->intercomm->mpi_inter_comm = NULL;
              Debug("mpi_intercomm freed\n");
            }
    …
            for(int i=0; i<num_ep; i++)
            {
    -          if(comm->ep_comm_ptr->comm_list[i].ep_comm_ptr->message_queue)
    +          if(comm->ep_comm_ptr->comm_list[i].ep_comm_ptr->message_queue != NULL)
              {
                comm->ep_comm_ptr->comm_list[i].ep_comm_ptr->message_queue->clear();
    …
              }

    -          if(comm->ep_comm_ptr->comm_list[i].ep_comm_ptr)
    +          if(comm->ep_comm_ptr->comm_list[i].ep_comm_ptr != NULL)
              {
                delete comm->ep_comm_ptr->comm_list[i].ep_comm_ptr;
    …
            }

    -        if(comm->mpi_comm)
    +        if(comm->mpi_comm != MPI_COMM_NULL_STD)
            {
              ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->mpi_comm);
    -          #ifdef _serialized
    -          #pragma omp critical (_mpi_call)
    -          #endif // _serialized
              ::MPI_Comm_free(&mpi_comm);
    -          comm->mpi_comm = NULL;
    +          //comm->mpi_comm = NULL;
     //          printf("label = %d, mpi_comm freed\n", comm->ep_comm_ptr->comm_label);
            }

    -       if(comm) {delete[] comm->ep_comm_ptr->comm_list; Debug("comm freed\n");}
    +       if(comm != NULL) {delete[] comm->ep_comm_ptr->comm_list; Debug("comm freed\n");}

          }
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_gather.cpp

    r1037 r1053
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    -      ::MPI_Gather(sendbuf, sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
    +      ::MPI_Gather(const_cast<void*>(sendbuf), sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
                        root, static_cast< ::MPI_Comm>(comm.mpi_comm));
          return 0;
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          gatherv_displs = new int[mpi_size];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Allgather(&gatherv_cnt, 1, MPI_INT_STD, gatherv_recvcnt, 1, MPI_INT_STD, static_cast< ::MPI_Comm>(comm.mpi_comm));

    …
          }

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Gatherv(local_gather_recvbuf, count*num_ep, static_cast< ::MPI_Datatype>(datatype), recvbuf, gatherv_recvcnt,
                        gatherv_displs, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    -      ::MPI_Allgather(sendbuf, sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
    +      ::MPI_Allgather(const_cast<void*>(sendbuf), sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
                          static_cast< ::MPI_Comm>(comm.mpi_comm));
          return 0;
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          gatherv_displs = new int[mpi_size];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allgather(&gatherv_cnt, 1, MPI_INT_STD, gatherv_recvcnt, 1, MPI_INT_STD, static_cast< ::MPI_Comm>(comm.mpi_comm));

    …
          }

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allgatherv(local_gather_recvbuf, count*num_ep, static_cast< ::MPI_Datatype>(datatype), recvbuf, gatherv_recvcnt,
                        gatherv_displs, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Comm>(comm.mpi_comm));
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_gatherv.cpp

    r1037 r1053
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    -      ::MPI_Gatherv(sendbuf, sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcounts, displs,
    +      ::MPI_Gatherv(const_cast<void*>(sendbuf), sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, const_cast<int*>(recvcounts), const_cast<int*>(displs),
                        static_cast< ::MPI_Datatype>(recvtype), root, static_cast< ::MPI_Comm>(comm.mpi_comm));
          return 0;
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          //gatherv_displs = new int[mpi_size];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Allgather(&gatherv_cnt, 1, MPI_INT_STD, gatherv_recvcnt, 1, MPI_INT_STD, static_cast< ::MPI_Comm>(comm.mpi_comm));

    …
          }

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Gatherv(local_gather_recvbuf, gatherv_cnt, static_cast< ::MPI_Datatype>(datatype), recvbuf, gatherv_recvcnt,
                        gatherv_displs, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allgatherv(sendbuf, sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcounts, displs,
                           static_cast< ::MPI_Datatype>(recvtype), static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          gatherv_displs = new int[mpi_size];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allgather(&gatherv_cnt, 1, MPI_INT_STD, gatherv_recvcnt, 1, MPI_INT_STD, static_cast< ::MPI_Comm>(comm.mpi_comm));
          gatherv_displs[0] = displs[0];
    …
            gatherv_displs[i] = gatherv_recvcnt[i-1] + gatherv_displs[i-1];
          }
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Allgatherv(local_gather_recvbuf, gatherv_cnt, static_cast< ::MPI_Datatype>(datatype), recvbuf, gatherv_recvcnt,
                        gatherv_displs, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Comm>(comm.mpi_comm));
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_init.cpp

    r1037 r1053
      int MPI_Init_thread(int *argc, char*** argv, int required, int*provided)
      {
    -    printf("MPI_Init_thread\n");
    +    //printf("MPI_Init_thread\n");

        int id = omp_get_thread_num();
    …
      int MPI_Init(int *argc, char ***argv)
      {
    -    printf("MPI_init called\n");
    +    //printf("MPI_init called\n");
        int id = omp_get_thread_num();

    …
      int MPI_Initialized(int *flag)
      {
    -    printf("MPI_initialized called\n");
    +    //printf("MPI_initialized called\n");

        ::MPI_Initialized(flag);
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm.cpp

    r1037 r1053
            if(ep_rank == new_local_leader)
            {
    -          #ifdef _serialized
    -          #pragma omp critical (_mpi_call)
    -          #endif // _serialized
              ::MPI_Comm_rank(MPI_COMM_WORLD_STD, &leader_in_world[0]);
            }
    …
      int MPI_Comm_test_inter(MPI_Comm comm, int *flag)
      {
    +    *flag = false;
        if(comm.is_ep)
        {
    …
          return 0;
        }
    -    else
    +    else if(comm.mpi_comm != MPI_COMM_NULL_STD)
        {
          ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm> (comm.mpi_comm);
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif
    +
          ::MPI_Comm_test_inter(mpi_comm, flag);
          return 0;
        }
    +    return 0;
      }

  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_kernel.cpp

    r1037 r1053
        ::MPI_Comm local_mpi_comm = static_cast< ::MPI_Comm>(local_comm.mpi_comm);

    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    -    {
    -      ::MPI_Comm_rank(MPI_COMM_WORLD_STD, &rank_in_world);
    -      ::MPI_Comm_rank(static_cast< ::MPI_Comm>(local_comm.mpi_comm), &rank_in_local_parent);
    -    }
    +
    +    ::MPI_Comm_rank(MPI_COMM_WORLD_STD, &rank_in_world);
    +    ::MPI_Comm_rank(static_cast< ::MPI_Comm>(local_comm.mpi_comm), &rank_in_local_parent);
    +

        bool is_proc_master = false;
    …
          send_buf[2] = num_ep;

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allgather(send_buf.data(), 3, MPI_INT_STD, recv_buf.data(), 3, MPI_INT_STD, local_mpi_comm);

    …
            leader_info[1] = remote_leader;

    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Comm_rank(static_cast< ::MPI_Comm>(peer_comm.mpi_comm), &rank_in_peer_mpi[0]);

    …
          send_buf[4] = rank_in_peer_mpi[1];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Bcast(send_buf.data(), 5, MPI_INT_STD, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

    …
            MPI_Send(send_buf.data(), 3*size_info[0], MPI_INT_STD, remote_leader, tag, peer_comm);
            MPI_Recv(recv_buf.data(), 3*size_info[1], MPI_INT_STD, remote_leader, tag, peer_comm, &status);
    -
    -      }
    -
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +      }
    +
          ::MPI_Bcast(recv_buf.data(), 3*size_info[1], MPI_INT_STD, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

    …
          }

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Bcast(&size_info[2], 2, MPI_INT_STD, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

    …
          }

    -
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Bcast(recv_buf.data(),   3*size_info[3], MPI_INT_STD, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

    …
          ::MPI_Comm intercomm;

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Comm_group(local_mpi_comm, &local_group);

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Group_incl(local_group, size_info[2], new_rank_info[1].data(), &new_group);

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Comm_create(local_mpi_comm, new_group, &new_comm);

    …
          if(is_local_leader)
          {
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Comm_rank(new_comm, &leader_info[2]);
          }

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Bcast(&leader_info[2], 1, MPI_INT_STD, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

    …
          {

    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Barrier(new_comm);

    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Intercomm_create(new_comm, leader_info[2], static_cast< ::MPI_Comm>(peer_comm.mpi_comm), rank_in_peer_mpi[1], tag, &intercomm);

            int id;
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
    +
            ::MPI_Comm_rank(new_comm, &id);
            int my_num_ep = new_ep_info[0][id];
    …
        int rank_in_peer_mpi[2];

    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
        ::MPI_Comm_rank(MPI_COMM_WORLD_STD, &rank_in_world);

    …
          {
            ::MPI_Comm mpi_dup;
    -
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
    +
            ::MPI_Comm_dup(static_cast< ::MPI_Comm>(local_comm.mpi_comm), &mpi_dup);

  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_intercomm_world.cpp

    r1037 r1053
            std::copy ( new_ep_info[0].data(),   new_ep_info[0].data()   + size_info[0], send_buf.begin() + 2*size_info[2] );

    -        MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, mpi_remote_leader, tag, peer_comm);
    -        MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, mpi_remote_leader, tag, peer_comm, &mpi_status);
    +        ::MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, mpi_remote_leader, tag, peer_comm);
    +        ::MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, mpi_remote_leader, tag, peer_comm, &mpi_status);

          }
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_lib.hpp

    r1037 r1053
     #endif

    +#ifdef _intelmpi
       #define MPI_ANY_SOURCE -2
       #define MPI_ANY_TAG -1
    +#elif _openmpi
    +  #define MPI_ANY_SOURCE -1
    +  #define MPI_ANY_TAG -1
    +#endif

      int MPI_Init_thread(int* argc, char*** argv, int required, int*provided);
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_lib_fortran.hpp

    r1037 r1053
     {

    +  #ifdef _intelmpi
    +
    +  MPI_Fint MPI_Comm_c2f(MPI_Comm comm);
    +  MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    +
    +  #elif _openmpi
    +
    +  int MPI_Comm_c2f(MPI_Comm comm);
    +  ep_lib::MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    +
    +  #endif

    -  MPI_Fint MPI_Comm_c2f(MPI_Comm comm);
    -  //int MPI_Comm_c2f(MPI_Comm comm);
    -
    -
    -  MPI_Comm MPI_Comm_f2c(MPI_Fint comm);
    -  //void MPI_Comm_f2c(MPI_Fint comm);
    -
    +  int EP_Comm_c2f(MPI_Comm comm);
    +  MPI_Comm EP_Comm_f2c(int comm);
     }

  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_memory.cpp

    r1037 r1053
      int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr)
      {
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    -    ::MPI_Alloc_mem(size.mpi_aint, info.mpi_info, baseptr);
    +    ::MPI_Alloc_mem(size.mpi_aint, static_cast< ::MPI_Info>(info.mpi_info), baseptr);
        return 0;
       }
    …
      int MPI_Alloc_mem(unsigned long size, MPI_Info info, void *baseptr)
      {
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    -    ::MPI_Alloc_mem(size, info.mpi_info, baseptr);
    +    ::MPI_Alloc_mem(size, static_cast< ::MPI_Info>(info.mpi_info), baseptr);
        return 0;
    -   }
    +  }


  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_merge.cpp

    r1037 r1053
        local_mpi_size = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].second;

    +
    +    if(local_ep_rank == 0)
    +    {
    +      MPI_Status status;
    +      MPI_Request req_s, req_r;
    +      MPI_Isend(&local_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &req_s);
    +      MPI_Irecv(&remote_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &req_r);
    +
    +      MPI_Wait(&req_s, &status);
    +      MPI_Wait(&req_r, &status);
    +    }
    +
    +
    +    MPI_Bcast(&remote_high, 1, MPI_INT, 0, *(inter_comm.ep_comm_ptr->intercomm->local_comm));
    +
    +//    printf("%d, %d, %d, %d\n", local_ep_size, remote_ep_size, local_high, remote_high);
    +
    +
    +    MPI_Comm_dup(inter_comm, newintracomm);
    +
    +    int my_ep_rank = local_high<remote_high? local_ep_rank: local_ep_rank+remote_ep_size;
    +
    +
    +    int intra_ep_rank, intra_ep_rank_loc, intra_mpi_rank;
    +    int intra_ep_size, intra_num_ep, intra_mpi_size;
    +
    +    intra_ep_rank = newintracomm->ep_comm_ptr->size_rank_info[0].first;
    +    intra_ep_rank_loc = newintracomm->ep_comm_ptr->size_rank_info[1].first;
    +    intra_mpi_rank = newintracomm->ep_comm_ptr->size_rank_info[2].first;
    +    intra_ep_size = newintracomm->ep_comm_ptr->size_rank_info[0].second;
    +    intra_num_ep = newintracomm->ep_comm_ptr->size_rank_info[1].second;
    +    intra_mpi_size = newintracomm->ep_comm_ptr->size_rank_info[2].second;
    +
    +
    +    MPI_Barrier_local(*newintracomm);
    +
    +
    +    int *reorder;
    +    if(intra_ep_rank_loc == 0)
    +    {
    +      reorder = new int[intra_ep_size];
    +    }
    +
    +
    +    MPI_Gather(&my_ep_rank, 1, MPI_INT, reorder, 1, MPI_INT, 0, *newintracomm);
    +    if(intra_ep_rank_loc == 0)
    +    {
    +      ::MPI_Bcast(reorder, intra_ep_size, MPI_INT_STD, 0, static_cast< ::MPI_Comm>(newintracomm->mpi_comm));
    +
    +      vector< pair<int, int> > tmp_rank_map(intra_ep_size);
    +
    +
    +      for(int i=0; i<intra_ep_size; i++)
    +      {
    +        tmp_rank_map[reorder[i]] = newintracomm->rank_map->at(i) ;
    +      }
    +
    +      newintracomm->rank_map->swap(tmp_rank_map);
    +
    +      tmp_rank_map.clear();
    +    }
    +
    +    MPI_Barrier_local(*newintracomm);
    +
    +    (*newintracomm).ep_comm_ptr->size_rank_info[0].first = my_ep_rank;
    +
    +    if(intra_ep_rank_loc == 0)
    +    {
    +      delete[] reorder;
    +    }
    +
    +    return MPI_SUCCESS;
    +  }
    +
    +
    +
    +
    +
    +  int MPI_Intercomm_merge(MPI_Comm inter_comm, bool high, MPI_Comm *newintracomm)
    +  {
    +
    +    assert(inter_comm.is_intercomm);
    +
    +    if(inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->comm_label == -99)
    +    {
    +        return MPI_Intercomm_merge_unique_leader(inter_comm, high, newintracomm);
    +    }
    +
    +
    +    Debug("intercomm_merge kernel\n");
    +
    +    int ep_rank, ep_rank_loc, mpi_rank;
    +    int ep_size, num_ep, mpi_size;
    +
    +    ep_rank = inter_comm.ep_comm_ptr->size_rank_info[0].first;
    +    ep_rank_loc = inter_comm.ep_comm_ptr->size_rank_info[1].first;
    +    mpi_rank = inter_comm.ep_comm_ptr->size_rank_info[2].first;
    +    ep_size = inter_comm.ep_comm_ptr->size_rank_info[0].second;
    +    num_ep = inter_comm.ep_comm_ptr->size_rank_info[1].second;
    +    mpi_size = inter_comm.ep_comm_ptr->size_rank_info[2].second;
    +
    +
    +    int local_ep_rank, local_ep_rank_loc, local_mpi_rank;
    +    int local_ep_size, local_num_ep, local_mpi_size;
    +
    +
    +    local_ep_rank = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].first;
    +    local_ep_rank_loc = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].first;
    +    local_mpi_rank = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].first;
    +    local_ep_size = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].second;
    +    local_num_ep = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].second;
    +    local_mpi_size = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].second;
    +
    +    int remote_ep_size = inter_comm.ep_comm_ptr->intercomm->remote_rank_map->size();
    +
    +    int local_high = high;
    +    int remote_high;
    +
    +    MPI_Barrier(inter_comm);

     //    if(local_ep_rank == 0 && high == false)
    …
        }

    -
        MPI_Bcast(&remote_high, 1, MPI_INT, 0, *(inter_comm.ep_comm_ptr->intercomm->local_comm));

    -//    printf("%d, %d, %d, %d\n", local_ep_size, remote_ep_size, local_high, remote_high);
    -
    -
    -    MPI_Comm_dup(inter_comm, newintracomm);
    +    int intercomm_high;
    +    if(ep_rank == 0) intercomm_high = local_high;
    +    MPI_Bcast(&intercomm_high, 1, MPI_INT, 0, inter_comm);
    +
    +    //printf("remote_ep_size = %d, local_high = %d, remote_high = %d, intercomm_high = %d\n", remote_ep_size, local_high, remote_high, intercomm_high);
    +
    +
    +    ::MPI_Comm mpi_intracomm;
    +    MPI_Comm *ep_intracomm;
    +
    +    if(ep_rank_loc == 0)
    +    {
    +
    +      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(inter_comm.ep_comm_ptr->intercomm->mpi_inter_comm);
    +
    +      ::MPI_Intercomm_merge(mpi_comm, intercomm_high, &mpi_intracomm);
    +      MPI_Info info;
    +      MPI_Comm_create_endpoints(mpi_intracomm, num_ep, info, ep_intracomm);
    +
    +      inter_comm.ep_comm_ptr->comm_list->mem_bridge = ep_intracomm;
    +
    +    }
    +
    +
    +
    +    MPI_Barrier_local(inter_comm);
    +
    +    *newintracomm = inter_comm.ep_comm_ptr->comm_list->mem_bridge[ep_rank_loc];

        int my_ep_rank = local_high<remote_high? local_ep_rank: local_ep_rank+remote_ep_size;
    -

        int intra_ep_rank, intra_ep_rank_loc, intra_mpi_rank;
    …


    +
        MPI_Barrier_local(*newintracomm);

    …


    +
        MPI_Gather(&my_ep_rank, 1, MPI_INT, reorder, 1, MPI_INT, 0, *newintracomm);
        if(intra_ep_rank_loc == 0)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Bcast(reorder, intra_ep_size, MPI_INT_STD, 0, static_cast< ::MPI_Comm>(newintracomm->mpi_comm));

    …
        (*newintracomm).ep_comm_ptr->size_rank_info[0].first = my_ep_rank;

    -    if(intra_ep_rank_loc == 0)
    -    {
    -      delete[] reorder;
    -    }
    -
    -    return MPI_SUCCESS;
    -  }
    -
    -
    -
    -
    -
    -  int MPI_Intercomm_merge(MPI_Comm inter_comm, bool high, MPI_Comm *newintracomm)
    -  {
    -
    -    assert(inter_comm.is_intercomm);
    -
    -    if(inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->comm_label == -99)
    -    {
    -        return MPI_Intercomm_merge_unique_leader(inter_comm, high, newintracomm);
    -    }
    -
    -
    -    Debug("intercomm_merge kernel\n");
    -
    -    int ep_rank, ep_rank_loc, mpi_rank;
    -    int ep_size, num_ep, mpi_size;
    -
    -    ep_rank = inter_comm.ep_comm_ptr->size_rank_info[0].first;
    -    ep_rank_loc = inter_comm.ep_comm_ptr->size_rank_info[1].first;
    -    mpi_rank = inter_comm.ep_comm_ptr->size_rank_info[2].first;
    -    ep_size = inter_comm.ep_comm_ptr->size_rank_info[0].second;
    -    num_ep = inter_comm.ep_comm_ptr->size_rank_info[1].second;
    -    mpi_size = inter_comm.ep_comm_ptr->size_rank_info[2].second;
    -
    -
    -    int local_ep_rank, local_ep_rank_loc, local_mpi_rank;
    -    int local_ep_size, local_num_ep, local_mpi_size;
    -
    -
    -    local_ep_rank = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].first;
    -    local_ep_rank_loc = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].first;
    -    local_mpi_rank = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].first;
    -    local_ep_size = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].second;
    -    local_num_ep = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].second;
    -    local_mpi_size = inter_comm.ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].second;
    -
    -    int remote_ep_size = inter_comm.ep_comm_ptr->intercomm->remote_rank_map->size();
    -
    -    int local_high = high;
    -    int remote_high;
    -
    -    MPI_Barrier(inter_comm);
    -
    -//    if(local_ep_rank == 0 && high == false)
    -//    {
    -//      MPI_Status status;
    -//      MPI_Send(&local_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm);
    -//      MPI_Recv(&remote_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &status);
    -//    }
    -//
    -//    if(local_ep_rank == 0 && high == true)
    -//    {
    -//      MPI_Status status;
    -//      MPI_Recv(&remote_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &status);
    -//      MPI_Send(&local_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm);
    -//    }
    -
    -    if(local_ep_rank == 0)
    -    {
    -      MPI_Status status;
    -      MPI_Request req_s, req_r;
    -      MPI_Isend(&local_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &req_s);
    -      MPI_Irecv(&remote_high, 1, MPI_INT, 0, inter_comm.ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &req_r);
    -
    -      MPI_Wait(&req_s, &status);
    -      MPI_Wait(&req_r, &status);
    -    }
    -
    -    MPI_Bcast(&remote_high, 1, MPI_INT, 0, *(inter_comm.ep_comm_ptr->intercomm->local_comm));
    -
    -    int intercomm_high;
    -    if(ep_rank == 0) intercomm_high = local_high;
    -    MPI_Bcast(&intercomm_high, 1, MPI_INT, 0, inter_comm);
    -
    -    //printf("remote_ep_size = %d, local_high = %d, remote_high = %d, intercomm_high = %d\n", remote_ep_size, local_high, remote_high, intercomm_high);
    -
    -
    -    ::MPI_Comm mpi_intracomm;
    -    MPI_Comm *ep_intracomm;
    -
    -    if(ep_rank_loc == 0)
    -    {
    -
    -      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(inter_comm.ep_comm_ptr->intercomm->mpi_inter_comm);
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    -      ::MPI_Intercomm_merge(mpi_comm, intercomm_high, &mpi_intracomm);
    -      MPI_Info info;
    -      MPI_Comm_create_endpoints(mpi_intracomm, num_ep, info, ep_intracomm);
    -
    -      inter_comm.ep_comm_ptr->comm_list->mem_bridge = ep_intracomm;
    -
    -    }
    -
    -
    -
    -    MPI_Barrier_local(inter_comm);
    -
    -    *newintracomm = inter_comm.ep_comm_ptr->comm_list->mem_bridge[ep_rank_loc];
    -
    -    int my_ep_rank = local_high<remote_high? local_ep_rank: local_ep_rank+remote_ep_size;
    -
    -    int intra_ep_rank, intra_ep_rank_loc, intra_mpi_rank;
    -    int intra_ep_size, intra_num_ep, intra_mpi_size;
    -
    -    intra_ep_rank = newintracomm->ep_comm_ptr->size_rank_info[0].first;
    -    intra_ep_rank_loc = newintracomm->ep_comm_ptr->size_rank_info[1].first;
    -    intra_mpi_rank = newintracomm->ep_comm_ptr->size_rank_info[2].first;
    -    intra_ep_size = newintracomm->ep_comm_ptr->size_rank_info[0].second;
    -    intra_num_ep = newintracomm->ep_comm_ptr->size_rank_info[1].second;
    -    intra_mpi_size = newintracomm->ep_comm_ptr->size_rank_info[2].second;
    -
    -
    -
    -    MPI_Barrier_local(*newintracomm);
    -
    -
    -    int *reorder;
    -    if(intra_ep_rank_loc == 0)
    -    {
    -      reorder = new int[intra_ep_size];
    -    }
    -
    -
    -
    -    MPI_Gather(&my_ep_rank, 1, MPI_INT, reorder, 1, MPI_INT, 0, *newintracomm);
    -    if(intra_ep_rank_loc == 0)
    -    {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    -      ::MPI_Bcast(reorder, intra_ep_size, MPI_INT_STD, 0, static_cast< ::MPI_Comm>(newintracomm->mpi_comm));
    -
    -      vector< pair<int, int> > tmp_rank_map(intra_ep_size);
    -
    -
    -      for(int i=0; i<intra_ep_size; i++)
    -      {
    -        tmp_rank_map[reorder[i]] = newintracomm->rank_map->at(i) ;
    -      }
    -
    -      newintracomm->rank_map->swap(tmp_rank_map);
    -
    -      tmp_rank_map.clear();
    -    }
    -
    -    MPI_Barrier_local(*newintracomm);
    -
    -    (*newintracomm).ep_comm_ptr->size_rank_info[0].first = my_ep_rank;
    -

        if(intra_ep_rank_loc == 0)
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_probe.cpp

    r1037 r1053

         #pragma omp flush
    -    #pragma critical (_query)
    +
    +    #pragma omp critical (_query)
        if(comm.ep_comm_ptr->message_queue->size() > 0)
        {
    …

         #pragma omp flush
    -    #pragma critical (_query)
    +
    +    #pragma omp critical (_query)
        if(comm.ep_comm_ptr->message_queue->size() > 0)
        {
    …
              message->ep_src = it->ep_src;

    -          #pragma omp critical (_query)
    +          #pragma omp critical (_query2)
              {
                //printf("local message erased. src = %d, dest = %d, tag = %d\n", it->ep_src, it->ep_dest, it->ep_tag);
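One detail worth noting in the ep_probe.cpp change above: a #pragma the compiler does not recognise is ignored (at most a warning), so the old `#pragma critical (_query)` spelling provided no mutual exclusion at all; only the `#pragma omp critical` form does. A minimal illustration of the difference (not part of the changeset):

    #include <cstdio>

    int main()
    {
      int a = 0, b = 0;

      #pragma omp parallel num_threads(4)
      {
        #pragma critical (_query)       // "omp" missing: pragma is ignored, this increment can race
        a++;

        #pragma omp critical (_query)   // recognised: increments are serialised
        b++;
      }

      printf("a=%d b=%d\n", a, b);      // b is always 4; a may come out smaller
      return 0;
    }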
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_rank.cpp

    r1037 r1053


    -    if(comm.mpi_comm)
    +    if(comm != MPI_COMM_NULL)
        {
          ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm > (comm.mpi_comm);
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_recv.cpp

    r1037 r1053


    -                request->mpi_request = NULL;
    +                request->mpi_request = MPI_REQUEST_NULL_STD;
                    request->buf = buf;
        request->comm = comm;
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_reduce.cpp

    r1037 r1053
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Reduce(sendbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op), root,
                       static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint recvsize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &recvsize);

    …
        if(ep_rank_loc==0)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Reduce(local_recvbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
        }
    …
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allreduce(sendbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op),
                          static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint recvsize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &recvsize);

    …
        if(ep_rank_loc==0)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Allreduce(local_recvbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
        }
    …
        if(!comm.is_ep && comm.mpi_comm)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op),
                               static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
            local_recvcnt[i] = accumulate(recvcounts+ep_rank, recvcounts+ep_rank+num_ep, 0);
          }
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Reduce_scatter(local_buf, local_buf2, local_recvcnt, static_cast< ::MPI_Datatype>(datatype),
                             static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scan.cpp

    r1037 r1053
        if(!comm.is_ep)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
    +
          ::MPI_Scan(sendbuf, recvbuf, count, static_cast< ::MPI_Datatype>(datatype),
                     static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
        if(ep_rank_loc == 0)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Exscan(local_sum, mpi_scan_recvbuf, count, static_cast< ::MPI_Datatype>(datatype), static_cast< ::MPI_Op>(op), static_cast< ::MPI_Comm>(comm.mpi_comm));
        }
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scatter.cpp

    r1037 r1053
        if(!comm.is_ep)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Scatter(sendbuf, sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
                        root, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          local_recvbuf = new void*[datasize*mpi_sendcnt];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Gather(&mpi_sendcnt, 1, MPI_INT_STD, mpi_scatterv_sendcnt, 1, MPI_INT_STD, root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));

    …
          if(root_ep_loc!=0)
          {
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Scatterv(master_sendbuf, mpi_scatterv_sendcnt, displs, static_cast< ::MPI_Datatype>(datatype),
                         local_recvbuf, mpi_sendcnt, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …
          else
          {
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Scatterv(sendbuf, mpi_scatterv_sendcnt, displs, static_cast< ::MPI_Datatype>(datatype),
                         local_recvbuf, mpi_sendcnt, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_scatterv.cpp

    r1037 r1053
        if(!comm.is_ep)
        {
    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Scatterv(sendbuf, sendcounts, displs, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount,
                         static_cast< ::MPI_Datatype>(recvtype), root, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …

        ::MPI_Aint datasize, lb;
    -    #ifdef _serialized
    -    #pragma omp critical (_mpi_call)
    -    #endif // _serialized
    +
        ::MPI_Type_get_extent(static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);

    …
          local_recvbuf = new void*[datasize*mpi_sendcnt];

    -      #ifdef _serialized
    -      #pragma omp critical (_mpi_call)
    -      #endif // _serialized
          ::MPI_Gather(&mpi_sendcnt, 1, MPI_INT_STD, mpi_scatterv_sendcnt, 1, MPI_INT_STD, root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));

    …
          if(root_ep_loc!=0)
          {
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Scatterv(master_sendbuf, mpi_scatterv_sendcnt, mpi_displs, static_cast< ::MPI_Datatype>(datatype),
                         local_recvbuf, mpi_sendcnt, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
    …
          else
          {
    -        #ifdef _serialized
    -        #pragma omp critical (_mpi_call)
    -        #endif // _serialized
            ::MPI_Scatterv(sendbuf, mpi_scatterv_sendcnt, mpi_displs, static_cast< ::MPI_Datatype>(datatype),
                         local_recvbuf, mpi_sendcnt, static_cast< ::MPI_Datatype>(datatype), root_mpi_rank, static_cast< ::MPI_Comm>(comm.mpi_comm));
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_send.cpp

    r1037 r1053  
    8282    // EP intracomm 
    8383 
    84     check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
     84    //check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
    8585 
    8686    int ep_src_loc = comm.ep_comm_ptr->size_rank_info[1].first; 
     
    139139    // EP intracomm 
    140140 
    141     check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
     141    //check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
    142142 
    143143    int ep_src_loc = comm.ep_comm_ptr->size_rank_info[1].first; 
     
    171171    Debug("MPI_Isend with intercomm\n"); 
    172172 
    173     check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
     173    //check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
    174174 
    175175    int dest_remote_ep_rank    = comm.ep_comm_ptr->intercomm->remote_rank_map->at(dest).first; 
     
    244244    Debug("MPI_Issend with intercomm\n"); 
    245245 
    246     check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
     246    //check_sum_send(buf, count, datatype, dest, tag, comm, 1); 
    247247 
    248248    int dest_remote_ep_rank    = comm.ep_comm_ptr->intercomm->remote_rank_map->at(dest).first; 
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_size.cpp

    r1037 r1053  
    3131    Debug("Calling EP_Comm_size\n"); 
    3232 
    33     if(comm.mpi_comm) 
     33    if(comm.mpi_comm != MPI_COMM_NULL_STD) 
    3434    { 
    3535      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm > (comm.mpi_comm);  
     
    5454    if(!comm.is_ep) 
    5555    { 
    56       if(comm.mpi_comm) 
     56      if(comm.mpi_comm != MPI_COMM_NULL_STD) 
    5757      { 
    5858        ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm > (comm.mpi_comm);  
    59         #ifdef _serialized 
    60         #pragma omp critical (_mpi_call) 
    61         #endif // _serialized 
    6259        ::MPI_Comm_remote_size(mpi_comm, size); 
    6360        return 0; 
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_split.cpp

    r1037 r1053  
    144144        int master_color = 1; 
    145145        if(matched_number_loc[j] == 0) master_color = MPI_UNDEFINED; 
    146         #ifdef _serialized 
    147         #pragma omp critical (_mpi_call) 
    148         #endif // _serialized 
     146 
    149147        ::MPI_Comm_split(static_cast< ::MPI_Comm>(comm.mpi_comm), master_color, mpi_rank, &split_mpi_comm[j]); 
    150148         
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_test.cpp

    r1037 r1053  
    6363        status->mpi_status = new ::MPI_Status(mpi_status); 
    6464        status->ep_src = request->ep_src; 
    65       status->ep_tag = request->ep_tag; 
    66       status->ep_datatype = request->ep_datatype; 
     65        status->ep_tag = request->ep_tag; 
     66        status->ep_datatype = request->ep_datatype; 
    6767        int count; 
    6868        MPI_Get_count(status, request->ep_datatype, &count); 
    69         check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
     69        //check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
    7070      } 
    7171 
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.cpp

    r1037 r1053  
    66 
    77::MPI_Comm MPI_COMM_WORLD_STD = MPI_COMM_WORLD; 
    8 //#undef MPI_COMM_WORLD 
     8#undef MPI_COMM_WORLD 
    99 
    1010 
    1111::MPI_Comm MPI_COMM_NULL_STD = MPI_COMM_NULL; 
    12 //#undef MPI_COMM_NULL 
     12#undef MPI_COMM_NULL 
    1313 
     14//::MPI_Info MPI_INFO_NULL_STD = MPI_INFO_NULL; 
     15//#undef MPI_INFO_NULL 
     16 
     17::MPI_Request MPI_REQUEST_NULL_STD = MPI_REQUEST_NULL; 
     18#undef MPI_REQUEST_NULL 
    1419 
    1520::MPI_Datatype MPI_INT_STD = MPI_INT; 
     
    3035 
    3136 
    32 //ep_lib::MPI_Datatype MPI_INT = MPI_INT_STD; 
    33 //ep_lib::MPI_Datatype MPI_FLOAT = MPI_FLOAT_STD; 
    34 //ep_lib::MPI_Datatype MPI_DOUBLE = MPI_DOUBLE_STD; 
    35 //ep_lib::MPI_Datatype MPI_LONG = MPI_LONG_STD; 
    36 //ep_lib::MPI_Datatype MPI_CHAR = MPI_CHAR_STD; 
    37 //ep_lib::MPI_Datatype MPI_UNSIGNED_LONG = MPI_UNSIGNED_LONG_STD; 
    38  
    39  
    40  
    41  
    4237::MPI_Op MPI_SUM_STD = MPI_SUM; 
    4338::MPI_Op MPI_MAX_STD = MPI_MAX; 
     
    4843#undef MPI_MIN 
    4944 
    50 //ep_lib::MPI_Op MPI_SUM = MPI_SUM_STD; 
    51 //ep_lib::MPI_Op MPI_MAX = MPI_MAX_STD; 
    52 //ep_lib::MPI_Op MPI_MIN = MPI_MIN_STD; 
    53  
    54 //  ep_lib::MPI_Comm::MPI_Comm(const MPI_Comm & comm) 
    55 //  { 
    56 //    printf("calling MPI_Comm copy constructor\n"); 
    57 //    is_ep = comm.is_ep; 
    58 //    is_intercomm = comm.is_intercomm; 
    59  
    60 //    int my_rank = comm.ep_comm_ptr->size_rank_info[1].first; 
    61 //    int num_ep  = comm.ep_comm_ptr->size_rank_info[1].second; 
    62  
    63  
    64 //    if(0 == my_rank) 
    65 //    { 
    66 //      MPI_Info info; 
    67 //      MPI_Comm *out_comm; 
    68 //      ::MPI_Comm mpi_dup; 
    69  
    70 //      ::MPI_Comm in_comm = static_cast< ::MPI_Comm>(comm.mpi_comm); 
    71  
    72 //      ::MPI_Comm_dup(in_comm, &mpi_dup); 
    73  
    74 //      MPI_Comm_create_endpoints(mpi_dup, num_ep, info, out_comm); 
    75 //      comm.ep_comm_ptr->comm_list->mem_bridge = out_comm; 
    76 //    } 
    77  
    78 //    MPI_Barrier(comm); 
    79  
    80 //    *this = (comm.ep_comm_ptr->comm_list->mem_bridge[my_rank]); 
    81 // //       // my_buffer = NULL; 
    82 // //       // ep_barrier = NULL; 
    83 // //       // rank_map = NULL; 
    84 // //       // ep_comm_ptr = NULL; 
    85 // //       // mem_bridge = NULL; 
    86 // //       // mpi_bridge = NULL; 
    87 // //       // mpi_comm = comm; 
    88 // } 
     45#ifdef _openmpi 
     46//#undef MPI_Fint 
     47#endif 
    8948 
    9049 
     
    9756 
    9857 
     58 
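Note: the pattern at work in ep_type.cpp, sketched below on the assumption that its purpose is to keep the vendor's constants reachable after ep_lib re-uses their names: copy the vendor value into a *_STD variable, then #undef the macro (as done here for MPI_COMM_WORLD, MPI_COMM_NULL and MPI_REQUEST_NULL). The macro and variable names in the sketch are illustrative, not the real ones.

    #include <cstdio>

    // Pretend this is the vendor header: the constant is a macro.
    #define VENDOR_NULL_HANDLE ((void*)0)

    // Capture the vendor value under a *_STD name before reclaiming the macro
    // name for the wrapper library (mirrors MPI_COMM_NULL_STD / #undef MPI_COMM_NULL).
    static void* const NULL_HANDLE_STD = VENDOR_NULL_HANDLE;
    #undef VENDOR_NULL_HANDLE

    int main()
    {
      std::printf("saved vendor null handle: %p\n", NULL_HANDLE_STD);
      return 0;
    }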
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.hpp

    r1037 r1053  
    1212#include <numeric> 
    1313#include <bitset> 
    14 #include <memory.h> 
     14//#include <memory.h> 
    1515#include <algorithm> 
    1616#include <assert.h> 
     
    4242{ 
    4343  #define MPI_UNDEFINED -32766 
    44   #define MPI_STATUS_IGNORE NULL 
    45   #define MPI_INFO_NULL MPI_Info() 
     44  //#define MPI_STATUS_IGNORE NULL 
     45  //#define MPI_INFO_NULL MPI_Info(MPI_INFO_NULL_STD) 
    4646 
    4747  class ep_communicator; 
     
    8484      void* mpi_status; 
    8585 
    86       MPI_Message() 
    87       { 
    88         mpi_message = 0; 
    89         mpi_status = 0; 
    90       } 
     86      MPI_Message() {} 
     87      #ifdef _intelmpi 
     88      MPI_Message(int message): mpi_message(message) {} 
     89      #elif _openmpi 
     90      MPI_Message(void* message): mpi_message(message) {} 
     91      #endif 
    9192  }; 
    9293 
     
    289290      mem_bridge = NULL; 
    290291      mpi_bridge = NULL; 
    291       mpi_comm = 0; 
    292     } 
    293  
     292    } 
     293 
     294    #ifdef _intelmpi 
    294295    MPI_Comm(int comm) 
    295296    { 
     
    305306    } 
    306307 
    307     //MPI_Comm(const MPI_Comm &comm); 
     308    #elif _openmpi 
     309 
     310    MPI_Comm(void* comm) 
     311    { 
     312      is_ep = false; 
     313      is_intercomm = false; 
     314      my_buffer = NULL; 
     315      ep_barrier = NULL; 
     316      rank_map = NULL; 
     317      ep_comm_ptr = NULL; 
     318      mem_bridge = NULL; 
     319      mpi_bridge = NULL; 
     320      mpi_comm = comm; 
     321    } 
     322    #endif 
    308323 
    309324 
     
    338353      #endif 
    339354 
    340       MPI_Info() 
    341       { 
    342         mpi_info = 0; 
    343       } 
     355      MPI_Info() {} 
     356       
     357      #ifdef _intelmpi 
     358      MPI_Info(int info): mpi_info(info) {} 
     359      #elif _openmpi 
     360      MPI_Info(void* info): mpi_info(info) {} 
     361      #endif 
    344362  }; 
    345363 
     
    368386      MPI_Comm comm;    //! EP communicator related to the communication 
    369387 
    370       MPI_Request() 
    371       { 
    372         mpi_request = 0; 
    373       } 
     388      MPI_Request() {} 
     389 
     390      #ifdef _intelmpi 
     391      MPI_Request(int request): mpi_request(request) {} 
     392      #elif _openmpi 
     393      MPI_Request(void* request): mpi_request(request) {} 
     394      #endif 
     395  }; 
     396 
     397   
     398  class MPI_Aint 
     399  { 
     400    public: 
     401 
     402    unsigned long mpi_aint; 
     403 
     404    MPI_Aint() {} 
     405    MPI_Aint(int a): mpi_aint(a) {} 
    374406  }; 
    375407 
     
    379411 
    380412    int mpi_fint; 
    381   }; 
    382  
    383   class MPI_Aint 
    384   { 
    385     public: 
    386  
    387     unsigned long mpi_aint; 
     413 
     414    MPI_Fint() {} 
     415    MPI_Fint(int f): mpi_fint(f) {} 
     416     
    388417  }; 
    389418 
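Note: the new constructors in ep_type.hpp select on the MPI implementation because the two vendors expose handles differently (integers for Intel MPI, pointers for Open MPI). A minimal sketch of that idea; the _intelmpi/_openmpi flags and the mpi_comm member name follow the diff, the class itself is illustrative.

    #include <cstdio>

    class CommSketch
    {
    public:
      void* mpi_comm;                 // raw handle kept behind one member

      CommSketch() : mpi_comm(nullptr) {}

    #ifdef _intelmpi
      // Intel MPI / MPICH-style: handles are plain integers.
      CommSketch(int comm) : mpi_comm(reinterpret_cast<void*>(static_cast<long>(comm))) {}
    #elif defined(_openmpi)
      // Open MPI: handles are pointers to opaque structs.
      CommSketch(void* comm) : mpi_comm(comm) {}
    #endif
    };

    int main()
    {
      CommSketch c;                   // default construction leaves a null handle
      std::printf("handle = %p\n", c.mpi_comm);
      return 0;
    }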
  • XIOS/dev/branch_yushan/extern/src_ep_dev/ep_wait.cpp

    r1037 r1053  
    5252      status->ep_datatype = request->ep_datatype; 
    5353 
    54       check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
     54      //check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
    5555 
    5656      return 0; 
     
    7070      int count; 
    7171      MPI_Get_count(status, request->ep_datatype, &count); 
    72       check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
     72      //check_sum_recv(request->buf, count, request->ep_datatype, request->ep_src, request->ep_tag, request->comm, 2); 
    7373    } 
    7474          return MPI_SUCCESS; 
     
    106106              int check_count; 
    107107              MPI_Get_count(&array_of_statuses[i], array_of_requests[i].ep_datatype, &check_count); 
    108               check_sum_recv(array_of_requests[i].buf, count, array_of_requests[i].ep_datatype, array_of_requests[i].ep_src, array_of_requests[i].ep_tag, array_of_requests[i].comm, 2); 
     108              //check_sum_recv(array_of_requests[i].buf, count, array_of_requests[i].ep_datatype, array_of_requests[i].ep_src, array_of_requests[i].ep_tag, array_of_requests[i].comm, 2); 
    109109            } 
    110110            finished++; 
     
    124124              MPI_Get_count(&array_of_statuses[i], array_of_requests[i].ep_datatype, &recv_count); 
    125125              MPI_Mrecv(array_of_requests[i].buf, recv_count, array_of_requests[i].ep_datatype, &message, &array_of_statuses[i]); 
    126               check_sum_recv(array_of_requests[i].buf, recv_count, array_of_requests[i].ep_datatype, array_of_requests[i].ep_src, array_of_requests[i].ep_tag, array_of_requests[i].comm, 2); 
     126              //check_sum_recv(array_of_requests[i].buf, recv_count, array_of_requests[i].ep_datatype, array_of_requests[i].ep_src, array_of_requests[i].ep_tag, array_of_requests[i].comm, 2); 
    127127 
    128128              finished++; 
  • XIOS/dev/branch_yushan/src/buffer_server.hpp

    r717 r1053  
    44#include "xios_spl.hpp" 
    55#include "buffer.hpp" 
    6 #include "mpi.hpp" 
     6#include "mpi_std.hpp" 
    77#include "cxios.hpp" 
    88 
  • XIOS/dev/branch_yushan/src/client.cpp

    r1037 r1053  
    3131      else is_MPI_Initialized=false ; 
    3232       
    33       //return; 
    34  
    3533// don't use OASIS 
    3634      if (!CXios::usingOasis) 
  • XIOS/dev/branch_yushan/src/client.hpp

    r1037 r1053  
    1616        static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm); 
    1717        static void finalize(void); 
    18         static void registerContext(const string& id, MPI_Comm contextComm); 
     18        static void registerContext(const string& id, ep_lib::MPI_Comm contextComm); 
    1919 
    2020        static MPI_Comm intraComm; 
  • XIOS/dev/branch_yushan/src/client_client_dht_template.hpp

    r941 r1053  
    1313#include "xios_spl.hpp" 
    1414#include "array_new.hpp" 
    15 #include "mpi.hpp" 
     15#include "mpi_std.hpp" 
    1616#include "policy.hpp" 
    1717#include <boost/unordered_map.hpp> 
     
    4040  public: 
    4141    CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap, 
    42                              const MPI_Comm& clientIntraComm); 
     42                             const ep_lib::MPI_Comm& clientIntraComm); 
    4343 
    4444    CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    45                              const MPI_Comm& clientIntraComm); 
     45                             const ep_lib::MPI_Comm& clientIntraComm); 
    4646 
    4747    void computeIndexInfoMapping(const CArray<size_t,1>& indices); 
     
    5555 
    5656  protected: 
    57     CClientClientDHTTemplate(const MPI_Comm& clientIntraComm); 
     57    CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm); 
    5858 
    5959  protected: 
     
    6262    // Redistribute index and info among clients 
    6363    void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap, 
    64                                  const MPI_Comm& intraCommLevel, 
     64                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6565                                 int level); 
    6666 
    6767    void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap, 
    68                                  const MPI_Comm& intraCommLevel, 
     68                                 const ep_lib::MPI_Comm& intraCommLevel, 
    6969                                 int level); 
    7070 
     
    7373 
    7474    void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    75                                       const MPI_Comm& intraCommLevel, 
     75                                      const ep_lib::MPI_Comm& intraCommLevel, 
    7676                                      int level); 
    7777 
     
    8585    // Send information to clients 
    8686    void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    87                            const MPI_Comm& clientIntraComm, 
    88                            std::vector<MPI_Request>& requestSendInfo); 
     87                           const ep_lib::MPI_Comm& clientIntraComm, 
     88                           std::vector<ep_lib::MPI_Request>& requestSendInfo); 
    8989 
    9090    void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    91                             const MPI_Comm& clientIntraComm, 
    92                             std::vector<MPI_Request>& requestRecvInfo); 
     91                            const ep_lib::MPI_Comm& clientIntraComm, 
     92                            std::vector<ep_lib::MPI_Request>& requestRecvInfo); 
    9393 
    9494    // Send global index to clients 
    9595    void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    96                             const MPI_Comm& clientIntraComm, 
    97                             std::vector<MPI_Request>& requestSendIndexGlobal); 
     96                            const ep_lib::MPI_Comm& clientIntraComm, 
     97                            std::vector<ep_lib::MPI_Request>& requestSendIndexGlobal); 
    9898 
    9999    void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    100                              const MPI_Comm& clientIntraComm, 
    101                              std::vector<MPI_Request>& requestRecvIndex); 
     100                             const ep_lib::MPI_Comm& clientIntraComm, 
     101                             std::vector<ep_lib::MPI_Request>& requestRecvIndex); 
    102102 
    103103    void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements, 
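Note: this header (like buffer_server.hpp above) now includes mpi_std.hpp instead of mpi.hpp, so it no longer inherits the using-namespace-ep_lib directive and every wrapper type must be spelled out as ep_lib::MPI_Comm, ep_lib::MPI_Request, and so on. A small self-contained sketch of the difference; the wrapper struct and the function name are hypothetical.

    #include <cstdio>

    namespace ep_lib
    {
      struct MPI_Comm { void* mpi_comm = nullptr; };   // stand-in wrapper type
    }

    // With no ambient "using namespace ep_lib", the signature must qualify the
    // type explicitly, as the headers in this changeset now do.
    static void computeIndex(const ep_lib::MPI_Comm& comm)
    {
      std::printf("comm handle = %p\n", comm.mpi_comm);
    }

    int main()
    {
      ep_lib::MPI_Comm c;
      computeIndex(c);
      return 0;
    }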
  • XIOS/dev/branch_yushan/src/client_client_dht_template_impl.hpp

    r1037 r1053  
    1818{ 
    1919template<typename T, typename H> 
    20 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm) 
     20CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm) 
    2121  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    2222{ 
     
    3838template<typename T, typename H> 
    3939CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap, 
    40                                                         const MPI_Comm& clientIntraComm) 
     40                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    4141  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    4242{ 
     
    6868template<typename T, typename H> 
    6969CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap, 
    70                                                         const MPI_Comm& clientIntraComm) 
     70                                                        const ep_lib::MPI_Comm& clientIntraComm) 
    7171  : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 
    7272{ 
     
    104104template<typename T, typename H> 
    105105void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 
    106                                                                  const MPI_Comm& commLevel, 
     106                                                                 const ep_lib::MPI_Comm& commLevel, 
    107107                                                                 int level) 
    108108{ 
     
    178178    recvIndexBuff = new unsigned long[recvNbIndexCount]; 
    179179 
    180   std::vector<MPI_Request> request; 
     180  std::vector<ep_lib::MPI_Request> request; 
    181181  std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 
    182182                             iteRecvIndex = recvRankClient.end(), 
     
    199199    sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 
    200200 
    201   std::vector<MPI_Status> status(request.size()); 
     201  std::vector<ep_lib::MPI_Status> status(request.size()); 
    202202 
    203203  //printf("1(%d): calling wait all for %lu requests\n", clientRank, request.size()); 
     
    259259  } 
    260260 
    261   std::vector<MPI_Request> requestOnReturn; 
     261  std::vector<ep_lib::MPI_Request> requestOnReturn; 
    262262  currentIndex = 0; 
    263263  for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 
     
    310310  } 
    311311 
    312   std::vector<MPI_Status> statusOnReturn(requestOnReturn.size()); 
     312  std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 
    313313  //printf("2(%d): calling wait all for %lu requests\n", clientRank, requestOnReturn.size()); 
    314314  MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 
     
    380380template<typename T, typename H> 
    381381void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, 
    382                                                             const MPI_Comm& commLevel, 
     382                                                            const ep_lib::MPI_Comm& commLevel, 
    383383                                                            int level) 
    384384{ 
     
    465465  // it will send a message to the correct clients. 
    466466  // Contents of the message are index and its corresponding information 
    467   std::vector<MPI_Request> request; 
     467  std::vector<ep_lib::MPI_Request> request; 
    468468  int currentIndex = 0; 
    469469  int nbRecvClient = recvRankClient.size(); 
     
    504504 
    505505  //printf("check 8 OK. clientRank = %d\n", clientRank); 
    506   std::vector<MPI_Status> status(request.size()); 
     506  std::vector<ep_lib::MPI_Status> status(request.size()); 
    507507 
    508508  MPI_Waitall(request.size(), &request[0], &status[0]); 
     
    564564template<typename T, typename H> 
    565565void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 
    566                                                        const MPI_Comm& clientIntraComm, 
    567                                                        std::vector<MPI_Request>& requestSendIndex) 
    568 { 
    569   MPI_Request request; 
     566                                                       const ep_lib::MPI_Comm& clientIntraComm, 
     567                                                       std::vector<ep_lib::MPI_Request>& requestSendIndex) 
     568{ 
     569  ep_lib::MPI_Request request; 
    570570  requestSendIndex.push_back(request); 
    571571 
     
    583583template<typename T, typename H> 
    584584void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 
    585                                                          const MPI_Comm& clientIntraComm, 
    586                                                          std::vector<MPI_Request>& requestRecvIndex) 
    587 { 
    588   MPI_Request request; 
     585                                                         const ep_lib::MPI_Comm& clientIntraComm, 
     586                                                         std::vector<ep_lib::MPI_Request>& requestRecvIndex) 
     587{ 
     588  ep_lib::MPI_Request request; 
    589589  requestRecvIndex.push_back(request); 
    590590 
     
    603603template<typename T, typename H> 
    604604void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 
    605                                                       const MPI_Comm& clientIntraComm, 
    606                                                       std::vector<MPI_Request>& requestSendInfo) 
    607 { 
    608   MPI_Request request; 
     605                                                      const ep_lib::MPI_Comm& clientIntraComm, 
     606                                                      std::vector<ep_lib::MPI_Request>& requestSendInfo) 
     607{ 
     608  ep_lib::MPI_Request request; 
    609609  requestSendInfo.push_back(request); 
    610610  //printf("MPI_IsendInfo(info, infoSize, MPI_CHAR,... char count = %d, dest = %d, buf_size = %d\n", infoSize, clientDestRank, sizeof(*info) ); 
     
    623623template<typename T, typename H> 
    624624void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 
    625                                                         const MPI_Comm& clientIntraComm, 
    626                                                         std::vector<MPI_Request>& requestRecvInfo) 
    627 { 
    628   MPI_Request request; 
     625                                                        const ep_lib::MPI_Comm& clientIntraComm, 
     626                                                        std::vector<ep_lib::MPI_Request>& requestRecvInfo) 
     627{ 
     628  ep_lib::MPI_Request request; 
    629629  requestRecvInfo.push_back(request); 
    630630 
     
    699699{ 
    700700  recvNbElements.resize(recvNbRank.size()); 
    701   std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
    702   std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
     701  std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 
     702  std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 
    703703 
    704704  int nRequest = 0; 
     
    751751  std::vector<int> recvBuff(recvBuffSize*2,0); 
    752752 
    753   std::vector<MPI_Request> request(sendBuffSize+recvBuffSize); 
    754   std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
     753  std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize); 
     754  std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 
    755755 
    756756  int nRequest = 0; 
  • XIOS/dev/branch_yushan/src/client_server_mapping.hpp

    r1037 r1053  
    4141 
    4242    static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 
    43                                                      MPI_Comm& clientIntraComm, 
     43                                                     ep_lib::MPI_Comm& clientIntraComm, 
    4444                                                     const std::vector<int>& connectedServerRank); 
    4545 
  • XIOS/dev/branch_yushan/src/client_server_mapping_distributed.hpp

    r835 r1053  
    3535    /** Default constructor */ 
    3636    CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer, 
    37                                     const MPI_Comm& clientIntraComm, 
     37                                    const ep_lib::MPI_Comm& clientIntraComm, 
    3838                                    bool isDataDistributed = true); 
    3939 
  • XIOS/dev/branch_yushan/src/context_client.cpp

    r1037 r1053  
    2020    \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode) 
    2121    */ 
    22     CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer) 
     22    CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer) 
    2323     : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4) 
    2424    { 
     
    163163      for (itBuffer = bufferList.begin(), itSize = sizeList.begin(); itBuffer != bufferList.end(); itBuffer++, itSize++) 
    164164      { 
    165         retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 
     165        CBufferOut* m_buf = (*itBuffer)->getBuffer(*itSize); 
     166        //retBuffer.push_back((*itBuffer)->getBuffer(*itSize)); 
     167        //int m_size = retBuffer.size(); 
     168        //retBuffer.resize(m_size+1); 
     169        //m_size = retBuffer.size(); 
     170        retBuffer.push_back(m_buf); 
    166171      } 
    167172      return retBuffer; 
  • XIOS/dev/branch_yushan/src/context_client.hpp

    r1037 r1053  
    3131    public: 
    3232      // Contructor 
    33       CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0); 
     33      CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0); 
    3434 
    3535      // Send event to server 
     
    6666      int serverSize; //!< Size of server group 
    6767 
    68       MPI_Comm interComm; //!< Communicator of server group 
     68      ep_lib::MPI_Comm interComm; //!< Communicator of server group 
    6969 
    70       MPI_Comm intraComm; //!< Communicator of client group 
     70      ep_lib::MPI_Comm intraComm; //!< Communicator of client group 
    7171 
    7272      map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers 
  • XIOS/dev/branch_yushan/src/context_server.cpp

    r1037 r1053  
    1010#include "file.hpp" 
    1111#include "grid.hpp" 
    12 #include "mpi.hpp" 
     12#include "mpi_std.hpp" 
    1313#include "tracer.hpp" 
    1414#include "timer.hpp" 
     
    2323{ 
    2424 
    25   CContextServer::CContextServer(CContext* parent,MPI_Comm intraComm_,MPI_Comm interComm_) 
     25  CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_) 
    2626  { 
    2727    context=parent; 
     
    7171    int count; 
    7272    char * addr; 
    73     MPI_Status status; 
     73    ep_lib::MPI_Status status; 
    7474    map<int,CServerBuffer*>::iterator it; 
    7575 
     
    101101            { 
    102102              addr=(char*)it->second->getBuffer(count); 
    103               MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
     103              ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 
    104104              bufferRequest[rank]=addr; 
    105105              //printf("find message, i-receiving to buffer %p, rank = %d, commSize = %d\n", addr, rank, commSize); 
     
    113113  void CContextServer::checkPendingRequest(void) 
    114114  { 
    115     map<int,MPI_Request>::iterator it; 
     115    map<int,ep_lib::MPI_Request>::iterator it; 
    116116    list<int> recvRequest; 
    117117    list<int>::iterator itRecv; 
     
    119119    int flag; 
    120120    int count; 
    121     MPI_Status status; 
     121    ep_lib::MPI_Status status; 
    122122 
    123123    //printf("enter checkPendingRequest\n"); 
  • XIOS/dev/branch_yushan/src/context_server.hpp

    r1037 r1053  
    1414    public: 
    1515 
    16     CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; 
     16    CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ; 
    1717    bool eventLoop(void) ; 
    1818    void listen(void) ; 
     
    2525    bool hasFinished(void); 
    2626 
    27     MPI_Comm intraComm ; 
     27    ep_lib::MPI_Comm intraComm ; 
    2828    int intraCommSize ; 
    2929    int intraCommRank ; 
    3030 
    31     MPI_Comm interComm ; 
     31    ep_lib::MPI_Comm interComm ; 
    3232    int commSize ; 
    3333 
    3434    map<int,CServerBuffer*> buffers ; 
    35     map<int,MPI_Request> pendingRequest ; 
     35    map<int,ep_lib::MPI_Request> pendingRequest ; 
    3636    map<int,char*> bufferRequest ; 
    3737 
  • XIOS/dev/branch_yushan/src/cxios.cpp

    r1037 r1053  
    7979    MPI_Info info; 
    8080    MPI_Comm *ep_comm; 
    81     MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);  
     81    MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);  // servers should reach here too. 
    8282       
    8383    globalComm = ep_comm[0]; 
  • XIOS/dev/branch_yushan/src/cxios.hpp

    r1037 r1053  
    1919    public: 
    2020     static void initialize(void) ; 
    21      static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ; 
     21     static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 
    2222     static void initServerSide(void) ; 
    2323     static void clientFinalize(void) ; 
  • XIOS/dev/branch_yushan/src/dht_auto_indexing.cpp

    r1037 r1053  
    2222 
    2323  CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 
    24                                      const MPI_Comm& clientIntraComm) 
     24                                     const ep_lib::MPI_Comm& clientIntraComm) 
    2525    : CClientClientDHTTemplate<size_t>(clientIntraComm) 
    2626  { 
     
    5858  */ 
    5959  CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 
    60                                      const MPI_Comm& clientIntraComm) 
     60                                     const ep_lib::MPI_Comm& clientIntraComm) 
    6161    : CClientClientDHTTemplate<size_t>(clientIntraComm) 
    6262  { 
  • XIOS/dev/branch_yushan/src/dht_auto_indexing.hpp

    r1037 r1053  
    2828 
    2929    CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 
    30                      const MPI_Comm& clientIntraComm); 
     30                     const ep_lib::MPI_Comm& clientIntraComm); 
    3131 
    3232    CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 
    33                      const MPI_Comm& clientIntraComm); 
     33                     const ep_lib::MPI_Comm& clientIntraComm); 
    3434 
    3535    size_t getNbIndexesGlobal() const; 
  • XIOS/dev/branch_yushan/src/filter/filter.cpp

    r1037 r1053  
    1414    CDataPacketPtr outputPacket = engine->apply(data); 
    1515    if (outputPacket) 
     16    { 
     17      printf("filter/filter.cpp : deliverOuput(outputPacket)\n"); 
    1618      deliverOuput(outputPacket); 
     19      printf("filter/filter.cpp : deliverOuput(outputPacket) OKOK\n"); 
     20    } 
    1721  } 
    1822} // namespace xios 
  • XIOS/dev/branch_yushan/src/filter/input_pin.cpp

    r1037 r1053  
    3333      // Unregister before calling onInputReady in case the filter registers again 
    3434      gc.unregisterFilter(this, packet->timestamp); 
     35      printf("filter/input_pin.cpp : onInputReady\n"); 
    3536      onInputReady(it->second.packets); 
     37      printf("filter/input_pin.cpp : onInputReady OKOK\n"); 
    3638      inputs.erase(it); 
    3739    } 
  • XIOS/dev/branch_yushan/src/filter/output_pin.cpp

    r1037 r1053  
    2222    for (it = outputs.begin(), itEnd = outputs.end(); it != itEnd; ++it) 
    2323    { 
     24      printf("filter/output_pin.cpp : setInput\n"); 
    2425      it->first->setInput(it->second, packet); 
     26      printf("filter/output_pin.cpp : setInput OKOK\n"); 
    2527    } 
    2628  } 
  • XIOS/dev/branch_yushan/src/filter/source_filter.cpp

    r1037 r1053  
    2929    grid->inputField(data, packet->data); 
    3030 
     31    printf("filter/source_filter.cpp : deliverOuput(packet) \n"); 
    3132    deliverOuput(packet); 
     33    printf("filter/source_filter.cpp : deliverOuput(packet) OKOK\n"); 
    3234  } 
    3335 
  • XIOS/dev/branch_yushan/src/filter/spatial_transform_filter.cpp

    r1037 r1053  
    150150 
    151151      idxSendBuff = 0; 
    152       std::vector<MPI_Request> sendRecvRequest; 
     152      std::vector<ep_lib::MPI_Request> sendRecvRequest; 
    153153      for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 
    154154      { 
     
    160160          sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 
    161161        } 
    162         sendRecvRequest.push_back(MPI_Request()); 
     162        sendRecvRequest.push_back(ep_lib::MPI_Request()); 
    163163        MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 
    164164      } 
     
    178178        int srcRank = itRecv->first; 
    179179        int countSize = itRecv->second.size(); 
    180         sendRecvRequest.push_back(MPI_Request()); 
     180        sendRecvRequest.push_back(ep_lib::MPI_Request()); 
    181181        MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 
    182182        currentBuff += countSize; 
    183183      } 
    184       std::vector<MPI_Status> status(sendRecvRequest.size()); 
     184      std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size()); 
    185185      MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 
    186186 
  • XIOS/dev/branch_yushan/src/interface/c/icdata.cpp

    r1037 r1053  
    2323#include "context.hpp" 
    2424#include "context_client.hpp" 
    25 #include "mpi.hpp" 
     25#include "mpi_std.hpp" 
    2626#include "timer.hpp" 
    2727#include "array_new.hpp" 
     
    5454   { 
    5555      std::string str; 
    56       MPI_Comm local_comm; 
    57       MPI_Comm return_comm; 
     56      ep_lib::MPI_Comm local_comm; 
     57      ep_lib::MPI_Comm return_comm; 
    5858       
    59       fc_comm_map.clear(); 
     59      ep_lib::fc_comm_map.clear(); 
    6060 
    6161      if (!cstr2string(client_id, len_client_id, str)) return; 
     
    6363      int initialized; 
    6464      MPI_Initialized(&initialized); 
    65       if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 
     65      //if (initialized) local_comm.mpi_comm = MPI_Comm_f2c(*f_local_comm); 
     66      if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm)); 
    6667      else local_comm = MPI_COMM_NULL; 
    6768       
     
    6970 
    7071      CXios::initClientSide(str, local_comm, return_comm); 
    71       *f_return_comm = MPI_Comm_c2f(return_comm); 
     72 
     73      *f_return_comm = ep_lib::EP_Comm_c2f(return_comm); 
    7274 
    7375      printf("in icdata.cpp, f_return_comm = %d\n", *f_return_comm); 
     
    8082   { 
    8183     std::string str; 
    82      MPI_Comm comm; 
     84     ep_lib::MPI_Comm comm; 
    8385 
    8486     if (!cstr2string(context_id, len_context_id, str)) return; 
    8587     CTimer::get("XIOS").resume(); 
    8688     CTimer::get("XIOS init context").resume(); 
    87      comm=MPI_Comm_f2c(*f_comm); 
     89     comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm)); 
    8890     
    8991     CClient::registerContext(str, comm); 
    9092      
    91      //printf("client register context OK\n"); 
     93     printf("icdata.cpp: client register context OK\n"); 
    9294      
    9395     CTimer::get("XIOS init context").suspend(); 
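Note: icdata.cpp now converts Fortran communicator handles with ep_lib::EP_Comm_f2c and EP_Comm_c2f instead of MPI_Comm_f2c/MPI_Comm_c2f. The sketch below only illustrates the general f2c/c2f round trip with a toy lookup table; the two function names come from the diff, while the Comm struct and the bookkeeping are assumptions, not the real ep_lib implementation.

    #include <cstdio>
    #include <map>

    struct Comm { int id = -1; };                       // toy communicator object

    static std::map<int, Comm> handleTable;             // assumed bookkeeping only

    static int EP_Comm_c2f(const Comm& c)               // C object -> Fortran integer
    {
      handleTable[c.id] = c;
      return c.id;
    }

    static Comm EP_Comm_f2c(int f)                      // Fortran integer -> C object
    {
      std::map<int, Comm>::iterator it = handleTable.find(f);
      return (it != handleTable.end()) ? it->second : Comm();
    }

    int main()
    {
      Comm local; local.id = 42;
      int f_handle = EP_Comm_c2f(local);                // what goes back to Fortran
      Comm again   = EP_Comm_f2c(f_handle);             // what the C side recovers
      std::printf("round trip: %d -> %d -> %d\n", local.id, f_handle, again.id);
      return 0;
    }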
  • XIOS/dev/branch_yushan/src/interface/c/oasis_cinterface.cpp

    r1037 r1053  
    2626     
    2727    fxios_oasis_get_localcomm(&f_comm) ; 
    28     comm=MPI_Comm_f2c(f_comm) ; 
     28    //comm=MPI_Comm_f2c(f_comm) ; 
    2929  } 
    3030  
     
    3434     
    3535    fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 
    36     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     36    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    3737  } 
    3838  
     
    4242     
    4343    fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 
    44     comm_client_server=MPI_Comm_f2c(f_comm) ; 
     44    //comm_client_server=MPI_Comm_f2c(f_comm) ; 
    4545  } 
    4646} 
  • XIOS/dev/branch_yushan/src/interface/fortran/idata.F90

    r1037 r1053  
    476476       
    477477      !print*, "in fortran, world_f = ", MPI_COMM_WORLD  
     478 
    478479      print*, "in fortran, f_return_comm = ", f_return_comm  
    479480 
  • XIOS/dev/branch_yushan/src/io/inetcdf4.cpp

    r948 r1053  
    1818    } 
    1919    mpi = comm && !multifile; 
     20    MPI_Info m_info; 
    2021 
    2122    // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO 
    2223    // even if Parallel NetCDF ends up being used. 
    2324    if (mpi) 
    24       CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp); 
     25      CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp); 
    2526    else 
    2627      CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); 
  • XIOS/dev/branch_yushan/src/io/inetcdf4.hpp

    r802 r1053  
    77#include "array_new.hpp" 
    88 
    9 #include "mpi.hpp" 
     9#include "mpi_std.hpp" 
    1010#include "netcdf.hpp" 
    1111 
  • XIOS/dev/branch_yushan/src/io/nc4_data_output.cpp

    r1037 r1053  
    2626      CNc4DataOutput::CNc4DataOutput 
    2727         (const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 
    28           MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
     28          ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 
    2929            : SuperClass() 
    3030            , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) 
     
    450450      StdString domainName = domain->name; 
    451451      domain->assignMesh(domainName, domain->nvertex); 
    452       domain->mesh->createMeshEpsilon(server->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 
     452      domain->mesh->createMeshEpsilon(static_cast<MPI_Comm>(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 
    453453 
    454454      StdString node_x = domainName + "_node_x"; 
  • XIOS/dev/branch_yushan/src/io/nc4_data_output.hpp

    r887 r1053  
    2727               (const StdString & filename, bool exist, bool useClassicFormat, 
    2828                bool useCFConvention, 
    29                 MPI_Comm comm_file, bool multifile, bool isCollective = true, 
     29                ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 
    3030                const StdString& timeCounterName = "time_counter"); 
    3131 
     
    116116 
    117117            /// Propriétés privées /// 
    118             MPI_Comm comm_file; 
     118            ep_lib::MPI_Comm comm_file; 
    119119            const StdString filename; 
    120120            std::map<Time, StdSize> timeToRecordCache; 
  • XIOS/dev/branch_yushan/src/io/netCdfInterface.hpp

    r1037 r1053  
    1616#endif 
    1717 
    18 #include "mpi.hpp" 
    19 //#include <mpi.h> 
     18#include "mpi_std.hpp" 
    2019#include "netcdf.hpp" 
    2120 
  • XIOS/dev/branch_yushan/src/io/netcdf.hpp

    r685 r1053  
    11#ifndef __XIOS_NETCDF_HPP__ 
    22#define __XIOS_NETCDF_HPP__ 
    3 #include "mpi.hpp" 
     3#include "mpi_std.hpp" 
    44#define MPI_INCLUDED 
    55#include <netcdf.h> 
     
    1818extern "C" 
    1919{ 
    20 include <netcdf_par.h> 
     20  #include <netcdf_par.h> 
    2121} 
    2222#  endif 
     
    3030namespace xios 
    3131{ 
    32   inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp) 
     32  inline int nc_create_par(const char *path, int cmode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 
    3333  { 
    3434#if defined(USING_NETCDF_PAR) 
    35     return ::nc_create_par(path, cmode, comm, info, ncidp) ; 
     35    return ::nc_create_par(path, cmode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 
    3636#else 
    3737    ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
     
    4141  } 
    4242 
    43   inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp) 
     43  inline int nc_open_par(const char *path, int mode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp) 
    4444  { 
    4545#if defined(USING_NETCDF_PAR) 
    46     return ::nc_open_par(path, mode, comm, info, ncidp) ; 
     46    return ::nc_open_par(path, mode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ; 
    4747#else 
    4848    ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)", 
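Note: the inline wrappers in netcdf.hpp now accept the ep_lib communicator and pass its raw handle (comm.mpi_comm) on to the parallel NetCDF entry points. A minimal sketch of that unwrapping, using a stand-in for nc_create_par so it stays self-contained; names other than mpi_comm are illustrative.

    #include <cstdio>

    namespace ep_lib
    {
      struct MPI_Comm { void* mpi_comm = nullptr; };    // stand-in wrapper
    }

    // Stand-in for ::nc_create_par, which wants the raw vendor handle.
    static int raw_create_par(const char* path, void* raw_comm)
    {
      std::printf("open %s with raw comm %p\n", path, raw_comm);
      return 0;
    }

    // Mirrors the pattern in the diff: accept the wrapper, pass on the raw handle.
    static int create_par(const char* path, const ep_lib::MPI_Comm& comm)
    {
      return raw_create_par(path, comm.mpi_comm);
    }

    int main()
    {
      ep_lib::MPI_Comm comm;
      return create_par("output.nc", comm);
    }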
  • XIOS/dev/branch_yushan/src/io/onetcdf4.cpp

    r1037 r1053  
    33#include "onetcdf4.hpp" 
    44#include "group_template.hpp" 
    5 //#include "mpi_std.hpp" 
    65#include "netcdf.hpp" 
    76#include "netCdfInterface.hpp" 
     
    1211      /// ////////////////////// Définitions ////////////////////// /// 
    1312 
    14       CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 
    15                                                         bool useCFConvention, 
    16                            const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     13      CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,  
     14                           const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    1715        : path() 
    1816        , wmpi(false) 
     
    3230 
    3331      void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,  
    34                                  const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
     32                                 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 
    3533      { 
    3634         this->useClassicFormat = useClassicFormat; 
     
    5856         { 
    5957            if (wmpi) 
    60                CNetCdfInterface::createPar(filename, mode, *comm, info_null, this->ncidp); 
     58            { 
     59               CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 
     60               printf("creating file with createPar\n"); 
     61            } 
    6162            else 
     63            { 
    6264               CNetCdfInterface::create(filename, mode, this->ncidp); 
     65               printf("creating file with create\n");   
     66            }   
     67                
    6368 
    6469            this->appendMode = false; 
     
    6873            mode |= NC_WRITE; 
    6974            if (wmpi) 
    70                CNetCdfInterface::openPar(filename, mode, *comm, info_null, this->ncidp); 
     75            { 
     76               CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null, this->ncidp); 
     77               printf("opening file with openPar\n"); 
     78            } 
    7179            else 
     80            { 
    7281               CNetCdfInterface::open(filename, mode, this->ncidp); 
     82               printf("opening file with open\n"); 
     83            } 
    7384 
    7485            this->appendMode = true; 
  • XIOS/dev/branch_yushan/src/io/onetcdf4.hpp

    r1037 r1053  
    77#include "data_output.hpp" 
    88#include "array_new.hpp" 
    9 #include "mpi.hpp" 
    10 //#include <mpi.h> 
     9#include "mpi_std.hpp" 
    1110#include "netcdf.hpp" 
    1211 
     
    2928            CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 
    3029                          bool useCFConvention = true, 
    31                       const MPI_Comm* comm = NULL, bool multifile = true, 
     30                      const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 
    3231                      const StdString& timeCounterName = "time_counter"); 
    3332 
     
    3837            /// Initialisation /// 
    3938            void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 
    40                             const MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
     39                            const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 
    4140            void close(void); 
    4241            void sync(void); 
  • XIOS/dev/branch_yushan/src/mpi.hpp

    r1037 r1053  
    1212 
    1313#ifdef _usingEP 
    14   #include "../extern/src_ep/ep_lib.hpp" 
     14  #include "../extern/src_ep_dev/ep_lib.hpp" 
    1515  using namespace ep_lib; 
    1616#elif _usingMPI 
  • XIOS/dev/branch_yushan/src/node/axis.cpp

    r1037 r1053  
    742742      CContextServer* server = CContext::getCurrent()->server; 
    743743      axis->numberWrittenIndexes_ = axis->indexesToWrite.size(); 
    744       MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
    745       MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
     744      ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
     745      ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 
    746746      axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_; 
    747747    } 
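Note: the now-qualified ep_lib::MPI_Allreduce / ep_lib::MPI_Scan pair in axis.cpp computes the total number of written indexes plus each rank's starting offset (inclusive prefix sum minus the rank's own count). A plain single-process sketch of that arithmetic with made-up per-rank counts:

    #include <cstddef>
    #include <cstdio>
    #include <numeric>
    #include <vector>

    int main()
    {
      // Made-up per-rank values of numberWrittenIndexes_.
      std::vector<int> written = {3, 5, 2, 4};

      // MPI_Allreduce(..., MPI_SUM, ...) analogue: total over all ranks.
      int total = std::accumulate(written.begin(), written.end(), 0);

      // MPI_Scan(..., MPI_SUM, ...) analogue: inclusive prefix sums.
      std::vector<int> scan(written.size());
      std::partial_sum(written.begin(), written.end(), scan.begin());

      for (std::size_t r = 0; r < written.size(); ++r)
      {
        // offsetWrittenIndexes_ -= numberWrittenIndexes_ turns the inclusive
        // scan into the exclusive offset where rank r starts writing.
        int offset = scan[r] - written[r];
        std::printf("rank %zu: total=%d offset=%d\n", r, total, offset);
      }
      return 0;
    }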
  • XIOS/dev/branch_yushan/src/node/context.cpp

    r1037 r1053  
    236236 
    237237   //! Initialize client side 
    238    void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
     238   void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 
    239239   { 
    240240     hasClient=true; 
     
    248248     registryOut->setPath(getId()) ; 
    249249 
    250      MPI_Comm intraCommServer, interCommServer; 
     250     ep_lib::MPI_Comm intraCommServer, interCommServer; 
    251251     if (cxtServer) // Attached mode 
    252252     { 
     
    311311 
    312312   //! Initialize server 
    313    void CContext::initServer(MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
     313   void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 
    314314   { 
    315315     hasServer=true; 
     
    323323     registryOut->setPath(getId()) ; 
    324324 
    325      MPI_Comm intraCommClient, interCommClient; 
     325     ep_lib::MPI_Comm intraCommClient, interCommClient; 
    326326     if (cxtClient) // Attached mode 
    327327     { 
     
    369369          closeAllFile(); 
    370370          registryOut->hierarchicalGatherRegistry() ; 
     371          //registryOut->gatherRegistry() ; 
    371372          if (server->intraCommRank==0) CXios::globalRegistry->mergeRegistry(*registryOut) ; 
    372373        } 
    373374 
    374         for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
     375        for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 
    375376          MPI_Comm_free(&(*it)); 
    376377        comms.clear(); 
  • XIOS/dev/branch_yushan/src/node/context.hpp

    r1037 r1053  
    8888      public : 
    8989         // Initialize server or client 
    90          void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0); 
    91          void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0); 
     90         void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 
     91         void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 
    9292         bool isInitialized(void); 
    9393 
     
    229229         StdString idServer_; 
    230230         CGarbageCollector garbageCollector; 
    231          std::list<MPI_Comm> comms; //!< Communicators allocated internally 
     231         std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 
    232232 
    233233      public: // Some function maybe removed in the near future 
  • XIOS/dev/branch_yushan/src/node/domain.cpp

    r1037 r1053  
    475475   { 
    476476          CContext* context = CContext::getCurrent(); 
    477       CContextClient* client = context->client; 
     477    CContextClient* client = context->client; 
    478478          lon_g.resize(ni_glo) ; 
    479479          lat_g.resize(nj_glo) ; 
  • XIOS/dev/branch_yushan/src/node/field_impl.hpp

    r1037 r1053  
    2020    if (clientSourceFilter) 
    2121    { 
      22      printf("field_impl.hpp : clientSourceFilter->streamData\n"); 
    2223      clientSourceFilter->streamData(CContext::getCurrent()->getCalendar()->getCurrentDate(), _data); 
      24      printf("field_impl.hpp : clientSourceFilter->streamData OKOK\n"); 
    2325    } 
    2426    else if (!field_ref.isEmpty() || !content.empty()) 
     27    { 
    2528      ERROR("void CField::setData(const CArray<double, N>& _data)", 
    2629            << "Impossible to receive data from the model for a field [ id = " << getId() << " ] with a reference or an arithmetic operation."); 
     30    } 
    2731  } 
    2832 
  • XIOS/dev/branch_yushan/src/node/file.cpp

    r1037 r1053  
    564564 
    565565      if (isOpen) data_out->closeFile(); 
    566       if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 
    567       else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 
     566      if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective)); 
     567      else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective, time_counter_name)); 
    568568      isOpen = true; 
    569569    } 
  • XIOS/dev/branch_yushan/src/node/file.hpp

    r1037 r1053  
    159159         bool isOpen; 
    160160         bool allDomainEmpty; 
    161          MPI_Comm fileComm; 
     161         ep_lib::MPI_Comm fileComm; 
    162162 
    163163      private : 
  • XIOS/dev/branch_yushan/src/node/mesh.cpp

    r1037 r1053  
    493493 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 
    494494 */ 
    495   void CMesh::createMeshEpsilon(const MPI_Comm& comm, 
     495  void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm, 
    496496                                const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 
    497497                                const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) 
     
    15341534   */ 
    15351535 
    1536   void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 
     1536  void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 
    15371537                               const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
    15381538                               CArray<int, 2>& nghbFaces) 
     
    16901690   */ 
    16911691 
    1692   void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 
     1692  void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 
    16931693                               const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
    16941694                               CArray<int, 2>& nghbFaces) 
     
    18711871   */ 
    18721872 
    1873   void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm, 
     1873  void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm, 
    18741874                                 const CArray<int, 1>& face_idx, 
    18751875                                 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 
  • XIOS/dev/branch_yushan/src/node/mesh.hpp

    r931 r1053  
    6060                      const CArray<double, 2>&, const CArray<double, 2>& ); 
    6161                         
    62       void createMeshEpsilon(const MPI_Comm&, 
     62      void createMeshEpsilon(const ep_lib::MPI_Comm&, 
    6363                             const CArray<double, 1>&, const CArray<double, 1>&, 
    6464                             const CArray<double, 2>&, const CArray<double, 2>& ); 
    6565 
    66       void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&, 
     66      void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 
    6767                              const CArray<double, 2>&, const CArray<double, 2>&, 
    6868                              CArray<int, 2>&); 
     
    8484      CClientClientDHTSizet* pNodeGlobalIndex;                    // pointer to a map <nodeHash, nodeIdxGlo> 
    8585      CClientClientDHTSizet* pEdgeGlobalIndex;                    // pointer to a map <edgeHash, edgeIdxGlo> 
    86       void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    87       void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     86      void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
     87      void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 
    8888      void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
    8989      void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 
  • XIOS/dev/branch_yushan/src/policy.hpp

    r855 r1053  
    3131{ 
    3232protected: 
    33   DivideAdaptiveComm(const MPI_Comm& mpiComm); 
     33  DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 
    3434 
    3535  void computeMPICommLevel(); 
     
    4141 
    4242protected: 
    43   const MPI_Comm& internalComm_; 
     43  const ep_lib::MPI_Comm& internalComm_; 
    4444  std::vector<std::vector<int> > groupParentsBegin_; 
    4545  std::vector<std::vector<int> > nbInGroupParents_; 
  • XIOS/dev/branch_yushan/src/registry.cpp

    r1037 r1053  
    11#include "registry.hpp" 
    22#include "type.hpp" 
    3 #include <mpi.hpp> 
    43#include <fstream> 
    54#include <sstream> 
     
    261260  void CRegistry::hierarchicalGatherRegistry(void) 
    262261  { 
    263     //hierarchicalGatherRegistry(communicator) ; 
     262    hierarchicalGatherRegistry(communicator) ; 
    264263  } 
    265264 
     
    288287      if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ; 
    289288      else color=1 ; 
     289       
    290290      MPI_Comm_split(comm,color,mpiRank,&commDown) ; 
     291       
    291292      if (color==0) gatherRegistry(commDown) ; 
     293      printf("gatherRegistry OKOK\n"); 
    292294      MPI_Comm_free(&commDown) ;     
    293295    } 
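
The hierarchicalGatherRegistry hunk above re-enables the recursive split-and-gather: ranks 0 and mpiSize/2+mpiSize%2 take colour 0, the colour-0 group gathers on the derived communicator, and the communicator is then freed. A minimal sketch of that splitting step, with the gather replaced by a hypothetical stub:

    // Sketch only: the communicator-splitting pattern around gatherRegistry,
    // with the gather itself replaced by a stub for illustration.
    #include <mpi.h>
    #include <cstdio>

    static void gatherRegistryStub(MPI_Comm comm)   // stand-in for CRegistry::gatherRegistry
    {
      int rank = 0;
      MPI_Comm_rank(comm, &rank);
      std::printf("gathering on sub-communicator, rank %d\n", rank);
    }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      MPI_Comm comm = MPI_COMM_WORLD;
      int mpiRank = 0, mpiSize = 0;
      MPI_Comm_rank(comm, &mpiRank);
      MPI_Comm_size(comm, &mpiSize);

      // Colour 0 groups the two "root" ranks of each half, as in the diff.
      int color = (mpiRank == 0 || mpiRank == mpiSize / 2 + mpiSize % 2) ? 0 : 1;

      MPI_Comm commDown;
      MPI_Comm_split(comm, color, mpiRank, &commDown);
      if (color == 0) gatherRegistryStub(commDown);
      MPI_Comm_free(&commDown);

      MPI_Finalize();
      return 0;
    }
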
  • XIOS/dev/branch_yushan/src/registry.hpp

    r1037 r1053  
    2828/** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 
    2929      
    30       CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
     30      CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 
    3131       
    3232      
     
    127127 
    128128/** MPI communicator used for broadcast and gather operation */ 
    129       MPI_Comm communicator ; 
     129      ep_lib::MPI_Comm communicator ; 
    130130  } ; 
    131131 
  • XIOS/dev/branch_yushan/src/test/test_client.f90

    r1037 r1053  
    4242 
    4343  CALL MPI_COMM_RANK(comm,rank,ierr) 
    44   print*, "test_client MPI_COMM_RANK OK" 
     44  print*, "test_client MPI_COMM_RANK OK", rank 
    4545  CALL MPI_COMM_SIZE(comm,size,ierr) 
     46  print*, "test_client MPI_COMM_SIZE OK", size 
    4647   
    4748 
     
    138139  PRINT*,"field field_A is active ? ",xios_field_is_active("field_A") 
    139140  !DO ts=1,24*10 
    140   DO ts=1,24 
     141  DO ts=1,6 
    141142    CALL xios_update_calendar(ts) 
    142143    print*, "xios_update_calendar OK, ts = ", ts 
  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.cpp

    r1037 r1053  
    173173 
    174174  // Sending global index of grid source to corresponding process as well as the corresponding mask 
    175   std::vector<MPI_Request> requests; 
    176   std::vector<MPI_Status> status; 
     175  std::vector<ep_lib::MPI_Request> requests; 
     176  std::vector<ep_lib::MPI_Status> status; 
    177177  boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 
    178178  boost::unordered_map<int, double* > sendValueToDest; 
     
    184184    sendValueToDest[recvRank] = new double [recvSize]; 
    185185 
    186     requests.push_back(MPI_Request()); 
     186    requests.push_back(ep_lib::MPI_Request()); 
    187187    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 
    188188  } 
     
    206206 
    207207    // Send global index source and mask 
    208     requests.push_back(MPI_Request()); 
     208    requests.push_back(ep_lib::MPI_Request()); 
    209209    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 
    210210  } 
     
    215215  //printf("(%d) src/transformation/axis_algorithm_inverse 1st waitall OK\n", clientRank); 
    216216 
    217   std::vector<MPI_Request>().swap(requests); 
    218   std::vector<MPI_Status>().swap(status); 
     217  std::vector<ep_lib::MPI_Request>().swap(requests); 
     218  std::vector<ep_lib::MPI_Status>().swap(status); 
    219219 
    220220  // Okie, on destination side, we will wait for information of masked index of source 
     
    224224    int recvSize = itSend->second; 
    225225 
    226     requests.push_back(MPI_Request()); 
     226    requests.push_back(ep_lib::MPI_Request()); 
    227227    MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 
    228228  } 
     
    242242    } 
    243243    // Okie, now inform the destination which source index are masked 
    244     requests.push_back(MPI_Request()); 
     244    requests.push_back(ep_lib::MPI_Request()); 
    245245    MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 
    246246  } 
  • XIOS/dev/branch_yushan/src/transformation/axis_algorithm_inverse.hpp

    r933 r1053  
    1212#include "axis_algorithm_transformation.hpp" 
    1313#include "transformation.hpp" 
    14  
     14#ifdef _usingEP 
     15#include "ep_declaration.hpp" 
     16#endif 
     17    
    1518namespace xios { 
    1619 
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.cpp

    r1037 r1053  
    371371  CContextClient* client=context->client; 
    372372 
    373   MPI_Comm poleComme(MPI_COMM_NULL); 
    374   MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 
     373  ep_lib::MPI_Comm poleComme(MPI_COMM_NULL); 
     374  ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 
    375375  if (MPI_COMM_NULL != poleComme) 
    376376  { 
    377377    int nbClientPole; 
    378     MPI_Comm_size(poleComme, &nbClientPole); 
     378    ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 
    379379 
    380380    std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, 
     
    541541  double* sendWeightBuff = new double [sendBuffSize]; 
    542542 
    543   std::vector<MPI_Request> sendRequest; 
     543  std::vector<ep_lib::MPI_Request> sendRequest; 
    544544 
    545545  int sendOffSet = 0, l = 0; 
     
    562562    } 
    563563 
    564     sendRequest.push_back(MPI_Request()); 
     564    sendRequest.push_back(ep_lib::MPI_Request()); 
    565565    MPI_Isend(sendIndexDestBuff + sendOffSet, 
    566566             k, 
     
    570570             client->intraComm, 
    571571             &sendRequest.back()); 
    572     sendRequest.push_back(MPI_Request()); 
     572    sendRequest.push_back(ep_lib::MPI_Request()); 
    573573    MPI_Isend(sendIndexSrcBuff + sendOffSet, 
    574574             k, 
     
    578578             client->intraComm, 
    579579             &sendRequest.back()); 
    580     sendRequest.push_back(MPI_Request()); 
     580    sendRequest.push_back(ep_lib::MPI_Request()); 
    581581    MPI_Isend(sendWeightBuff + sendOffSet, 
    582582             k, 
     
    597597  while (receivedSize < recvBuffSize) 
    598598  { 
    599     MPI_Status recvStatus; 
     599    ep_lib::MPI_Status recvStatus; 
    600600    MPI_Recv((recvIndexDestBuff + receivedSize), 
    601601             recvBuffSize, 
     
    637637  } 
    638638 
    639   std::vector<MPI_Status> requestStatus(sendRequest.size()); 
    640   MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 
     639  std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 
     640  ep_lib::MPI_Status stat_ignore; 
     641  MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore); 
     642  //MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 
    641643 
    642644  delete [] sendIndexDestBuff; 
     
    724726 
    725727  MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    726   MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
     728  ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 
    727729   
    728730  std::vector<StdSize> start(1, startIndex - localNbWeight); 
    729731  std::vector<StdSize> count(1, localNbWeight); 
    730732 
    731   WriteNetCdf netCdfWriter(filename, client->intraComm); 
     733  WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm)); 
    732734 
    733735  // netCdfWriter = CONetCDF4(filename, false, false, true, client->intraComm, false); 
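
Just before the WriteNetCdf construction above, each process derives its write offset from an inclusive prefix sum of its local weight count: MPI_Scan yields the inclusive sum, and subtracting the local count gives the exclusive start index. A minimal sketch of that offset computation, with the NetCDF writer omitted and the per-rank count chosen arbitrarily:

    // Sketch only: compute the per-rank start offset for a partitioned write.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      long localNbWeight = rank + 1;   // hypothetical per-rank element count
      long globalNbWeight = 0, startIndex = 0;

      // Total number of weights across all ranks.
      MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);
      // Inclusive prefix sum; subtracting the local count gives this rank's offset.
      MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, MPI_COMM_WORLD);

      long start = startIndex - localNbWeight;
      std::printf("rank %d writes %ld of %ld weights starting at offset %ld\n",
                  rank, localNbWeight, globalNbWeight, start);

      MPI_Finalize();
      return 0;
    }
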
  • XIOS/dev/branch_yushan/src/transformation/domain_algorithm_interpolate.hpp

    r1037 r1053  
    1313#include "transformation.hpp" 
    1414#include "nc4_data_output.hpp" 
     15#ifdef _usingEP 
     16#include "ep_declaration.hpp" 
     17#endif 
    1518 
    1619namespace xios { 
  • XIOS/dev/branch_yushan/src/transformation/grid_transformation.cpp

    r1037 r1053  
    475475 
    476476  // Sending global index of grid source to corresponding process as well as the corresponding mask 
    477   std::vector<MPI_Request> requests; 
    478   std::vector<MPI_Status> status; 
     477  std::vector<ep_lib::MPI_Request> requests; 
     478  std::vector<ep_lib::MPI_Status> status; 
    479479  boost::unordered_map<int, unsigned char* > recvMaskDst; 
    480480  boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 
     
    486486    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 
    487487 
    488     requests.push_back(MPI_Request()); 
     488    requests.push_back(ep_lib::MPI_Request()); 
    489489    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 
    490     requests.push_back(MPI_Request()); 
     490    requests.push_back(ep_lib::MPI_Request()); 
    491491    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 
    492492  } 
     
    524524 
    525525    // Send global index source and mask 
    526     requests.push_back(MPI_Request()); 
     526    requests.push_back(ep_lib::MPI_Request()); 
    527527    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 
    528     requests.push_back(MPI_Request()); 
     528    requests.push_back(ep_lib::MPI_Request()); 
    529529    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 
    530530  } 
     
    536536 
    537537  // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 
    538   std::vector<MPI_Request>().swap(requests); 
    539   std::vector<MPI_Status>().swap(status); 
     538  std::vector<ep_lib::MPI_Request>().swap(requests); 
     539  std::vector<ep_lib::MPI_Status>().swap(status); 
    540540  // Okie, on destination side, we will wait for information of masked index of source 
    541541  for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) 
     
    544544    int recvSize = itSend->second; 
    545545 
    546     requests.push_back(MPI_Request()); 
     546    requests.push_back(ep_lib::MPI_Request()); 
    547547    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
    548548  } 
     
    581581 
    582582    // Okie, now inform the destination which source index are masked 
    583     requests.push_back(MPI_Request()); 
     583    requests.push_back(ep_lib::MPI_Request()); 
    584584    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 
    585585  } 