Changeset 1638
- Timestamp: 01/22/19 16:15:03 (6 years ago)
- Location: XIOS
- Files: 17 added, 77 edited
XIOS/dev/branch_openmp/arch/arch-X64_ADA.env
r986 → r1638
- module unload compilerwrappers
+ module load gcc/6.4.0
  module load intel/2018.2
  module unload netcdf
  module unload hdf5
- module load netcdf/mpi/4.1.3
+ #module load netcdf/mpi/4.1.3
  #module load hdf5/mpi/1.8.9
-
- export NETCDF_INC_DIR=/smplocal/pub/NetCDF/4.1.3/mpi/include
- export NETCDF_LIB_DIR=/smplocal/pub/NetCDF/4.1.3/mpi/lib
- export HDF5_INC_DIR=/smplocal/pub/HDF5/1.8.9/par/include
- export HDF5_LIB_DIR=/smplocal/pub/HDF5/1.8.9/par/lib
XIOS/dev/branch_openmp/arch/arch-X64_ADA.fcm
r1545 → r1638
  ################################################################################

- %CCOMPILER mpiicc -qopenmp -D_usingEP -D_intelmpi
- %FCOMPILER mpiifort -qopenmp -D_usingEP -D_intelmpi
- %LINKER mpiifort -nofor-main -qopenmp -D_usingEP -D_intelmpi
+ %CCOMPILER mpiicc -std=c++11
+ %FCOMPILER mpiifort
+ %LINKER mpiifort -nofor-main

- %BASE_CFLAGS -std=c++11 -diag-disable 1125 -diag-disable 279
+ %BASE_CFLAGS -diag-disable 1125 -diag-disable 279
  %PROD_CFLAGS -O3 -D BOOST_DISABLE_ASSERTS
  %DEV_CFLAGS -g -traceback
- %DEBUG_CFLAGS -DBZ_DEBUG -g -fno-inline
+ %DEBUG_CFLAGS -DBZ_DEBUG -g -traceback -fno-inline

  %BASE_FFLAGS -D__NONE__
  %PROD_FFLAGS -O3
  %DEV_FFLAGS -g -O2 -traceback
- %DEBUG_FFLAGS -g
+ %DEBUG_FFLAGS -g -traceback

  %BASE_INC -D__NONE__
- %BASE_LD -lstdc++
+ %BASE_LD -lstdc++ -Wl,-rpath=/smplocal/pub/NetCDF/4.1.3/mpi/lib:/smplocal/pub/HDF5/1.8.9/par/lib

  %CPP mpiicc -EP
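A likely motivation for the new -Wl,-rpath entry in %BASE_LD is that executables keep finding the NetCDF and HDF5 shared libraries even when the corresponding modules (now commented out in arch-X64_ADA.env above) are not loaded at run time. A quick, purely illustrative way to confirm the embedded search path on a built binary (the executable name and location are placeholders):

  # illustrative check only; point it at whichever executable your build produced
  readelf -d xios_server.exe | grep -Ei 'rpath|runpath'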
XIOS/dev/branch_openmp/arch/arch-X64_ADA.path
r986 → r1638
- NETCDF_INCDIR="-I $NETCDF_INC_DIR"
- #NETCDF_LIBDIR="-L $NETCDF_LIB_DIR"
- #NETCDF_LIB="-lnetcdff -lnetcdf"
+ NETCDF_INCDIR="-I/smplocal/pub/NetCDF/4.1.3/mpi/include"
+ NETCDF_LIBDIR="-L/smplocal/pub/NetCDF/4.1.3/mpi/lib"
+ NETCDF_LIB="-lnetcdff -lnetcdf"

  MPI_INCDIR=""
  ...
  MPI_LIB=""

- #HDF5_INCDIR="-I $HDF5_INC_DIR"
- #HDF5_LIBDIR="-L $HDF5_LIB_DIR"
- #HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz"
+ HDF5_INCDIR="-I/smplocal/pub/HDF5/1.8.9/par/include"
+ HDF5_LIBDIR="-L/smplocal/pub/HDF5/1.8.9/par/lib"
+ HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz"

  OASIS_INCDIR="-I$PWD/../../oasis3-mct/BLD/build/lib/psmile.MPI1"
XIOS/dev/branch_openmp/bld.cfg
r1556 → r1638
  # ------------------------------------------------------------------------------

- inc arch.fcm
- inc config.fcm
+ # Specifying the build directory and src::blitz if necessary.
+ inc bld_dir.cfg

  %CFLAGS %BASE_CFLAGS %COMPIL_CFLAGS
  ...
  %LD_FLAGS %ARCH_LD %BASE_LD

- dest::root $PWD
-
  bld::infile_ext::cpp C::SOURCE
  bld::infile_ext::conf CPP::INCLUDE
  bld::infile_ext::hpp CPP::INCLUDE

+ # IMPORTANT: as the build directory may not necessarily coincide with the source directory,
+ # each source subfolder should be declared individually
+ search_src false
+ src::xios $PWD/src
+ src::xios::config $PWD/src/config
+ src::xios::date $PWD/src/date
+ src::xios::filter $PWD/src/filter
+ src::xios::functor $PWD/src/functor
+ src::xios::interface::c $PWD/src/interface/c
+ src::xios::interface::c_attr $PWD/src/interface/c_attr
+ src::xios::interface::fortran $PWD/src/interface/fortran
+ src::xios::interface::fortran_attr $PWD/src/interface/fortran_attr
+ src::xios::io $PWD/src/io
+ src::xios::node $PWD/src/node
+ src::xios::parse_expr $PWD/src/parse_expr
+ src::xios::test $PWD/src/test
+ src::xios::transformation $PWD/src/transformation
+ src::xios::transformation::Functions $PWD/src/transformation/Functions
+ src::xios::type $PWD/src/type

- search_src true
- src::zzz .
- src::date $PWD/extern/boost/src/date_time
- src::blitz $PWD/extern/blitz/src
  src::netcdf $PWD/extern/netcdf4
  src::remap $PWD/extern/remap/src
- src::src_ep_dev $PWD/extern/src_ep_dev
- #src::src_ep_dev $PWD/extern/ep_dev
+ src::ep $PWD/extern/src_ep_dev
+ #src::ep $PWD/extern/src_ep_dev2

  bld::lib xios
- bld::target libxios.a
+ #bld::target libxios.a
  #bld::target generate_fortran_interface.exe
- bld::target xios_server.exe
+ #bld::target test_remap.exe
+ #bld::target xios_server.exe
  #bld::target test_regular.exe
- #bld::target test_expand_domain.exe
- #bld::target test_new_features.exe
- #bld::target test_unstruct_complete.exe
- #bld::target test_omp.exe
- #bld::target test_complete_omp.exe
- #bld::target test_remap.exe
- #bld::target test_remap_ref.exe
- #bld::target test_remap_omp.exe
- #bld::target test_unstruct_omp.exe
- #bld::target test_netcdf_omp.exe
- #bld::target test_client.exe
+ #bld::target test_xios2_cmip6.exe
+ #bld::target test_new_features.exe test_unstruct_complete.exe
+ #bld::target test_remap.exe#bld::target test_complete.exe
  #bld::target test_complete.exe
- #bld::target test_remap.exe
- #bld::target test_xios2_cmip6.exe
- #bld::target test_connectivity_expand.exe
- #bld::target toy_cmip6.exe
- #bld::target toy_cmip6_omp.exe
+ #bld::target test_client.exe
+ bld::target test_omp.exe
+ #bld::target test_unstruct_complete.exe
+ #bld::target test_unstructured.exe
  bld::exe_dep
  ...
  bld::tool::ld %LINKER
  bld::tool::ldflags %LD_FLAGS
- bld::tool::cflags %CFLAGS %CBASE_INC -I${PWD}/extern/src_netcdf -I${PWD}/extern/boost/include -I${PWD}/extern/rapidxml/include -I${PWD}/extern/blitz/include
+ bld::tool::cflags %CFLAGS %CBASE_INC -I${PWD}/extern/src_netcdf -I${PWD}/extern/boost/include -I${PWD}/extern/rapidxml/include -I${PWD}/extern/blitz/include -I${PWD}/extern/src_ep_dev
+ #bld::tool::cflags %CFLAGS %CBASE_INC -I${PWD}/extern/src_netcdf -I${PWD}/extern/boost/include -I${PWD}/extern/rapidxml/include -I${PWD}/extern/blitz/include -I${PWD}/extern/src_ep_dev2
  bld::tool::fflags %FFLAGS %FBASE_INC
  bld::tool::cppkeys %CPP_KEY
  ...
  # Pre-process code before analysing dependencies
  bld::pp false
- bld::pp::interface/fortran true
- bld::pp::interface/fortran_attr true
+ bld::pp::xios::interface::fortran true
+ bld::pp::xios::interface::fortran_attr true
  bld::excl_dep use::mod_prism_get_comm
  bld::excl_dep use::mod_prism_get_localcomm_proto
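For orientation, this is roughly what the bld_dir.cfg pulled in by the new "inc bld_dir.cfg" line contains. It is generated by make_xios (see the next file); the absolute paths below are placeholders, not part of the changeset:

  # illustrative bld_dir.cfg as written by make_xios; paths are placeholders
  inc /path/to/build_dir/arch.fcm
  inc /path/to/build_dir/config.fcm
  dir::root /path/to/build_dir
  # appended only when the bundled blitz is used:
  src::blitz /path/to/xios_sources/extern/blitz/src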
XIOS/dev/branch_openmp/inputs/iodef.xml
r1544 → r1638
  <field id="field_Domain" operation="average" freq_op="3600s" domain_ref="domain_A"/>
  <field id="field_A_zoom" operation="average" freq_op="3600s" field_ref="field_A" grid_ref="grid_A_zoom"/>
- <field id="field_Scalar" operation="average" freq_op="3600s" grid_ref="grid_Scalar"/>
+ <field id="field_Scalar" operation="instant" freq_op="3600s" grid_ref="grid_Scalar"/>
  </field_definition>

  <file_definition type="one_file" par_access="collective" output_freq="1h" output_level="10" enabled=".TRUE.">
- <file id="output" name="output" enabled=".TRUE.">
+ <file id="output" name="output" >
  <field field_ref="field_A" name="field_A" />
  <field field_ref="field_A_zoom" name="field_B" />
  </file>
- <file id="output1" name="output1" enabled=".TRUE.">
+ <file id="output1" name="output1">
  <field field_ref="field_A" name="field_A" />
  </file>
- <file id="output2" name="output2" enabled=".TRUE.">
+ <file id="output2" name="output2" >
  <field field_ref="field_Scalar" name="field_A" />
  </file>
  ...
  <variable_group id="parameters" >
  <variable id="using_server" type="bool">false</variable>
- <variable id="info_level" type="int">50</variable>
+ <variable id="info_level" type="int">100</variable>
  <variable id="print_file" type="bool">true</variable>
  </variable_group>
XIOS/dev/branch_openmp/make_xios
r1157 → r1638

New defaults for an out-of-source build, and the architecture paths become absolute:

  use_oasis="false"
  oasis="oasis3_mct"
+ build_path="./"
+ build_dir="./"
+ build_suffixed="false"
+ use_extern_boost="false"
+ use_extern_blitz="false"
  use_memtrack="false"
  job="1"
  netcdf_lib="netcdf4_par"
  compil_mode="prod"
- arch_path="arch"
- arch_default_path="arch"
+ arch_path=$PWD/"arch"
+ arch_default_path=$PWD/"arch"
  arch_defined="FALSE"
  arch_path_defined="FALSE"

Matching help text and option parsing are added:

+ echo " [--build_path : absolute path to the build directory"
+ echo " [--build_dir : name of the build directory"
+ echo " [--build_suffixed : generate automatically suffixed name of the build directory (e.g. config_X64_CURIE_prod)"
+ echo " [--use_extern_boost : to use external boost library"
+ echo " [--use_extern_blitz : to use external blitz library"
  ...
+ "--build_path") build_path=$2 ; shift ; shift ;;
+ "--build_dir") build_dir=$2 ; shift ; shift ;;
+ "--build_suffixed") build_suffixed="true" ; shift ;;
+ "--use_extern_boost") use_extern_boost="true" ; shift ;;
+ "--use_extern_blitz") use_extern_blitz="true" ; shift ;;

When external boost or blitz is requested, the bundled boost.tar.gz / blitz.tar.gz archives are skipped during extraction:

  for tarname in `ls $install_dir/tools/archive/*.tar.gz` ; do
+ if ( [[ ${tarname} == "${install_dir}/tools/archive/boost.tar.gz" ]] && [[ "$use_extern_boost" == "true" ]] ) || ( [[ ${tarname} == "${install_dir}/tools/archive/blitz.tar.gz" ]] && [[ "$use_extern_blitz" == "true" ]] )
+ then
+   continue
+ fi
  gunzip -f "$tarname"
  tar -xf ${tarname%.gz}
  done

The root of the build is now computed and recorded in a new bld_dir.cfg file:

+ # Definition of the root directory of the build
+ if [[ "$build_path" == "./" ]]; then
+   install_dir=$PWD
+ else
+   install_dir=${build_path}
+ fi
+ if [[ "$build_suffixed" == "true" ]]; then
+   install_dir="${install_dir}/config_${arch}_${compil_mode}"
+ else
+   install_dir="${install_dir}/${build_dir}"
+ fi
+ mkdir -p $install_dir
+ rm -f ${PWD}/bld_dir.cfg
+ echo "inc ${install_dir}/arch.fcm" >> ${PWD}/bld_dir.cfg
+ echo "inc ${install_dir}/config.fcm" >> ${PWD}/bld_dir.cfg
+ echo "dir::root ${install_dir}" >> ${PWD}/bld_dir.cfg

The arch.path / arch.fcm / arch.env links are now created in, and sourced from, ${install_dir} instead of the source tree, for example:

- ln -s $arch_path/arch-${arch}.fcm arch.fcm
+ ln -s $arch_path/arch-${arch}.fcm ${install_dir}/arch.fcm
  ...
- source arch.env
- source arch.path
+ source ${install_dir}/arch.env
+ source ${install_dir}/arch.path

New blocks select between the bundled and an external boost/blitz:

+ # Setting path for boost
+ if [[ "$use_extern_boost" == "true" ]]
+ then
+   rm -r $PWD/extern/boost
+   ln -s $PWD/.void_dir $PWD/extern/boost
+ else
+   export BOOST_INCDIR="-I${PWD}/extern/boost"
+   export BOOST_LIBDIR=""
+   export BOOST_LIB=""
+ fi
+
+ # Setting path for blitz
+ if [[ "$use_extern_blitz" == "true" ]]
+ then
+   rm -r $PWD/extern/blitz
+   ln -s $PWD/.void_dir $PWD/extern/blitz
+ else
+   echo "src::blitz $PWD/extern/blitz/src" >> ${PWD}/bld_dir.cfg
+   export BLITZ_INCDIR="-I${PWD}/extern/blitz"
+   export BLITZ_LIBDIR=""
+   export BLITZ_LIB=""
+ fi

The boost and blitz include paths are added to the compiler include lists, and config.fcm is written into the build directory:

- XIOS_CINCDIR="$NETCDF_INCDIR $HDF5_INCDIR $MPI_INCDIR "
- XIOS_FINCDIR="$NETCDF_INCDIR $XIOS_FINCDIR $MPI_INCDIR "
+ XIOS_CINCDIR="$NETCDF_INCDIR $HDF5_INCDIR $MPI_INCDIR $BOOST_INCDIR $BLITZ_INCDIR"
+ XIOS_FINCDIR="$NETCDF_INCDIR $XIOS_FINCDIR $MPI_INCDIR $BOOST_INCDIR $BLITZ_INCDIR"

  XIOS_LIB="$XIOS_LIB $NETCDF_LIBDIR $HDF5_LIBDIR $MPI_LIBDIR $NETCDF_LIB $HDF5_LIB $MPI_LIB"

- rm -f config.fcm
- echo "%COMPIL_CFLAGS $COMPIL_CFLAGS" >> config.fcm
+ rm -f ${install_dir}/config.fcm
+ echo "%COMPIL_CFLAGS $COMPIL_CFLAGS" >> ${install_dir}/config.fcm
  (the same ${install_dir}/config.fcm redirection is applied to the %COMPIL_FFLAGS, %CPP_KEY, %CBASE_INC, %FBASE_INC and %ARCH_LD lines)

The memtrack block and the closing lines of the option parsing are only re-indented; their logic is unchanged.

  echo "=> Using "$compil_mode" mode for compiling under architecture \""$arch"\" !"
XIOS/trunk/extern/remap/src/libmapper.cpp
r1614 → r1638
  double* src_area=NULL ;
  double* dst_area=NULL ;
- mapper = new Mapper(MPI_COMM_WORLD);
+ mapper = new Mapper(EP_COMM_WORLD);
  mapper->setVerbosity(PROGRESS) ;
  mapper->setSourceMesh(src_bounds_lon, src_bounds_lat, src_area, n_vert_per_cell_src, n_cell_src, src_pole ) ;
XIOS/trunk/extern/remap/src/mapper.cpp
r1614 → r1638

The whole file is moved onto the endpoint (EP) wrapper layer: every MPI call (MPI_Comm_rank, MPI_Comm_size, MPI_Scan, MPI_Alltoall, MPI_Issend, MPI_Irecv, MPI_Waitall, MPI_Barrier) becomes ep_lib::MPI_..., the request and status arrays are declared as ep_lib::MPI_Request / ep_lib::MPI_Status, and the constants MPI_INT, MPI_LONG, MPI_DOUBLE, MPI_CHAR and MPI_SUM become EP_INT, EP_LONG, EP_DOUBLE, EP_CHAR and EP_SUM. Representative hunks:

  int mpiRank, mpiSize;
- MPI_Comm_rank(communicator, &mpiRank);
- MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);

- MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+ ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;

- MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
+ ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);

- MPI_Request *sendRequest = new MPI_Request[5*mpiSize];
- MPI_Request *recvRequest = new MPI_Request[5*mpiSize];
+ ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
+ ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];

- MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendValue[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);

- MPI_Waitall(nbSendRequest, sendRequest, status);
- MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);

The remaining hunks of this file apply exactly the same substitution to the index, value, area, gradient and neighbour-id exchanges and to the send/receive blocks further down (including the status arrays, the Alltoall of message sizes and the surrounding Barrier calls); the communication logic itself is unchanged.
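A minimal sketch (not project code) of the calling convention the substitutions above rely on: the EP library exposes the usual MPI entry points under the ep_lib namespace together with EP_* constants, so call sites keep their MPI shape. The include follows the project's convention of going through its own mpi.hpp wrapper header (as in client.cpp below); the function and variable names here are made up for the example.

  // Illustrative only: the ep_lib calling pattern used throughout mapper.cpp.
  #include "mpi.hpp"

  void gather_local_sizes(ep_lib::MPI_Comm comm, int localSize, int* allSizes)
  {
    int rank, size;
    ep_lib::MPI_Comm_rank(comm, &rank);   // same signature as the plain MPI call
    ep_lib::MPI_Comm_size(comm, &size);

    // EP_INT plays the role of MPI_INT in the EP wrapper's API
    ep_lib::MPI_Allgather(&localSize, 1, EP_INT, allSizes, 1, EP_INT, comm);
  }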
XIOS/trunk/extern/remap/src/mapper.hpp
r1614 → r1638
  {
  public:
- Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
+ Mapper(ep_lib::MPI_Comm comm=EP_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
  ~Mapper();
  void setVerbosity(verbosity v) {verbose=v ;}
  ...
  CParallelTree sstree;
- MPI_Comm communicator ;
+ ep_lib::MPI_Comm communicator ;
  std::vector<Elt> sourceElements ;
  std::vector<Node> sourceMesh ;
XIOS/trunk/extern/remap/src/mpi_cascade.cpp
r688 → r1638
  namespace sphereRemap {

- CMPICascade::CMPICascade(int nodes_per_level, MPI_Comm comm)
+ CMPICascade::CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm)
  {
    int remaining_levels;
- MPI_Comm intraComm;
+ ep_lib::MPI_Comm intraComm;
    int l = 0; // current level
    do {
  ...
    level[l].p_grp_size = level[l].size/level[l].group_size;

- MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
- MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
+ ep_lib::MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
+ ep_lib::MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
    comm = intraComm;
    l++;
XIOS/trunk/extern/remap/src/mpi_cascade.hpp
r694 → r1638
  {
  public:
- CCascadeLevel(MPI_Comm comm) : comm(comm)
+ CCascadeLevel(ep_lib::MPI_Comm comm) : comm(comm)
  {
-   MPI_Comm_size(comm, &size);
-   MPI_Comm_rank(comm, &rank);
+   ep_lib::MPI_Comm_size(comm, &size);
+   ep_lib::MPI_Comm_rank(comm, &rank);
  }
  int colour() const { return rank % group_size; };
  ...
  int p_key() const { return colour() + rank/(p_grp_size*group_size)*group_size; }

- MPI_Comm comm, pg_comm;
+ ep_lib::MPI_Comm comm, pg_comm;
  int rank;
  int size;
  ...
  public:
  //
- CMPICascade(int nodes_per_level, MPI_Comm comm);
+ CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm);

  int num_levels;
XIOS/trunk/extern/remap/src/mpi_routing.cpp
r694 → r1638

As in mapper.cpp, the whole routing layer is ported to the ep_lib wrapper: CMPIRouting and the alltoalls_known / alltoalls_unknown templates now take ep_lib::MPI_Comm communicators, requests and statuses become ep_lib::MPI_Request / ep_lib::MPI_Status, and MPI_INT / MPI_CHAR / MPI_SUM / MPI_INFO_NULL are replaced by EP_INT / EP_CHAR / EP_SUM / EP_INFO_NULL. Representative hunks:

- CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
- {
-   MPI_Comm_rank(comm, &mpiRank);
-   MPI_Comm_size(comm, &mpiSize);
+ CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
+ {
+   ep_lib::MPI_Comm_rank(comm, &mpiRank);
+   ep_lib::MPI_Comm_size(comm, &mpiSize);

- void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
- {
-   vector<MPI_Request> request(ranks.size() * 2);
-   vector<MPI_Status> status(ranks.size() * 2);
+ void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+ {
+   vector<ep_lib::MPI_Request> request(ranks.size() * 2);
+   vector<ep_lib::MPI_Status> status(ranks.size() * 2);

- MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
+ ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);

- MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
- MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
+ ep_lib::MPI_Alloc_mem(nbTarget *sizeof(int), EP_INFO_NULL, &targetRank);
+ ep_lib::MPI_Alloc_mem(nbSource *sizeof(int), EP_INFO_NULL, &sourceRank);

The one place that needs more than a mechanical substitution is the wildcard receive in CMPIRouting::init(): the source-rank wildcard is now guarded by the _usingMPI / _usingEP keys, keeping MPI_ANY_SOURCE in MPI mode and using -2 in EP mode:

- MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
- indexRequest++;
+ #ifdef _usingMPI
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+ #elif _usingEP
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
+ #endif
+ indexRequest++;

The remaining hunks in init(), transferToTarget, the reverse transfer and the explicit template instantiations at the end of the file apply the same ep_lib::/EP_* substitution to the Issend/Irecv/Waitall/Barrier calls and buffer types without changing the communication pattern:

  template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
-                                 const std::vector<int>& ranks, MPI_Comm communicator);
+                                 const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

  template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
-                               const std::vector<int>& ranks, MPI_Comm communicator);
+                               const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

  }
XIOS/trunk/extern/remap/src/mpi_routing.hpp
r694 → r1638
  {

- MPI_Comm communicator;
+ ep_lib::MPI_Comm communicator;
  int mpiRank;
  int mpiSize;
  ...
  public:
- CMPIRouting(MPI_Comm comm);
+ CMPIRouting(ep_lib::MPI_Comm comm);
  ~CMPIRouting();
  template<typename T> void init(const std::vector<T>& route, CMPICascade *cascade = NULL);
  ...
  template <typename T>
  void alltoalls_known(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
-                      const std::vector<int>& ranks, MPI_Comm communicator);
+                      const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

  template <typename T>
  void alltoalls_unknown(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
-                        const std::vector<int>& ranks, MPI_Comm communicator);
+                        const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
  }
  #endif
XIOS/trunk/extern/remap/src/parallel_tree.cpp
r923 → r1638
  //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
- CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
+ CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
  {
    treeCascade.reserve(cascade.num_levels);
  ...
  int nrecv; // global number of samples THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
- MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
+ ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
  ...
  int *counts = new int[comm.size];
- MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
+ ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);
  ...
  /* each process needs the sample elements from all processes */
  double *recvBuffer = new double[nrecv*4];
- MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
+ ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
  delete[] sendBuffer;
  delete[] counts;
  ...
  /*
- MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
+ MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
  if (!allok) {
    MPI_Finalize();
  ...
  }
  */
- MPI_Abort(MPI_COMM_WORLD,-1) ;
+ ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;
  ...
  CMPIRouting MPIRoute(communicator);
- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("buildLocalTree(initRoute)").resume();
  MPIRoute.init(route);
  ...
  int mpiRank;
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);
  localTree.leafs.reserve(nbLocalElements);
  for (int i = 0; i < nbLocalElements; i++)
  ...
  nb1=node.size() ; nb2=node2.size() ;
  nb=nb1+nb2 ;
- MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
+ ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
  int commSize ;
- MPI_Comm_size(communicator,&commSize) ;
+ ep_lib::MPI_Comm_size(communicator,&commSize) ;

  // make multiple of two
  ...
  // gather circles on this level of the cascade
  int pg_size;
- MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
+ ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
  vector<Coord> allRootCentres(pg_size);
  vector<double> allRootRadia(pg_size);
- MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
- MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0], 1, MPI_DOUBLE, cascade.level[level].pg_comm);
+ ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
+ ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0], 1, EP_DOUBLE, cascade.level[level].pg_comm);

  // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
XIOS/trunk/extern/remap/src/parallel_tree.hpp
r694 → r1638
  {
  public:
- CParallelTree(MPI_Comm comm);
+ CParallelTree(ep_lib::MPI_Comm comm);
  ~CParallelTree();
  ...
  vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
  CMPICascade cascade;
- MPI_Comm communicator ;
+ ep_lib::MPI_Comm communicator ;

  };
XIOS/trunk/src/buffer_client.cpp
r1227 → r1638
  size_t CClientBuffer::maxRequestSize = 0;

- CClientBuffer::CClientBuffer(MPI_Comm interComm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents)
+ CClientBuffer::CClientBuffer(ep_lib::MPI_Comm interComm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents)
  : interComm(interComm)
  , serverRank(serverRank)
  ...
  bool CClientBuffer::checkBuffer(void)
  {
- MPI_Status status;
+ ep_lib::MPI_Status status;
  int flag;
  ...
  {
    traceOff();
-   MPI_Test(&request, &flag, &status);
+   ep_lib::MPI_Test(&request, &flag, &status);
    traceOn();
    if (flag == true) pending = false;
  ...
  if (count > 0)
  {
-   MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
+   ep_lib::MPI_Issend(buffer[current], count, EP_CHAR, serverRank, 20, interComm, &request);
    pending = true;
    if (current == 1) current = 0;
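For readers unfamiliar with this class, a standalone sketch (plain MPI, illustrative names, not project code) of the test-then-send pattern that checkBuffer implements; in the changeset the same calls simply go through ep_lib:: with EP_CHAR, and the real class double-buffers (buffer[current]) so one buffer can be filled while the other is in flight.

  // Standalone illustration only; names and semantics are simplified.
  #include <mpi.h>

  struct SimpleBuffer
  {
    char data[1024];
    int count = 0;             // bytes waiting to be sent
    bool pending = false;      // true while an Issend is in flight
    MPI_Request request;

    // Returns true while a send is still in flight after this call.
    bool checkBuffer(MPI_Comm interComm, int serverRank)
    {
      if (pending)
      {
        int flag = 0;
        MPI_Status status;
        MPI_Test(&request, &flag, &status);   // poll the outstanding send
        if (flag) pending = false;            // the server has taken the buffer
      }

      if (!pending && count > 0)
      {
        // synchronous-mode non-blocking send, as in the original (tag 20 there)
        MPI_Issend(data, count, MPI_CHAR, serverRank, 20, interComm, &request);
        pending = true;
        count = 0;
      }
      return pending;
    }
  };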
XIOS/trunk/src/buffer_client.hpp
r1227 → r1638
  static size_t maxRequestSize;

- CClientBuffer(MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
+ CClientBuffer(ep_lib::MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
  ~CClientBuffer();
  ...
  bool pending;

- MPI_Request request;
+ ep_lib::MPI_Request request;

  CBufferOut* retBuffer;
- const MPI_Comm interComm;
+ const ep_lib::MPI_Comm interComm;
  };
  }
XIOS/trunk/src/client.cpp
r1587 r1638 9 9 #include "oasis_cinterface.hpp" 10 10 #include "mpi.hpp" 11 //#include "mpi_wrapper.hpp" 11 12 #include "timer.hpp" 12 13 #include "buffer_client.hpp" … … 16 17 { 17 18 18 MPI_Comm CClient::intraComm ;19 MPI_Comm CClient::interComm ;20 std::list< MPI_Comm> CClient::contextInterComms;19 ep_lib::MPI_Comm CClient::intraComm ; 20 ep_lib::MPI_Comm CClient::interComm ; 21 std::list<ep_lib::MPI_Comm> CClient::contextInterComms; 21 22 int CClient::serverLeader ; 22 23 bool CClient::is_MPI_Initialized ; … … 24 25 StdOFStream CClient::m_infoStream; 25 26 StdOFStream CClient::m_errorStream; 26 MPI_Comm& CClient::getInterComm(void) { return (interComm); }27 ep_lib::MPI_Comm& CClient::getInterComm(void) { return (interComm); } 27 28 28 29 ///--------------------------------------------------------------- … … 35 36 */ 36 37 37 void CClient::initialize(const string& codeId, MPI_Comm& localComm,MPI_Comm& returnComm)38 void CClient::initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) 38 39 { 39 40 int initialized ; 40 MPI_Initialized(&initialized) ;41 ep_lib::MPI_Initialized(&initialized) ; 41 42 if (initialized) is_MPI_Initialized=true ; 42 43 else is_MPI_Initialized=false ; … … 47 48 { 48 49 // localComm isn't given 49 if (localComm == MPI_COMM_NULL)50 if (localComm == EP_COMM_NULL) 50 51 { 51 52 if (!is_MPI_Initialized) 52 53 { 53 MPI_Init(NULL, NULL);54 ep_lib::MPI_Init(NULL, NULL); 54 55 } 55 56 CTimer::get("XIOS").resume() ; … … 63 64 int myColor ; 64 65 int i,c ; 65 MPI_Comm newComm ; 66 67 MPI_Comm_size(CXios::globalComm,&size) ; 68 MPI_Comm_rank(CXios::globalComm,&rank_); 66 ep_lib::MPI_Comm newComm ; 67 68 ep_lib::MPI_Comm_size(CXios::globalComm,&size) ; 69 70 ep_lib::MPI_Comm_rank(CXios::globalComm,&rank_); 69 71 70 72 hashAll=new unsigned long[size] ; 71 73 72 MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;74 ep_lib::MPI_Allgather(&hashClient,1,EP_LONG,hashAll,1,EP_LONG,CXios::globalComm) ; 73 75 74 76 map<unsigned long, int> colors ; … … 97 99 98 100 myColor=colors[hashClient]; 99 MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;101 ep_lib::MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ; 100 102 101 103 if (CXios::usingServer) … … 104 106 serverLeader=leaders[hashServer] ; 105 107 int intraCommSize, intraCommRank ; 106 MPI_Comm_size(intraComm,&intraCommSize) ;107 MPI_Comm_rank(intraComm,&intraCommRank) ;108 ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ; 109 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ; 108 110 info(50)<<"intercommCreate::client "<<rank_<<" intraCommSize : "<<intraCommSize 109 111 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ; 110 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;112 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ; 111 113 //rank_ = intraCommRank; 112 114 } 113 115 else 114 116 { 115 MPI_Comm_dup(intraComm,&interComm) ;117 ep_lib::MPI_Comm_dup(intraComm,&interComm) ; 116 118 } 117 119 delete [] hashAll ; … … 126 128 else 127 129 { 128 MPI_Comm_dup(localComm,&intraComm) ;129 MPI_Comm_dup(intraComm,&interComm) ;130 ep_lib::MPI_Comm_dup(localComm,&intraComm) ; 131 ep_lib::MPI_Comm_dup(intraComm,&interComm) ; 130 132 } 131 133 } … … 135 137 { 136 138 // localComm isn't given 137 if (localComm == MPI_COMM_NULL)139 if (localComm == EP_COMM_NULL) 138 140 { 139 141 if (!is_MPI_Initialized) oasis_init(codeId) ; 140 142 oasis_get_localcomm(localComm) 
; 141 143 } 142 MPI_Comm_dup(localComm,&intraComm) ;144 ep_lib::MPI_Comm_dup(localComm,&intraComm) ; 143 145 144 146 CTimer::get("XIOS").resume() ; … … 147 149 if (CXios::usingServer) 148 150 { 149 MPI_Status status ;150 MPI_Comm_rank(intraComm,&rank_) ;151 ep_lib::MPI_Status status ; 152 ep_lib::MPI_Comm_rank(intraComm,&rank_) ; 151 153 152 154 oasis_get_intercomm(interComm,CXios::xiosCodeId) ; 153 if (rank_==0) MPI_Recv(&serverLeader,1, MPI_INT, 0, 0, interComm, &status) ;154 MPI_Bcast(&serverLeader,1,MPI_INT,0,intraComm) ;155 } 156 else MPI_Comm_dup(intraComm,&interComm) ;157 } 158 159 MPI_Comm_dup(intraComm,&returnComm) ;155 if (rank_==0) ep_lib::MPI_Recv(&serverLeader,1, EP_INT, 0, 0, interComm, &status) ; 156 ep_lib::MPI_Bcast(&serverLeader,1,EP_INT,0,intraComm) ; 157 } 158 else ep_lib::MPI_Comm_dup(intraComm,&interComm) ; 159 } 160 161 ep_lib::MPI_Comm_dup(intraComm,&returnComm) ; 160 162 } 161 163 … … 168 170 * Function is only called by client. 169 171 */ 170 void CClient::registerContext(const string& id, MPI_Comm contextComm)172 void CClient::registerContext(const string& id, ep_lib::MPI_Comm contextComm) 171 173 { 172 174 CContext::setCurrent(id) ; … … 178 180 // Attached mode 179 181 { 180 MPI_Comm contextInterComm ;181 MPI_Comm_dup(contextComm,&contextInterComm) ;182 ep_lib::MPI_Comm contextInterComm ; 183 ep_lib::MPI_Comm_dup(contextComm,&contextInterComm) ; 182 184 CContext* contextServer = CContext::create(idServer); 183 185 … … 198 200 size_t message_size ; 199 201 int leaderRank ; 200 MPI_Comm contextInterComm ;201 202 MPI_Comm_size(contextComm,&size) ;203 MPI_Comm_rank(contextComm,&rank) ;204 MPI_Comm_rank(CXios::globalComm,&globalRank) ;202 ep_lib::MPI_Comm contextInterComm ; 203 204 ep_lib::MPI_Comm_size(contextComm,&size) ; 205 ep_lib::MPI_Comm_rank(contextComm,&rank) ; 206 ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ; 205 207 if (rank!=0) globalRank=0 ; 206 208 … … 214 216 buffer<<msg ; 215 217 216 MPI_Send((void*)buff,buffer.count(),MPI_CHAR,serverLeader,1,CXios::globalComm) ;217 218 MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;218 ep_lib::MPI_Send((void*)buff,buffer.count(),EP_CHAR,serverLeader,1,CXios::globalComm) ; 219 220 ep_lib::MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ; 219 221 info(10)<<"Register new Context : "<<id<<endl ; 220 MPI_Comm inter ;221 MPI_Intercomm_merge(contextInterComm,0,&inter) ;222 MPI_Barrier(inter) ;222 ep_lib::MPI_Comm inter ; 223 ep_lib::MPI_Intercomm_merge(contextInterComm,0,&inter) ; 224 ep_lib::MPI_Barrier(inter) ; 223 225 224 226 context->initClient(contextComm,contextInterComm) ; 225 227 226 228 contextInterComms.push_back(contextInterComm); 227 MPI_Comm_free(&inter);229 ep_lib::MPI_Comm_free(&inter); 228 230 delete [] buff ; 229 231 … … 251 253 int msg=0 ; 252 254 253 MPI_Comm_rank(intraComm,&rank) ;255 ep_lib::MPI_Comm_rank(intraComm,&rank) ; 254 256 if (rank==0) 255 257 { 256 MPI_Send(&msg,1,MPI_INT,0,5,interComm) ; // tags oasis_endded = 5258 ep_lib::MPI_Send(&msg,1,EP_INT,0,5,interComm) ; // tags oasis_endded = 5 257 259 } 258 260 … … 266 268 int msg=0 ; 267 269 268 MPI_Comm_rank(intraComm,&rank) ;270 ep_lib::MPI_Comm_rank(intraComm,&rank) ; 269 271 270 272 if (!CXios::isServer) 271 273 { 272 MPI_Comm_rank(intraComm,&rank) ;274 ep_lib::MPI_Comm_rank(intraComm,&rank) ; 273 275 if (rank==0) 274 276 { 275 MPI_Send(&msg,1,MPI_INT,0,0,interComm) ;276 } 277 } 278 279 for (std::list< MPI_Comm>::iterator it = 
contextInterComms.begin(); it != contextInterComms.end(); it++)280 MPI_Comm_free(&(*it));281 MPI_Comm_free(&interComm);282 MPI_Comm_free(&intraComm);277 ep_lib::MPI_Send(&msg,1,EP_INT,0,0,interComm) ; 278 } 279 } 280 281 for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 282 ep_lib::MPI_Comm_free(&(*it)); 283 ep_lib::MPI_Comm_free(&interComm); 284 ep_lib::MPI_Comm_free(&intraComm); 283 285 284 286 CTimer::get("XIOS init/finalize").suspend() ; … … 288 290 { 289 291 if (CXios::usingOasis) oasis_finalize(); 290 else MPI_Finalize() ;292 else ep_lib::MPI_Finalize() ; 291 293 } 292 294 … … 325 327 int size = 0; 326 328 int rank; 327 MPI_Comm_size(CXios::globalComm, &size);329 ep_lib::MPI_Comm_size(CXios::globalComm, &size); 328 330 while (size) 329 331 { … … 334 336 if (CXios::usingOasis) 335 337 { 336 MPI_Comm_rank(CXios::globalComm,&rank);338 ep_lib::MPI_Comm_rank(CXios::globalComm,&rank); 337 339 fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << rank << ext; 338 340 } -
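Note on the pattern above: the client.cpp changes are essentially a mechanical substitution, every MPI type, constant and call now goes through the ep_lib wrapper (ep_lib::MPI_Comm, EP_INT, EP_COMM_NULL, ...) so that the same source builds against either plain MPI or the endpoint (EP) library. A minimal sketch of the resulting call style, assuming only the mpi.hpp wrapper from this changeset; the helper function itself is illustrative and not part of XIOS:

    #include "mpi.hpp"   // provides ep_lib and the EP_* datatype/constant aliases

    // Broadcast the server leader rank from rank 0 of the intra-communicator.
    // Written once against the wrapper, the call is valid for both backends.
    void broadcastServerLeader(ep_lib::MPI_Comm intraComm, int& serverLeader)
    {
      int rank;
      ep_lib::MPI_Comm_rank(intraComm, &rank);
      ep_lib::MPI_Bcast(&serverLeader, 1, EP_INT, 0, intraComm);
    }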
XIOS/trunk/src/client.hpp
r1587 r1638 10 10 { 11 11 public: 12 static void initialize(const string& codeId, MPI_Comm& localComm,MPI_Comm& returnComm);12 static void initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm); 13 13 static void finalize(void); 14 static void registerContext(const string& id, MPI_Comm contextComm);14 static void registerContext(const string& id, ep_lib::MPI_Comm contextComm); 15 15 static void callOasisEnddef(void) ; 16 16 17 static MPI_Comm intraComm;18 static MPI_Comm interComm;19 static std::list< MPI_Comm> contextInterComms;17 static ep_lib::MPI_Comm intraComm; 18 static ep_lib::MPI_Comm interComm; 19 static std::list<ep_lib::MPI_Comm> contextInterComms; 20 20 static int serverLeader; 21 21 static bool is_MPI_Initialized ; 22 22 23 static MPI_Comm& getInterComm();23 static ep_lib::MPI_Comm& getInterComm(); 24 24 25 25 //! Get global rank without oasis and current rank in model intraComm in case of oasis -
XIOS/trunk/src/client_client_dht_template.hpp
r1542 r1638 40 40 public: 41 41 CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap, 42 const MPI_Comm& clientIntraComm);42 const ep_lib::MPI_Comm& clientIntraComm); 43 43 44 44 CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap, 45 const MPI_Comm& clientIntraComm);45 const ep_lib::MPI_Comm& clientIntraComm); 46 46 47 47 void computeIndexInfoMapping(const CArray<size_t,1>& indices); … … 55 55 56 56 protected: 57 CClientClientDHTTemplate(const MPI_Comm& clientIntraComm);57 CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm); 58 58 59 59 protected: … … 62 62 // Redistribute index and info among clients 63 63 void computeDistributedIndex(const Index2InfoTypeMap& indexInfoInitMap, 64 const MPI_Comm& intraCommLevel,64 const ep_lib::MPI_Comm& intraCommLevel, 65 65 int level); 66 66 67 67 void computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoInitMap, 68 const MPI_Comm& intraCommLevel,68 const ep_lib::MPI_Comm& intraCommLevel, 69 69 int level); 70 70 … … 73 73 74 74 void computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 75 const MPI_Comm& intraCommLevel,75 const ep_lib::MPI_Comm& intraCommLevel, 76 76 int level); 77 77 … … 85 85 // Send information to clients 86 86 void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 87 const MPI_Comm& clientIntraComm, 88 std::vector<MPI_Request>& requestSendInfo); 87 const ep_lib::MPI_Comm& clientIntraComm, 88 std::vector<ep_lib::MPI_Request>& requestSendInfo); 89 void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 90 const ep_lib::MPI_Comm& clientIntraComm, 91 ep_lib::MPI_Request* requestSendInfo); 89 92 90 93 void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 91 const MPI_Comm& clientIntraComm, 92 std::vector<MPI_Request>& requestRecvInfo); 94 const ep_lib::MPI_Comm& clientIntraComm, 95 std::vector<ep_lib::MPI_Request>& requestRecvInfo); 96 void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 97 const ep_lib::MPI_Comm& clientIntraComm, 98 ep_lib::MPI_Request* requestRecvInfo); 99 93 100 94 101 // Send global index to clients 95 102 void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 96 const MPI_Comm& clientIntraComm, 97 std::vector<MPI_Request>& requestSendIndexGlobal); 103 const ep_lib::MPI_Comm& clientIntraComm, 104 std::vector<ep_lib::MPI_Request>& requestSendIndexGlobal); 105 void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 106 const ep_lib::MPI_Comm& clientIntraComm, 107 ep_lib::MPI_Request* requestSendIndexGlobal); 98 108 99 109 void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 100 const MPI_Comm& clientIntraComm, 101 std::vector<MPI_Request>& requestRecvIndex); 110 const ep_lib::MPI_Comm& clientIntraComm, 111 std::vector<ep_lib::MPI_Request>& requestRecvIndex); 112 void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 113 const ep_lib::MPI_Comm& clientIntraComm, 114 ep_lib::MPI_Request* requestRecvIndex); 102 115 103 116 void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements, -
XIOS/trunk/src/client_client_dht_template_impl.hpp
r1542 r1638 14 14 { 15 15 template<typename T, typename H> 16 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)16 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm) 17 17 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 18 18 { 19 MPI_Comm_size(clientIntraComm, &nbClient_);19 ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 20 20 this->computeMPICommLevel(); 21 21 int nbLvl = this->getNbLevel(); … … 34 34 template<typename T, typename H> 35 35 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoMap, 36 const MPI_Comm& clientIntraComm)36 const ep_lib::MPI_Comm& clientIntraComm) 37 37 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 38 38 { 39 MPI_Comm_size(clientIntraComm, &nbClient_);39 ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 40 40 this->computeMPICommLevel(); 41 41 int nbLvl = this->getNbLevel(); … … 59 59 template<typename T, typename H> 60 60 CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoMap, 61 const MPI_Comm& clientIntraComm)61 const ep_lib::MPI_Comm& clientIntraComm) 62 62 : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0) 63 63 { 64 MPI_Comm_size(clientIntraComm, &nbClient_);64 ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_); 65 65 this->computeMPICommLevel(); 66 66 int nbLvl = this->getNbLevel(); … … 95 95 template<typename T, typename H> 96 96 void CClientClientDHTTemplate<T,H>::computeIndexInfoMappingLevel(const CArray<size_t,1>& indices, 97 const MPI_Comm& commLevel,97 const ep_lib::MPI_Comm& commLevel, 98 98 int level) 99 99 { 100 100 int clientRank; 101 MPI_Comm_rank(commLevel,&clientRank);101 ep_lib::MPI_Comm_rank(commLevel,&clientRank); 102 102 int groupRankBegin = this->getGroupBegin()[level]; 103 103 int nbClient = this->getNbInGroup()[level]; … … 169 169 recvIndexBuff = new unsigned long[recvNbIndexCount]; 170 170 171 std::vector<MPI_Request> request; 171 int request_size = 0; 172 for (int idx = 0; idx < recvRankClient.size(); ++idx) 173 { 174 if (0 != recvNbIndexClientCount[idx]) 175 request_size ++; 176 } 177 178 request_size += client2ClientIndex.size(); 179 180 std::vector<ep_lib::MPI_Request> request(request_size); 172 181 std::vector<int>::iterator itbRecvIndex = recvRankClient.begin(), itRecvIndex, 173 182 iteRecvIndex = recvRankClient.end(), … … 176 185 int currentIndex = 0; 177 186 int nbRecvClient = recvRankClient.size(); 187 int request_position = 0; 178 188 for (int idx = 0; idx < nbRecvClient; ++idx) 179 189 { 180 190 if (0 != recvNbIndexClientCount[idx]) 181 recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);191 recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]); 182 192 currentIndex += recvNbIndexClientCount[idx]; 183 193 } … … 186 196 iteIndex = client2ClientIndex.end(); 187 197 for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 188 sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);189 190 std::vector< MPI_Status> status(request.size());191 MPI_Waitall(request.size(), &request[0], &status[0]);198 sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, 
&request[request_position++]); 199 200 std::vector<ep_lib::MPI_Status> status(request.size()); 201 ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 192 202 193 203 CArray<size_t,1>* tmpGlobalIndex; … … 242 252 } 243 253 244 std::vector<MPI_Request> requestOnReturn; 254 int requestOnReturn_size=0; 255 for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) 256 { 257 if (0 != recvNbIndexOnReturn[idx]) 258 { 259 requestOnReturn_size += 2; 260 } 261 } 262 263 for (int idx = 0; idx < nbRecvClient; ++idx) 264 { 265 if (0 != sendNbIndexOnReturn[idx]) 266 { 267 requestOnReturn_size += 2; 268 } 269 } 270 271 int requestOnReturn_position=0; 272 273 std::vector<ep_lib::MPI_Request> requestOnReturn(requestOnReturn_size); 245 274 currentIndex = 0; 246 275 for (int idx = 0; idx < recvRankOnReturn.size(); ++idx) … … 248 277 if (0 != recvNbIndexOnReturn[idx]) 249 278 { 250 recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, requestOnReturn);279 recvIndexFromClients(recvRankOnReturn[idx], recvIndexBuffOnReturn+currentIndex, recvNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]); 251 280 recvInfoFromClients(recvRankOnReturn[idx], 252 281 recvInfoBuffOnReturn+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 253 282 recvNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), 254 commLevel, requestOnReturn);283 commLevel, &requestOnReturn[requestOnReturn_position++]); 255 284 } 256 285 currentIndex += recvNbIndexOnReturn[idx]; … … 286 315 287 316 sendIndexToClients(rank, client2ClientIndexOnReturn[rank], 288 sendNbIndexOnReturn[idx], commLevel, requestOnReturn);317 sendNbIndexOnReturn[idx], commLevel, &requestOnReturn[requestOnReturn_position++]); 289 318 sendInfoToClients(rank, client2ClientInfoOnReturn[rank], 290 sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, requestOnReturn);319 sendNbIndexOnReturn[idx]*ProcessDHTElement<InfoType>::typeSize(), commLevel, &requestOnReturn[requestOnReturn_position++]); 291 320 } 292 321 currentIndex += recvNbIndexClientCount[idx]; 293 322 } 294 323 295 std::vector< MPI_Status> statusOnReturn(requestOnReturn.size());296 MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]);324 std::vector<ep_lib::MPI_Status> statusOnReturn(requestOnReturn.size()); 325 ep_lib::MPI_Waitall(requestOnReturn.size(), &requestOnReturn[0], &statusOnReturn[0]); 297 326 298 327 Index2VectorInfoTypeMap indexToInfoMapping; … … 360 389 template<typename T, typename H> 361 390 void CClientClientDHTTemplate<T,H>::computeDistributedIndex(const Index2VectorInfoTypeMap& indexInfoMap, 362 const MPI_Comm& commLevel,391 const ep_lib::MPI_Comm& commLevel, 363 392 int level) 364 393 { 365 394 int clientRank; 366 MPI_Comm_rank(commLevel,&clientRank);395 ep_lib::MPI_Comm_rank(commLevel,&clientRank); 367 396 computeSendRecvRank(level, clientRank); 368 397 … … 439 468 // it will send a message to the correct clients. 
440 469 // Contents of the message are index and its corresponding informatioin 441 std::vector<MPI_Request> request; 470 int request_size = 0; 471 for (int idx = 0; idx < recvRankClient.size(); ++idx) 472 { 473 if (0 != recvNbIndexClientCount[idx]) 474 { 475 request_size += 2; 476 } 477 } 478 479 request_size += client2ClientIndex.size(); 480 request_size += client2ClientInfo.size(); 481 482 std::vector<ep_lib::MPI_Request> request(request_size); 483 442 484 int currentIndex = 0; 443 485 int nbRecvClient = recvRankClient.size(); 486 int request_position=0; 444 487 for (int idx = 0; idx < nbRecvClient; ++idx) 445 488 { 446 489 if (0 != recvNbIndexClientCount[idx]) 447 490 { 448 recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request); 491 recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]); 492 //if(clientRank==0) printf("recv index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request)); 449 493 recvInfoFromClients(recvRankClient[idx], 450 recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 451 recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(), 452 commLevel, request); 494 recvInfoBuff+currentIndex*ProcessDHTElement<InfoType>::typeSize(), 495 recvNbIndexClientCount[idx]*ProcessDHTElement<InfoType>::typeSize(), 496 commLevel, &request[request_position++]); 497 //if(clientRank==0) printf("recv info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request)); 453 498 } 454 499 currentIndex += recvNbIndexClientCount[idx]; … … 458 503 iteIndex = client2ClientIndex.end(); 459 504 for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex) 460 sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request); 505 { sendIndexToClients(itIndex->first, itIndex->second, sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]); 506 } //if(clientRank==0) printf("send index request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));} 461 507 std::unordered_map<int, unsigned char*>::iterator itbInfo = client2ClientInfo.begin(), itInfo, 462 508 iteInfo = client2ClientInfo.end(); 463 509 for (itInfo = itbInfo; itInfo != iteInfo; ++itInfo) 464 sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, request); 465 466 std::vector<MPI_Status> status(request.size()); 467 MPI_Waitall(request.size(), &request[0], &status[0]); 510 { sendInfoToClients(itInfo->first, itInfo->second, sendNbInfo[itInfo->first-groupRankBegin], commLevel, &request[request_position++]); 511 }// if(clientRank==0) printf("send info request = %p, inserted = %p, request = %d\n", &request[0], request.back(), *static_cast< int*>(request.back()->mpi_request));} 512 513 std::vector<ep_lib::MPI_Status> status(request.size()); 514 515 ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]); 468 516 469 517 Index2VectorInfoTypeMap indexToInfoMapping; … … 518 566 template<typename T, typename H> 519 567 void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 520 const MPI_Comm& clientIntraComm,521 std::vector< MPI_Request>& requestSendIndex)522 { 523 MPI_Request request;568 const 
ep_lib::MPI_Comm& clientIntraComm, 569 std::vector<ep_lib::MPI_Request>& requestSendIndex) 570 { 571 ep_lib::MPI_Request request; 524 572 requestSendIndex.push_back(request); 525 MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,573 ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG, 526 574 clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back())); 575 } 576 577 /*! 578 Send message containing index to clients 579 \param [in] clientDestRank rank of destination client 580 \param [in] indices index to send 581 \param [in] indiceSize size of index array to send 582 \param [in] clientIntraComm communication group of client 583 \param [in] requestSendIndex sending request 584 */ 585 template<typename T, typename H> 586 void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, 587 const ep_lib::MPI_Comm& clientIntraComm, 588 ep_lib::MPI_Request* requestSendIndex) 589 { 590 ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG, 591 clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex); 527 592 } 528 593 … … 536 601 template<typename T, typename H> 537 602 void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 538 const MPI_Comm& clientIntraComm,539 std::vector< MPI_Request>& requestRecvIndex)540 { 541 MPI_Request request;603 const ep_lib::MPI_Comm& clientIntraComm, 604 std::vector<ep_lib::MPI_Request>& requestRecvIndex) 605 { 606 ep_lib::MPI_Request request; 542 607 requestRecvIndex.push_back(request); 543 MPI_Irecv(indices, indiceSize, MPI_UNSIGNED_LONG,608 ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG, 544 609 clientSrcRank, MPI_DHT_INDEX, clientIntraComm, &(requestRecvIndex.back())); 610 } 611 612 /*! 613 Receive message containing index to clients 614 \param [in] clientDestRank rank of destination client 615 \param [in] indices index to send 616 \param [in] clientIntraComm communication group of client 617 \param [in] requestRecvIndex receiving request 618 */ 619 template<typename T, typename H> 620 void CClientClientDHTTemplate<T,H>::recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, 621 const ep_lib::MPI_Comm& clientIntraComm, 622 ep_lib::MPI_Request *requestRecvIndex) 623 { 624 ep_lib::MPI_Irecv(indices, indiceSize, EP_UNSIGNED_LONG, 625 clientSrcRank, MPI_DHT_INDEX, clientIntraComm, requestRecvIndex); 545 626 } 546 627 … … 555 636 template<typename T, typename H> 556 637 void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 557 const MPI_Comm& clientIntraComm,558 std::vector< MPI_Request>& requestSendInfo)559 { 560 MPI_Request request;638 const ep_lib::MPI_Comm& clientIntraComm, 639 std::vector<ep_lib::MPI_Request>& requestSendInfo) 640 { 641 ep_lib::MPI_Request request; 561 642 requestSendInfo.push_back(request); 562 643 563 MPI_Isend(info, infoSize, MPI_CHAR,644 ep_lib::MPI_Isend(info, infoSize, EP_CHAR, 564 645 clientDestRank, MPI_DHT_INFO, clientIntraComm, &(requestSendInfo.back())); 646 } 647 648 /*! 
649 Send message containing information to clients 650 \param [in] clientDestRank rank of destination client 651 \param [in] info info array to send 652 \param [in] infoSize info array size to send 653 \param [in] clientIntraComm communication group of client 654 \param [in] requestSendInfo sending request 655 */ 656 template<typename T, typename H> 657 void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, 658 const ep_lib::MPI_Comm& clientIntraComm, 659 ep_lib::MPI_Request *requestSendInfo) 660 { 661 ep_lib::MPI_Isend(info, infoSize, EP_CHAR, 662 clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo); 565 663 } 566 664 … … 575 673 template<typename T, typename H> 576 674 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 577 const MPI_Comm& clientIntraComm,578 std::vector< MPI_Request>& requestRecvInfo)579 { 580 MPI_Request request;675 const ep_lib::MPI_Comm& clientIntraComm, 676 std::vector<ep_lib::MPI_Request>& requestRecvInfo) 677 { 678 ep_lib::MPI_Request request; 581 679 requestRecvInfo.push_back(request); 582 680 583 MPI_Irecv(info, infoSize, MPI_CHAR,681 ep_lib::MPI_Irecv(info, infoSize, EP_CHAR, 584 682 clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back())); 683 } 684 685 /*! 686 Receive message containing information from other clients 687 \param [in] clientDestRank rank of destination client 688 \param [in] info info array to receive 689 \param [in] infoSize info array size to receive 690 \param [in] clientIntraComm communication group of client 691 \param [in] requestRecvInfo list of receiving request 692 */ 693 template<typename T, typename H> 694 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 695 const ep_lib::MPI_Comm& clientIntraComm, 696 ep_lib::MPI_Request* requestRecvInfo) 697 { 698 ep_lib::MPI_Irecv(info, infoSize, EP_CHAR, 699 clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo); 585 700 } 586 701 … … 651 766 { 652 767 recvNbElements.resize(recvNbRank.size()); 653 std::vector< MPI_Request> request(sendNbRank.size()+recvNbRank.size());654 std::vector< MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());768 std::vector<ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 769 std::vector<ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 655 770 656 771 int nRequest = 0; 657 772 for (int idx = 0; idx < recvNbRank.size(); ++idx) 658 773 { 659 MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT,774 ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, EP_INT, 660 775 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 661 776 ++nRequest; … … 664 779 for (int idx = 0; idx < sendNbRank.size(); ++idx) 665 780 { 666 MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT,781 ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, EP_INT, 667 782 sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 668 783 ++nRequest; 669 784 } 670 785 671 MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);786 ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 672 787 } 673 788 … … 696 811 std::vector<int> recvBuff(recvBuffSize*2,0); 697 812 698 std::vector< MPI_Request> request(sendBuffSize+recvBuffSize);699 std::vector< MPI_Status> requestStatus(sendBuffSize+recvBuffSize);813 std::vector<ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize); 814 
std::vector<ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 700 815 701 816 int nRequest = 0; 702 817 for (int idx = 0; idx < recvBuffSize; ++idx) 703 818 { 704 MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT,819 ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, EP_INT, 705 820 recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 706 821 ++nRequest; … … 716 831 for (int idx = 0; idx < sendBuffSize; ++idx) 717 832 { 718 MPI_Isend(&sendBuff[idx*2], 2, MPI_INT,833 ep_lib::MPI_Isend(&sendBuff[idx*2], 2, EP_INT, 719 834 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 720 835 ++nRequest; 721 836 } 722 837 723 MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);838 ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 724 839 int nbRecvRank = 0, nbRecvElements = 0; 725 840 recvNbRank.clear(); -
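The structural change in client_client_dht_template_impl.hpp concerns how non-blocking requests are stored: the number of sends and receives is now counted up front, the request vector is sized once, and each Isend/Irecv writes into a fixed slot through the new MPI_Request* overloads, instead of push_back followed by &requests.back(). Presumably this keeps every request object at a stable address while the EP layer tracks it. A condensed sketch of the pattern under that assumption; exchangeIndices, srcRanks and dstRanks are illustrative names, not XIOS identifiers:

    #include <vector>
    #include "mpi.hpp"

    void exchangeIndices(const std::vector<int>& srcRanks, const std::vector<int>& dstRanks,
                         std::vector<unsigned long>& recvBuf, std::vector<unsigned long>& sendBuf,
                         ep_lib::MPI_Comm comm)
    {
      // Size the request array once so that element addresses never change.
      std::vector<ep_lib::MPI_Request> request(srcRanks.size() + dstRanks.size());
      int pos = 0;

      for (std::size_t i = 0; i < srcRanks.size(); ++i)
        ep_lib::MPI_Irecv(&recvBuf[i], 1, EP_UNSIGNED_LONG, srcRanks[i], 0, comm, &request[pos++]);
      for (std::size_t i = 0; i < dstRanks.size(); ++i)
        ep_lib::MPI_Isend(&sendBuf[i], 1, EP_UNSIGNED_LONG, dstRanks[i], 0, comm, &request[pos++]);

      std::vector<ep_lib::MPI_Status> status(request.size());
      if (!request.empty())
        ep_lib::MPI_Waitall(static_cast<int>(request.size()), &request[0], &status[0]);
    }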
XIOS/trunk/src/client_server_mapping.cpp
r1025 r1638 30 30 */ 31 31 std::map<int,int> CClientServerMapping::computeConnectedClients(int nbServer, int nbClient, 32 MPI_Comm& clientIntraComm,32 ep_lib::MPI_Comm& clientIntraComm, 33 33 const std::vector<int>& connectedServerRank) 34 34 { … … 62 62 63 63 // get connected server for everybody 64 MPI_Allgather(&nbConnectedServer,1,MPI_INT,recvCount,1,MPI_INT,clientIntraComm) ;64 ep_lib::MPI_Allgather(&nbConnectedServer,1,EP_INT,recvCount,1,EP_INT,clientIntraComm) ; 65 65 66 66 displ[0]=0 ; … … 70 70 71 71 72 MPI_Allgatherv(sendBuff,nbConnectedServer,MPI_INT,recvBuff,recvCount,displ,MPI_INT,clientIntraComm) ;72 ep_lib::MPI_Allgatherv(sendBuff,nbConnectedServer,EP_INT,recvBuff,recvCount,displ,EP_INT,clientIntraComm) ; 73 73 for(int n=0;n<recvSize;n++) clientRes[recvBuff[n]]++ ; 74 74 -
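computeConnectedClients keeps its original two-phase collective, now through the wrapper: each client first publishes the length of its server list with MPI_Allgather, displacements are derived from those counts, and the variable-length lists are exchanged with MPI_Allgatherv. A standalone sketch of the idiom; gatherConnectedServers is an illustrative helper, not an XIOS function:

    #include <vector>
    #include "mpi.hpp"

    // Returns, on every client, the concatenation of all clients' server lists.
    std::vector<int> gatherConnectedServers(std::vector<int> myServers, ep_lib::MPI_Comm comm)
    {
      int nbClient;
      ep_lib::MPI_Comm_size(comm, &nbClient);

      int nbConnected = static_cast<int>(myServers.size());
      std::vector<int> recvCount(nbClient), displ(nbClient);
      ep_lib::MPI_Allgather(&nbConnected, 1, EP_INT, recvCount.data(), 1, EP_INT, comm);

      displ[0] = 0;
      for (int i = 1; i < nbClient; ++i) displ[i] = displ[i-1] + recvCount[i-1];
      int recvSize = displ[nbClient-1] + recvCount[nbClient-1];

      std::vector<int> all(recvSize);
      ep_lib::MPI_Allgatherv(myServers.data(), nbConnected, EP_INT,
                             all.data(), recvCount.data(), displ.data(), EP_INT, comm);
      return all;
    }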
XIOS/trunk/src/client_server_mapping.hpp
r1542 r1638 37 37 38 38 static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 39 MPI_Comm& clientIntraComm,39 ep_lib::MPI_Comm& clientIntraComm, 40 40 const std::vector<int>& connectedServerRank); 41 41 -
XIOS/trunk/src/client_server_mapping_distributed.cpp
r1542 r1638 20 20 21 21 CClientServerMappingDistributed::CClientServerMappingDistributed(const std::unordered_map<size_t,int>& globalIndexOfServer, 22 const MPI_Comm& clientIntraComm, bool isDataDistributed)22 const ep_lib::MPI_Comm& clientIntraComm, bool isDataDistributed) 23 23 : CClientServerMapping(), ccDHT_(0) 24 24 { -
XIOS/trunk/src/client_server_mapping_distributed.hpp
r1542 r1638 35 35 /** Default constructor */ 36 36 CClientServerMappingDistributed(const std::unordered_map<size_t,int>& globalIndexOfServer, 37 const MPI_Comm& clientIntraComm,37 const ep_lib::MPI_Comm& clientIntraComm, 38 38 bool isDataDistributed = true); 39 39 -
XIOS/trunk/src/context_client.cpp
r1615 r1638 21 21 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used in case of attached mode). 22 22 */ 23 CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_, CContext* cxtSer)23 CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer) 24 24 : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4) 25 25 { … … 27 27 intraComm = intraComm_; 28 28 interComm = interComm_; 29 MPI_Comm_rank(intraComm, &clientRank);30 MPI_Comm_size(intraComm, &clientSize);29 ep_lib::MPI_Comm_rank(intraComm, &clientRank); 30 ep_lib::MPI_Comm_size(intraComm, &clientSize); 31 31 32 32 int flag; 33 MPI_Comm_test_inter(interComm, &flag);34 if (flag) MPI_Comm_remote_size(interComm, &serverSize);35 else MPI_Comm_size(interComm, &serverSize);33 ep_lib::MPI_Comm_test_inter(interComm, &flag); 34 if (flag) ep_lib::MPI_Comm_remote_size(interComm, &serverSize); 35 else ep_lib::MPI_Comm_size(interComm, &serverSize); 36 36 37 37 computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader); … … 102 102 classId_in=event.getClassId() ; 103 103 // MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ; // MPI_UINT64_T standardized by MPI 3 104 MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm) ;105 MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm) ;106 MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm) ;104 ep_lib::MPI_Allreduce(&timeLine,&timeLine_out, 1, EP_LONG_LONG_INT, EP_SUM, intraComm) ; 105 ep_lib::MPI_Allreduce(&typeId_in,&typeId, 1, EP_INT, EP_SUM, intraComm) ; 106 ep_lib::MPI_Allreduce(&classId_in,&classId, 1, EP_INT, EP_SUM, intraComm) ; 107 107 if (typeId/clientSize!=event.getTypeId() || classId/clientSize!=event.getClassId() || timeLine_out/clientSize!=timeLine) 108 108 { … … 343 343 if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio; 344 344 } 345 MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 345 #ifdef _usingMPI 346 ep_lib::MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm); 347 #elif _usingEP 348 ep_lib::MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm); 349 #endif 346 350 347 351 if (minBufferSizeEventSizeRatio < 1.0) -
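One spot in context_client.cpp cannot be handled by the renaming alone: the in-place reduction. MPI_IN_PLACE is evidently not usable with the EP backend, so under _usingEP the same variable is passed as both send and receive buffer instead. The conditional, reassembled from the hunk above:

    #ifdef _usingMPI
      ep_lib::MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm);
    #elif _usingEP
      ep_lib::MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm);
    #endif

Aliasing the two buffers is not allowed by standard MPI (which is exactly what MPI_IN_PLACE exists for), so the second form relies on the EP layer accepting it.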
XIOS/trunk/src/context_client.hpp
r1232 r1638 27 27 public: 28 28 // Contructor 29 CContextClient(CContext* parent, MPI_Comm intraComm,MPI_Comm interComm, CContext* parentServer = 0);29 CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0); 30 30 31 31 // Send event to server … … 71 71 int serverSize; //!< Size of server group 72 72 73 MPI_Comm interComm; //!< Communicator of server group73 ep_lib::MPI_Comm interComm; //!< Communicator of server group 74 74 75 MPI_Comm intraComm; //!< Communicator of client group75 ep_lib::MPI_Comm intraComm; //!< Communicator of client group 76 76 77 77 map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers -
XIOS/trunk/src/context_server.cpp
r1230 r1638 23 23 { 24 24 25 CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_,MPI_Comm interComm_)25 CContextServer::CContextServer(CContext* parent,ep_lib::MPI_Comm intraComm_,ep_lib::MPI_Comm interComm_) 26 26 { 27 27 context=parent; 28 28 intraComm=intraComm_; 29 MPI_Comm_size(intraComm,&intraCommSize);30 MPI_Comm_rank(intraComm,&intraCommRank);29 ep_lib::MPI_Comm_size(intraComm,&intraCommSize); 30 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank); 31 31 32 32 interComm=interComm_; 33 33 int flag; 34 MPI_Comm_test_inter(interComm,&flag);35 if (flag) MPI_Comm_remote_size(interComm,&commSize);36 else MPI_Comm_size(interComm,&commSize);34 ep_lib::MPI_Comm_test_inter(interComm,&flag); 35 if (flag) ep_lib::MPI_Comm_remote_size(interComm,&commSize); 36 else ep_lib::MPI_Comm_size(interComm,&commSize); 37 37 38 38 currentTimeLine=0; … … 76 76 int count; 77 77 char * addr; 78 MPI_Status status;78 ep_lib::MPI_Status status; 79 79 map<int,CServerBuffer*>::iterator it; 80 80 bool okLoop; 81 81 82 82 traceOff(); 83 #ifdef _usingMPI 83 84 MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status); 85 #elif _usingEP 86 ep_lib::MPI_Iprobe(-2, 20,interComm,&flag,&status); 87 #endif 84 88 traceOn(); 85 89 86 90 if (flag==true) 87 91 { 92 #ifdef _usingMPI 88 93 rank=status.MPI_SOURCE ; 94 #elif _usingEP 95 rank=status.ep_src ; 96 #endif 89 97 okLoop = true; 90 98 if (pendingRequest.find(rank)==pendingRequest.end()) … … 98 106 99 107 traceOff(); 100 MPI_Iprobe(rank, 20,interComm,&flag,&status);108 ep_lib::MPI_Iprobe(rank, 20,interComm,&flag,&status); 101 109 traceOn(); 102 110 if (flag==true) listenPendingRequest(status) ; … … 107 115 } 108 116 109 bool CContextServer::listenPendingRequest( MPI_Status& status)117 bool CContextServer::listenPendingRequest(ep_lib::MPI_Status& status) 110 118 { 111 119 int count; 112 120 char * addr; 113 121 map<int,CServerBuffer*>::iterator it; 122 #ifdef _usingMPI 114 123 int rank=status.MPI_SOURCE ; 124 #elif _usingEP 125 int rank=status.ep_src ; 126 #endif 115 127 116 128 it=buffers.find(rank); … … 118 130 { 119 131 StdSize buffSize = 0; 120 MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status);132 ep_lib::MPI_Recv(&buffSize, 1, EP_LONG, rank, 20, interComm, &status); 121 133 mapBufferSize_.insert(std::make_pair(rank, buffSize)); 122 134 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first; … … 125 137 else 126 138 { 127 MPI_Get_count(&status,MPI_CHAR,&count);139 ep_lib::MPI_Get_count(&status,EP_CHAR,&count); 128 140 if (it->second->isBufferFree(count)) 129 141 { 130 142 addr=(char*)it->second->getBuffer(count); 131 MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);143 ep_lib::MPI_Irecv(addr,count,EP_CHAR,rank,20,interComm,&pendingRequest[rank]); 132 144 bufferRequest[rank]=addr; 133 145 return true; … … 141 153 void CContextServer::checkPendingRequest(void) 142 154 { 143 map<int, MPI_Request>::iterator it;155 map<int,ep_lib::MPI_Request>::iterator it; 144 156 list<int> recvRequest; 145 157 list<int>::iterator itRecv; … … 147 159 int flag; 148 160 int count; 149 MPI_Status status;161 ep_lib::MPI_Status status; 150 162 151 163 for(it=pendingRequest.begin();it!=pendingRequest.end();it++) … … 153 165 rank=it->first; 154 166 traceOff(); 155 MPI_Test(& it->second, &flag, &status);167 ep_lib::MPI_Test(& it->second, &flag, &status); 156 168 traceOn(); 157 169 if (flag==true) 158 170 { 159 171 recvRequest.push_back(rank); 160 MPI_Get_count(&status,MPI_CHAR,&count);172 
ep_lib::MPI_Get_count(&status,EP_CHAR,&count); 161 173 processRequest(rank,bufferRequest[rank],count); 162 174 } … … 218 230 // The best way to properly solve this problem will be to use the event scheduler also in attached mode 219 231 // for now just set up a MPI barrier 220 if (!CServer::eventScheduler && CXios::isServer) MPI_Barrier(intraComm) ;232 if (!CServer::eventScheduler && CXios::isServer) ep_lib::MPI_Barrier(intraComm) ; 221 233 222 234 CTimer::get("Process events").resume(); -
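The listening loop in context_server.cpp shows the two places where the EP API still differs from standard MPI after the renaming: the any-source wildcard (MPI_ANY_SOURCE versus the literal -2) and the status field that carries the sender's rank (MPI_SOURCE versus ep_src). Condensed from the hunks above:

    int flag = 0, rank = -1;
    ep_lib::MPI_Status status;
    #ifdef _usingMPI
      MPI_Iprobe(MPI_ANY_SOURCE, 20, interComm, &flag, &status);
      if (flag) rank = status.MPI_SOURCE;
    #elif _usingEP
      ep_lib::MPI_Iprobe(-2, 20, interComm, &flag, &status);
      if (flag) rank = status.ep_src;
    #endif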
XIOS/trunk/src/context_server.hpp
r1228 r1638 14 14 public: 15 15 16 CContextServer(CContext* parent, MPI_Comm intraComm,MPI_Comm interComm) ;16 CContextServer(CContext* parent,ep_lib::MPI_Comm intraComm,ep_lib::MPI_Comm interComm) ; 17 17 bool eventLoop(bool enableEventsProcessing = true); 18 18 void listen(void) ; 19 bool listenPendingRequest( MPI_Status& status) ;19 bool listenPendingRequest(ep_lib::MPI_Status& status) ; 20 20 void checkPendingRequest(void) ; 21 21 void processRequest(int rank, char* buff,int count) ; … … 26 26 bool hasPendingEvent(void) ; 27 27 28 MPI_Comm intraComm ;28 ep_lib::MPI_Comm intraComm ; 29 29 int intraCommSize ; 30 30 int intraCommRank ; 31 31 32 MPI_Comm interComm ;32 ep_lib::MPI_Comm interComm ; 33 33 int commSize ; 34 34 35 35 map<int,CServerBuffer*> buffers ; 36 map<int, MPI_Request> pendingRequest ;36 map<int,ep_lib::MPI_Request> pendingRequest ; 37 37 map<int,char*> bufferRequest ; 38 38 -
XIOS/trunk/src/cxios.cpp
r1622 r1638 26 26 bool CXios::isClient ; 27 27 bool CXios::isServer ; 28 MPI_Comm CXios::globalComm ;28 ep_lib::MPI_Comm CXios::globalComm ; 29 29 bool CXios::usingOasis ; 30 30 bool CXios::usingServer = false; … … 90 90 91 91 checkEventSync = getin<bool>("check_event_sync", checkEventSync); 92 92 #ifdef _usingMPI 93 93 globalComm=MPI_COMM_WORLD ; 94 #elif _usingEP 95 ep_lib::MPI_Comm *ep_comm; 96 ep_lib::MPI_Info info; 97 ep_lib::MPI_Comm_create_endpoints(EP_COMM_WORLD->mpi_comm, 1, info, ep_comm); 98 ep_lib::passage = ep_comm; 99 globalComm=ep_lib::passage[0] ; 100 #endif 94 101 } 95 102 … … 100 107 \param [in/out] returnComm communicator corresponding to group of client with same codeId 101 108 */ 102 void CXios::initClientSide(const string& codeId, MPI_Comm& localComm,MPI_Comm& returnComm)109 void CXios::initClientSide(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) 103 110 TRY 104 111 { 112 isClient = true; 113 isServer = false; 114 105 115 initialize() ; 106 116 107 isClient = true;108 117 109 118 CClient::initialize(codeId,localComm,returnComm) ; 110 119 if (CClient::getRank()==0) globalRegistry = new CRegistry(returnComm) ; 120 111 121 112 122 // If there are no server processes then we are in attached mode … … 167 177 void CXios::initServerSide(void) 168 178 { 179 isClient = false; 180 isServer = true; 181 169 182 initServer(); 170 183 isClient = false; 171 184 isServer = true; 172 173 185 // Initialize all aspects MPI 174 186 CServer::initialize(); … … 223 235 int firstPoolGlobalRank = secondaryServerGlobalRanks[0]; 224 236 int rankGlobal; 225 MPI_Comm_rank(globalComm, &rankGlobal);237 ep_lib::MPI_Comm_rank(globalComm, &rankGlobal); 226 238 227 239 // Merge registries defined on each pools … … 235 247 globalRegistrySndServers.mergeRegistry(*globalRegistry) ; 236 248 int registrySize = globalRegistrySndServers.size(); 237 MPI_Send(®istrySize,1,MPI_LONG,firstPoolGlobalRank,15,CXios::globalComm) ;249 ep_lib::MPI_Send(®istrySize,1,EP_LONG,firstPoolGlobalRank,15,CXios::globalComm) ; 238 250 CBufferOut buffer(registrySize) ; 239 251 globalRegistrySndServers.toBuffer(buffer) ; 240 MPI_Send(buffer.start(),registrySize,MPI_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ;252 ep_lib::MPI_Send(buffer.start(),registrySize,EP_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ; 241 253 } 242 254 } … … 245 257 if (rankGlobal == firstPoolGlobalRank) 246 258 { 247 MPI_Status status;259 ep_lib::MPI_Status status; 248 260 char* recvBuff; 249 261 … … 254 266 int rank = secondaryServerGlobalRanks[i]; 255 267 int registrySize = 0; 256 MPI_Recv(®istrySize, 1, MPI_LONG, rank, 15, CXios::globalComm, &status);268 ep_lib::MPI_Recv(®istrySize, 1, EP_LONG, rank, 15, CXios::globalComm, &status); 257 269 recvBuff = new char[registrySize]; 258 MPI_Recv(recvBuff, registrySize, MPI_CHAR, rank, 15, CXios::globalComm, &status);270 ep_lib::MPI_Recv(recvBuff, registrySize, EP_CHAR, rank, 15, CXios::globalComm, &status); 259 271 CBufferIn buffer(recvBuff, registrySize) ; 260 272 CRegistry recvRegistry; -
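cxios.cpp is where the two backends genuinely diverge at start-up: the plain MPI build simply aliases MPI_COMM_WORLD, while the EP build creates one endpoint per MPI process with MPI_Comm_create_endpoints and takes the first endpoint communicator as globalComm. The hunk, reassembled for readability:

    #ifdef _usingMPI
      globalComm = MPI_COMM_WORLD;
    #elif _usingEP
      ep_lib::MPI_Comm *ep_comm;
      ep_lib::MPI_Info info;
      ep_lib::MPI_Comm_create_endpoints(EP_COMM_WORLD->mpi_comm, 1, info, ep_comm);
      ep_lib::passage = ep_comm;
      globalComm = ep_lib::passage[0];
    #endif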
XIOS/trunk/src/cxios.hpp
r1622 r1638 15 15 public: 16 16 static void initialize(void) ; 17 static void initClientSide(const string & codeId, MPI_Comm& localComm,MPI_Comm& returnComm) ;17 static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 18 18 static void initServerSide(void) ; 19 19 static void clientFinalize(void) ; … … 40 40 static bool isServer ; //!< Check if xios is server 41 41 42 static MPI_Comm globalComm ; //!< Global communicator42 static ep_lib::MPI_Comm globalComm ; //!< Global communicator 43 43 44 44 static bool printLogs2Files; //!< Printing out logs into files -
XIOS/trunk/src/dht_auto_indexing.cpp
r1158 r1638 22 22 23 23 CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 24 const MPI_Comm& clientIntraComm)24 const ep_lib::MPI_Comm& clientIntraComm) 25 25 : CClientClientDHTTemplate<size_t>(clientIntraComm) 26 26 { … … 28 28 nbIndexOnProc_ = hashValue.size(); 29 29 size_t nbIndexAccum; 30 MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, clientIntraComm);30 ep_lib::MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, EP_UNSIGNED_LONG, EP_SUM, clientIntraComm); 31 31 32 32 // Broadcasting the total number of indexes 33 33 int rank, size; 34 MPI_Comm_rank(clientIntraComm, &rank);35 MPI_Comm_size(clientIntraComm, &size);34 ep_lib::MPI_Comm_rank(clientIntraComm, &rank); 35 ep_lib::MPI_Comm_size(clientIntraComm, &size); 36 36 if (rank == (size-1)) nbIndexesGlobal_ = nbIndexAccum; 37 MPI_Bcast(&nbIndexesGlobal_, 1, MPI_UNSIGNED_LONG, size-1, clientIntraComm);37 ep_lib::MPI_Bcast(&nbIndexesGlobal_, 1, EP_UNSIGNED_LONG, size-1, clientIntraComm); 38 38 39 39 CArray<size_t,1>::const_iterator itbIdx = hashValue.begin(), itIdx, … … 58 58 */ 59 59 CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 60 const MPI_Comm& clientIntraComm)60 const ep_lib::MPI_Comm& clientIntraComm) 61 61 : CClientClientDHTTemplate<size_t>(clientIntraComm) 62 62 { … … 64 64 nbIndexOnProc_ = hashInitMap.size(); 65 65 size_t nbIndexAccum; 66 MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, clientIntraComm);66 ep_lib::MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, EP_UNSIGNED_LONG, EP_SUM, clientIntraComm); 67 67 68 68 int rank, size; 69 MPI_Comm_rank(clientIntraComm, &rank);70 MPI_Comm_size(clientIntraComm, &size);69 ep_lib::MPI_Comm_rank(clientIntraComm, &rank); 70 ep_lib::MPI_Comm_size(clientIntraComm, &size); 71 71 if (rank == (size-1)) nbIndexesGlobal_ = nbIndexAccum; 72 MPI_Bcast(&nbIndexesGlobal_, 1, MPI_UNSIGNED_LONG, size-1, clientIntraComm);72 ep_lib::MPI_Bcast(&nbIndexesGlobal_, 1, EP_UNSIGNED_LONG, size-1, clientIntraComm); 73 73 74 74 Index2VectorInfoTypeMap::iterator itbIdx = hashInitMap.begin(), itIdx, -
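The DHT auto-indexing keeps its original arithmetic, now through the wrapper: an inclusive MPI_Scan gives each process the cumulated index count up to and including itself, and the global total is read on the last rank and broadcast back. A self-contained sketch of that scheme; globalIndexOffsets is an illustrative helper, not XIOS code, and the offset computation is the usual "scan minus local count" step implied by the surrounding code:

    #include <cstddef>
    #include "mpi.hpp"

    void globalIndexOffsets(size_t nbIndexOnProc, ep_lib::MPI_Comm comm,
                            size_t& offset, size_t& nbIndexesGlobal)
    {
      int rank, size;
      ep_lib::MPI_Comm_rank(comm, &rank);
      ep_lib::MPI_Comm_size(comm, &size);

      size_t nbIndexAccum = 0;
      ep_lib::MPI_Scan(&nbIndexOnProc, &nbIndexAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm);
      offset = nbIndexAccum - nbIndexOnProc;                  // first global index owned locally

      if (rank == size - 1) nbIndexesGlobal = nbIndexAccum;   // last rank holds the full sum
      ep_lib::MPI_Bcast(&nbIndexesGlobal, 1, EP_UNSIGNED_LONG, size - 1, comm);
    }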
XIOS/trunk/src/dht_auto_indexing.hpp
r924 r1638 25 25 26 26 CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 27 const MPI_Comm& clientIntraComm);27 const ep_lib::MPI_Comm& clientIntraComm); 28 28 29 29 CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 30 const MPI_Comm& clientIntraComm);30 const ep_lib::MPI_Comm& clientIntraComm); 31 31 32 32 size_t getNbIndexesGlobal() const; -
XIOS/trunk/src/event_scheduler.cpp
r1224 r1638 8 8 9 9 10 CEventScheduler::CEventScheduler(const MPI_Comm& comm)11 { 12 MPI_Comm_dup(comm, &communicator) ;13 MPI_Comm_size(communicator,&mpiSize) ;14 MPI_Comm_rank(communicator,&mpiRank);10 CEventScheduler::CEventScheduler(const ep_lib::MPI_Comm& comm) 11 { 12 ep_lib::MPI_Comm_dup(comm, &communicator) ; 13 ep_lib::MPI_Comm_size(communicator,&mpiSize) ; 14 ep_lib::MPI_Comm_rank(communicator,&mpiRank); 15 15 16 16 … … 88 88 89 89 pendingSentParentRequest.push(sentRequest) ; 90 MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ;90 ep_lib::MPI_Isend(sentRequest->buffer,3, EP_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ; 91 91 traceOn() ; 92 92 } … … 115 115 { 116 116 int completed ; 117 MPI_Status status ;117 ep_lib::MPI_Status status ; 118 118 int received ; 119 119 SPendingRequest* recvRequest ; … … 135 135 while(received) 136 136 { 137 #ifdef _usingMPI 137 138 MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ; 139 #elif _usingEP 140 ep_lib::MPI_Iprobe(-2,1,communicator,&received, &status) ; 141 #endif 138 142 if (received) 139 143 { 140 144 recvRequest=new SPendingRequest ; 145 #ifdef _usingMPI 141 146 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ; 147 #elif _usingEP 148 ep_lib::MPI_Irecv(recvRequest->buffer, 3, EP_UNSIGNED_LONG, -2, 1, communicator, &(recvRequest->request)) ; 149 #endif 142 150 pendingRecvParentRequest.push(recvRequest) ; 143 151 } … … 149 157 { 150 158 recvRequest=pendingRecvParentRequest.front() ; 151 MPI_Test( &(recvRequest->request), &completed, &status) ;159 ep_lib::MPI_Test( &(recvRequest->request), &completed, &status) ; 152 160 if (completed) 153 161 { … … 169 177 // function call only by parent mpi process 170 178 171 MPI_Status status ;179 ep_lib::MPI_Status status ; 172 180 int received ; 173 181 received=true ; … … 177 185 while(received) 178 186 { 187 #ifdef _usingMPI 179 188 MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ; 189 #elif _usingEP 190 ep_lib::MPI_Iprobe(-2,0,communicator,&received, &status) ; 191 #endif 180 192 if (received) 181 193 { 182 194 recvRequest=new SPendingRequest ; 195 #ifdef _usingMPI 183 196 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ; 197 #elif _usingEP 198 ep_lib::MPI_Irecv(recvRequest->buffer, 3, EP_UNSIGNED_LONG, -2, 0, communicator, &recvRequest->request) ; 199 #endif 184 200 pendingRecvChildRequest.push_back(recvRequest) ; 185 201 } … … 190 206 for(list<SPendingRequest*>::iterator it=pendingRecvChildRequest.begin(); it!=pendingRecvChildRequest.end() ; ) 191 207 { 192 MPI_Test(&((*it)->request),&received,&status) ;208 ep_lib::MPI_Test(&((*it)->request),&received,&status) ; 193 209 if (received) 194 210 { … … 228 244 for(list<SPendingRequest*>::iterator it=pendingSentChildRequest.begin(); it!=pendingSentChildRequest.end() ; ) 229 245 { 230 MPI_Test(&(*it)->request,&received,&status) ;246 ep_lib::MPI_Test(&(*it)->request,&received,&status) ; 231 247 if (received) 232 248 { … … 251 267 sentRequest->buffer[1]=contextHashId ; 252 268 sentRequest->buffer[2]=lev+1 ; 253 MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, child[lev][i], 1, communicator, & sentRequest->request) ;269 ep_lib::MPI_Isend(sentRequest->buffer,3, EP_UNSIGNED_LONG, child[lev][i], 1, communicator, & sentRequest->request) ; 254 270 pendingSentChildRequest.push_back(sentRequest) ; 255 271 } -
XIOS/trunk/src/event_scheduler.hpp
r591 r1638 26 26 * @param[in] comm : MPI communicator du duplicate for internal use 27 27 */ 28 CEventScheduler(const MPI_Comm& comm) ;28 CEventScheduler(const ep_lib::MPI_Comm& comm) ; 29 29 30 30 … … 151 151 { 152 152 size_t buffer[3] ; /*!< communication buffer : timeLine, hashId, level */ 153 MPI_Request request ; /*!< pending MPI request */153 ep_lib::MPI_Request request ; /*!< pending MPI request */ 154 154 } ; 155 155 156 MPI_Comm communicator ; /*!< Internal MPI communicator */156 ep_lib::MPI_Comm communicator ; /*!< Internal MPI communicator */ 157 157 int mpiRank ; /*!< Rank in the communicator */ 158 158 int mpiSize ; /*!< Size of the communicator */ -
XIOS/trunk/src/filter/spatial_transform_filter.cpp
r1637 r1638 224 224 225 225 idxSendBuff = 0; 226 std::vector< MPI_Request> sendRecvRequest;226 std::vector<ep_lib::MPI_Request> sendRecvRequest; 227 227 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 228 228 { … … 234 234 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 235 235 } 236 sendRecvRequest.push_back( MPI_Request());237 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());236 sendRecvRequest.push_back(ep_lib::MPI_Request()); 237 ep_lib::MPI_Isend(sendBuff[idxSendBuff], countSize, EP_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 238 238 } 239 239 … … 252 252 int srcRank = itRecv->first; 253 253 int countSize = itRecv->second.size(); 254 sendRecvRequest.push_back( MPI_Request());255 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());254 sendRecvRequest.push_back(ep_lib::MPI_Request()); 255 ep_lib::MPI_Irecv(recvBuff + currentBuff, countSize, EP_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 256 256 currentBuff += countSize; 257 257 } 258 std::vector< MPI_Status> status(sendRecvRequest.size());259 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]);258 std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size()); 259 ep_lib::MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 260 260 261 261 dataCurrentDest.resize(*itNbListRecv); -
XIOS/trunk/src/interface/c/icdata.cpp
r1622 r1638 60 60 { 61 61 std::string str; 62 MPI_Comm local_comm;63 MPI_Comm return_comm;62 ep_lib::MPI_Comm local_comm; 63 ep_lib::MPI_Comm return_comm; 64 64 65 65 if (!cstr2string(client_id, len_client_id, str)) return; 66 66 67 67 int initialized; 68 MPI_Initialized(&initialized); 68 ep_lib::MPI_Initialized(&initialized); 69 #ifdef _usingMPI 69 70 if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 70 71 else local_comm=MPI_COMM_NULL; 72 #elif _usingEP 73 if (initialized) local_comm=EP_Comm_f2c(f_local_comm); 74 else local_comm=EP_COMM_NULL; 75 #endif 71 76 CXios::initClientSide(str, local_comm, return_comm); 77 #ifdef _usingMPI 72 78 *f_return_comm=MPI_Comm_c2f(return_comm); 79 #elif _usingEP 80 *f_return_comm=*static_cast<MPI_Fint* >(EP_Comm_c2f(return_comm)); 81 #endif 73 82 CTimer::get("XIOS init").suspend(); 74 83 CTimer::get("XIOS").suspend(); … … 80 89 { 81 90 std::string str; 82 MPI_Comm comm;91 ep_lib::MPI_Comm comm; 83 92 84 93 if (!cstr2string(context_id, len_context_id, str)) return; 85 94 CTimer::get("XIOS").resume(); 86 95 CTimer::get("XIOS init context").resume(); 96 #ifdef _usingMPI 87 97 comm=MPI_Comm_f2c(*f_comm); 98 #elif _usingEP 99 comm=EP_Comm_f2c(f_comm); 100 #endif 88 101 CClient::registerContext(str, comm); 89 102 CTimer::get("XIOS init context").suspend(); -
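At the C/Fortran interface the communicator handles are converted explicitly, with a different converter per backend: the standard MPI_Comm_f2c / MPI_Comm_c2f for plain MPI, EP_Comm_f2c / EP_Comm_c2f for the endpoint library. Condensed from the cxios_init_client hunk above:

    ep_lib::MPI_Comm local_comm, return_comm;
    #ifdef _usingMPI
      if (initialized) local_comm = MPI_Comm_f2c(*f_local_comm);
      else             local_comm = MPI_COMM_NULL;
    #elif _usingEP
      if (initialized) local_comm = EP_Comm_f2c(f_local_comm);
      else             local_comm = EP_COMM_NULL;
    #endif
    CXios::initClientSide(str, local_comm, return_comm);
    #ifdef _usingMPI
      *f_return_comm = MPI_Comm_c2f(return_comm);
    #elif _usingEP
      *f_return_comm = *static_cast<MPI_Fint*>(EP_Comm_c2f(return_comm));
    #endif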
XIOS/trunk/src/interface/c/oasis_cinterface.cpp
r501 r1638 21 21 } 22 22 23 void oasis_get_localcomm( MPI_Comm& comm)23 void oasis_get_localcomm(ep_lib::MPI_Comm& comm) 24 24 { 25 MPI_Fint f_comm ;25 ep_lib::MPI_Fint f_comm ; 26 26 27 27 fxios_oasis_get_localcomm(&f_comm) ; 28 comm=MPI_Comm_f2c(f_comm) ;28 //comm=MPI_Comm_f2c(f_comm) ; 29 29 } 30 30 31 void oasis_get_intracomm( MPI_Comm& comm_client_server,const std::string& server_id)31 void oasis_get_intracomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) 32 32 { 33 MPI_Fint f_comm ;33 ep_lib::MPI_Fint f_comm ; 34 34 35 35 fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 36 comm_client_server=MPI_Comm_f2c(f_comm) ;36 //comm_client_server=MPI_Comm_f2c(f_comm) ; 37 37 } 38 38 39 void oasis_get_intercomm( MPI_Comm& comm_client_server,const std::string& server_id)39 void oasis_get_intercomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) 40 40 { 41 MPI_Fint f_comm ;41 ep_lib::MPI_Fint f_comm ; 42 42 43 43 fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 44 comm_client_server=MPI_Comm_f2c(f_comm) ;44 //comm_client_server=MPI_Comm_f2c(f_comm) ; 45 45 } 46 46 } -
XIOS/trunk/src/interface/c/oasis_cinterface.hpp
r501 r1638 10 10 void fxios_oasis_enddef(void) ; 11 11 void fxios_oasis_finalize(void) ; 12 void fxios_oasis_get_localcomm( MPI_Fint* f_comm) ;13 void fxios_oasis_get_intracomm( MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;14 void fxios_oasis_get_intercomm( MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;12 void fxios_oasis_get_localcomm(ep_lib::MPI_Fint* f_comm) ; 13 void fxios_oasis_get_intracomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 14 void fxios_oasis_get_intercomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 15 15 } 16 16 … … 20 20 void oasis_enddef(void) ; 21 21 void oasis_finalize(void) ; 22 void oasis_get_localcomm( MPI_Comm& comm) ;23 void oasis_get_intracomm( MPI_Comm& comm_client_server,const std::string& server_id) ;24 void oasis_get_intercomm( MPI_Comm& comm_client_server,const std::string& server_id) ;22 void oasis_get_localcomm(ep_lib::MPI_Comm& comm) ; 23 void oasis_get_intracomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 24 void oasis_get_intercomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 25 25 } 26 26 #endif -
XIOS/trunk/src/io/inetcdf4.cpp
r1534 r1638 7 7 namespace xios 8 8 { 9 CINetCDF4::CINetCDF4(const StdString& filename, const MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/,9 CINetCDF4::CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, 10 10 bool readMetaDataPar /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 11 11 { … … 14 14 { 15 15 int commSize = 0; 16 MPI_Comm_size(*comm, &commSize);16 ep_lib::MPI_Comm_size(*comm, &commSize); 17 17 if (commSize <= 1) 18 18 comm = NULL; … … 23 23 // even if Parallel NetCDF ends up being used. 24 24 if (mpi) 25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp);25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, EP_INFO_NULL, this->ncidp); 26 26 else 27 27 CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); -
XIOS/trunk/src/io/inetcdf4.hpp
r1485 r1638 22 22 public: 23 23 /// Constructors /// 24 CINetCDF4(const StdString& filename, const MPI_Comm* comm = NULL, bool multifile = true,24 CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 25 25 bool readMetaDataPar = false, const StdString& timeCounterName = "time_counter"); 26 26 -
XIOS/trunk/src/io/nc4_data_input.cpp
r1622 r1638 10 10 namespace xios 11 11 { 12 CNc4DataInput::CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/,12 CNc4DataInput::CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, 13 13 bool readMetaDataPar /*= false*/, bool ugridConvention /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 14 14 : SuperClass() -
XIOS/trunk/src/io/nc4_data_input.hpp
r1486 r1638 23 23 24 24 /// Constructors /// 25 CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective = true,25 CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 26 26 bool readMetaDataPar = false, bool ugridConvention = false, const StdString& timeCounterName = "time_counter"); 27 27 CNc4DataInput(const CNc4DataInput& dataInput); // Not implemented. … … 70 70 private: 71 71 /// Private attributes /// 72 MPI_Comm comm_file;72 ep_lib::MPI_Comm comm_file; 73 73 const StdString filename; 74 74 bool isCollective; -
XIOS/trunk/src/io/nc4_data_output.cpp
r1637 r1638 28 28 CNc4DataOutput::CNc4DataOutput 29 29 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 30 MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 31 31 : SuperClass() 32 32 , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) -
XIOS/trunk/src/io/nc4_data_output.hpp
r1542 r1638 27 27 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, 28 28 bool useCFConvention, 29 MPI_Comm comm_file, bool multifile, bool isCollective = true,29 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 30 30 const StdString& timeCounterName = "time_counter"); 31 31 … … 117 117 118 118 /// Propriétés privées /// 119 MPI_Comm comm_file;119 ep_lib::MPI_Comm comm_file; 120 120 const StdString filename; 121 121 std::map<Time, StdSize> timeToRecordCache; -
XIOS/trunk/src/io/netCdfInterface.cpp
r1454 r1638 10 10 #include "netCdfInterface.hpp" 11 11 #include "netCdfException.hpp" 12 12 #include "ep_mpi.hpp" 13 13 namespace xios 14 14 { … … 47 47 \return Status code 48 48 */ 49 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, MPI_Comm comm, MPI_Info info, int& ncId) 50 { 49 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId) 50 { 51 #ifdef _usingMPI 51 52 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId); 53 #elif _usingEP 54 int status = xios::nc_create_par(fileName.c_str(), cMode, to_mpi_comm(comm->mpi_comm), to_mpi_info(info), &ncId); 55 #endif 52 56 if (NC_NOERR != status) 53 57 { … … 100 104 \return Status code 101 105 */ 102 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, MPI_Comm comm, MPI_Info info, int& ncId) 103 { 106 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId) 107 { 108 #ifdef _usingMPI 104 109 int status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); 110 #elif _usingEP 111 int status = xios::nc_open_par(fileName.c_str(), oMode, to_mpi_comm(comm->mpi_comm), to_mpi_info(info), &ncId); 112 #endif 105 113 if (NC_NOERR != status) 106 114 { -
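NetCDF only understands genuine MPI handles, so the parallel create/open paths unwrap the EP communicator before calling into the library; the newly included ep_mpi.hpp is assumed to provide the to_mpi_comm / to_mpi_info converters used here. The create path, reassembled from the hunk above:

    #ifdef _usingMPI
      int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId);
    #elif _usingEP
      int status = xios::nc_create_par(fileName.c_str(), cMode,
                                       to_mpi_comm(comm->mpi_comm), to_mpi_info(info), &ncId);
    #endif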
XIOS/trunk/src/io/netCdfInterface.hpp
r811 r1638 32 32 33 33 //! Create a netcdf file on a parallel file system 34 static int createPar(const StdString& path, int cmode, MPI_Comm comm,MPI_Info info, int& ncId);34 static int createPar(const StdString& path, int cmode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId); 35 35 36 36 //! Open a netcdf file … … 38 38 39 39 //! Open a netcdf file 40 static int openPar(const StdString& path, int cmode, MPI_Comm comm,MPI_Info info, int& ncId);40 static int openPar(const StdString& path, int cmode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId); 41 41 42 42 //! Close a netcdf file -
XIOS/trunk/src/io/onetcdf4.cpp
r1456 r1638 15 15 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 16 16 bool useCFConvention, 17 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)17 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 18 18 : path() 19 19 , wmpi(false) … … 33 33 34 34 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 35 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)35 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 36 36 { 37 37 this->useClassicFormat = useClassicFormat; … … 44 44 { 45 45 int commSize = 0; 46 MPI_Comm_size(*comm, &commSize);46 ep_lib::MPI_Comm_size(*comm, &commSize); 47 47 if (commSize <= 1) 48 48 comm = NULL; … … 58 58 CTimer::get("Files : create").resume(); 59 59 if (wmpi) 60 CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);60 CNetCdfInterface::createPar(filename, mode, *comm, EP_INFO_NULL, this->ncidp); 61 61 else 62 62 CNetCdfInterface::create(filename, mode, this->ncidp); … … 70 70 CTimer::get("Files : open").resume(); 71 71 if (wmpi) 72 CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);72 CNetCdfInterface::openPar(filename, mode, *comm, EP_INFO_NULL, this->ncidp); 73 73 else 74 74 CNetCdfInterface::open(filename, mode, this->ncidp); -
XIOS/trunk/src/io/onetcdf4.hpp
r1456 r1638 28 28 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 29 29 bool useCFConvention = true, 30 const MPI_Comm* comm = NULL, bool multifile = true,30 const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 31 31 const StdString& timeCounterName = "time_counter"); 32 32 … … 37 37 /// Initialisation /// 38 38 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 39 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);39 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 40 40 void close(void); 41 41 void sync(void); -
XIOS/trunk/src/mpi.hpp
r501 r1638 11 11 #define OMPI_SKIP_MPICXX 12 12 13 #ifdef _usingEP 14 #include "ep_lib.hpp" 15 #include "ep_declaration.hpp" 16 #endif 17 13 18 #include <mpi.h> 14 19 20 #ifdef _usingMPI 21 22 #define ep_lib 23 24 #define EP_INT MPI_INT 25 #define EP_FLOAT MPI_FLOAT 26 #define EP_DOUBLE MPI_DOUBLE 27 #define EP_CHAR MPI_CHAR 28 #define EP_LONG MPI_LONG 29 #define EP_LONG_LONG_INT MPI_LONG_LONG_INT 30 #define EP_UNSIGNED_LONG MPI_UNSIGNED_LONG 31 #define EP_UNSIGNED_CHAR MPI_UNSIGNED_CHAR 32 33 34 #define EP_COMM_WORLD MPI_COMM_WORLD 35 #define EP_COMM_NULL MPI_COMM_NULL 36 #define EP_INFO_NULL MPI_INFO_NULL 37 38 #define EP_MAX MPI_MAX 39 #define EP_MIN MPI_MIN 40 #define EP_SUM MPI_SUM 41 #define EP_LOR MPI_LOR 42 15 43 #endif 44 45 #endif -
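This new mpi.hpp is what makes the renaming elsewhere cheap: under _usingEP it pulls in ep_lib.hpp and ep_declaration.hpp, while under _usingMPI it #defines ep_lib to nothing and maps every EP_* token back onto its MPI_* counterpart, so code written once against the wrapper collapses to plain MPI at preprocessing time. Illustrative snippet, not part of the changeset:

    // In a _usingMPI build the preprocessor rewrites both lines to
    // ::MPI_Comm_rank(comm, &rank) and ::MPI_Bcast(&value, 1, MPI_INT, 0, comm).
    ep_lib::MPI_Comm_rank(comm, &rank);
    ep_lib::MPI_Bcast(&value, 1, EP_INT, 0, comm);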
XIOS/trunk/src/node/axis.cpp
r1637 r1638 130 130 \return the number of indexes written by each server 131 131 */ 132 int CAxis::getNumberWrittenIndexes( MPI_Comm writtenCom)132 int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 133 133 TRY 134 134 { 135 135 int writtenSize; 136 MPI_Comm_size(writtenCom, &writtenSize);136 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 137 137 return numberWrittenIndexes_[writtenSize]; 138 138 } … … 143 143 \return the total number of indexes written by the servers 144 144 */ 145 int CAxis::getTotalNumberWrittenIndexes( MPI_Comm writtenCom)145 int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 146 146 TRY 147 147 { 148 148 int writtenSize; 149 MPI_Comm_size(writtenCom, &writtenSize);149 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 150 150 return totalNumberWrittenIndexes_[writtenSize]; 151 151 } … … 156 156 \return the offset of indexes written by each server 157 157 */ 158 int CAxis::getOffsetWrittenIndexes( MPI_Comm writtenCom)158 int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 159 159 TRY 160 160 { 161 161 int writtenSize; 162 MPI_Comm_size(writtenCom, &writtenSize);162 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 163 163 return offsetWrittenIndexes_[writtenSize]; 164 164 } 165 165 CATCH_DUMP_ATTR 166 166 167 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer( MPI_Comm writtenCom)167 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 168 168 TRY 169 169 { 170 170 int writtenSize; 171 MPI_Comm_size(writtenCom, &writtenSize);171 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 172 172 return compressedIndexToWriteOnServer[writtenSize]; 173 173 } … … 768 768 CATCH_DUMP_ATTR 769 769 770 void CAxis::computeWrittenCompressedIndex( MPI_Comm writtenComm)770 void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 771 771 TRY 772 772 { 773 773 int writtenCommSize; 774 MPI_Comm_size(writtenComm, &writtenCommSize);774 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 775 775 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 776 776 return; … … 850 850 { 851 851 852 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);853 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);852 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm); 853 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm); 854 854 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 855 855 } -
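
The Allreduce/Scan pair at the end of this hunk is a standard idiom: the Allreduce gives every server the total number of written indexes, and the inclusive Scan minus the local count gives each server its starting offset. A minimal standalone sketch, with myCount standing in for numberWrittenIndexes_:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int myCount = rank + 1;           // stand-in for numberWrittenIndexes_
      int total = 0, offset = 0;

      // Everybody learns the global total...
      MPI_Allreduce(&myCount, &total, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      // ...and the inclusive prefix sum; subtracting the local count turns it
      // into the exclusive offset at which this rank starts writing.
      MPI_Scan(&myCount, &offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      offset -= myCount;

      std::printf("rank %d: offset=%d total=%d\n", rank, offset, total);
      MPI_Finalize();
      return 0;
    }
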
XIOS/trunk/src/node/axis.hpp
r1637 r1638 68 68 const std::set<StdString> & getRelFiles(void) const; 69 69 70 int getNumberWrittenIndexes( MPI_Comm writtenCom);71 int getTotalNumberWrittenIndexes( MPI_Comm writtenCom);72 int getOffsetWrittenIndexes( MPI_Comm writtenCom);73 CArray<int, 1>& getCompressedIndexToWriteOnServer( MPI_Comm writtenCom);70 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 71 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 72 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 73 CArray<int, 1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 74 74 75 75 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid, … … 113 113 114 114 void computeWrittenIndex(); 115 void computeWrittenCompressedIndex( MPI_Comm);115 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 116 116 bool hasTransformation(); 117 117 void solveInheritanceTransformation(); -
XIOS/trunk/src/node/context.cpp
r1622 r1638 265 265 266 266 //! Initialize client side 267 void CContext::initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer /*= 0*/)267 void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 268 268 TRY 269 269 { 270 270 271 271 hasClient = true; 272 MPI_Comm intraCommServer, interCommServer;272 ep_lib::MPI_Comm intraCommServer, interCommServer; 273 273 274 274 … … 284 284 else 285 285 { 286 MPI_Comm_dup(intraComm, &intraCommServer);286 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 287 287 comms.push_back(intraCommServer); 288 MPI_Comm_dup(interComm, &interCommServer);288 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 289 289 comms.push_back(interCommServer); 290 290 } … … 309 309 { 310 310 clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 311 MPI_Comm_dup(intraComm, &intraCommServer);311 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 312 312 comms.push_back(intraCommServer); 313 MPI_Comm_dup(interComm, &interCommServer);313 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 314 314 comms.push_back(interCommServer); 315 315 serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); … … 383 383 CATCH_DUMP_ATTR 384 384 385 void CContext::initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/)385 void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 386 386 TRY 387 387 { … … 402 402 registryOut->setPath(contextRegistryId) ; 403 403 404 MPI_Comm intraCommClient, interCommClient;404 ep_lib::MPI_Comm intraCommClient, interCommClient; 405 405 if (cxtClient) // Attached mode 406 406 { … … 410 410 else 411 411 { 412 MPI_Comm_dup(intraComm, &intraCommClient);412 ep_lib::MPI_Comm_dup(intraComm, &intraCommClient); 413 413 comms.push_back(intraCommClient); 414 MPI_Comm_dup(interComm, &interCommClient);414 ep_lib::MPI_Comm_dup(interComm, &interCommClient); 415 415 comms.push_back(interCommClient); 416 416 } … … 502 502 503 503 //! Free internally allocated communicators 504 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)505 MPI_Comm_free(&(*it));504 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 505 ep_lib::MPI_Comm_free(&(*it)); 506 506 comms.clear(); 507 507 … … 544 544 545 545 //! Free internally allocated communicators 546 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)547 MPI_Comm_free(&(*it));546 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 547 ep_lib::MPI_Comm_free(&(*it)); 548 548 comms.clear(); 549 549 … … 560 560 TRY 561 561 { 562 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)563 MPI_Comm_free(&(*it));562 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 563 ep_lib::MPI_Comm_free(&(*it)); 564 564 comms.clear(); 565 565 } -
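
The repeated MPI_Comm_dup / comms.push_back / MPI_Comm_free sequence in this hunk is simple bookkeeping: every communicator the context duplicates is recorded so that finalize can release them all. A compact sketch of that pattern only (CContext itself manages far more state):

    #include <mpi.h>
    #include <list>

    struct CommPool
    {
      std::list<MPI_Comm> comms;                    // mirrors CContext::comms

      MPI_Comm duplicate(MPI_Comm src)
      {
        MPI_Comm dup;
        MPI_Comm_dup(src, &dup);
        comms.push_back(dup);                       // remember it for later release
        return dup;
      }

      void freeAll()
      {
        for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
          MPI_Comm_free(&(*it));
        comms.clear();
      }
    };

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      CommPool pool;
      MPI_Comm intraCommServer = pool.duplicate(MPI_COMM_WORLD);
      MPI_Comm interCommServer = pool.duplicate(MPI_COMM_WORLD);  // stand-in for a real inter-communicator
      (void)intraCommServer; (void)interCommServer;
      pool.freeAll();                                // as done at context finalization
      MPI_Finalize();
      return 0;
    }
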
XIOS/trunk/src/node/context.hpp
r1622 r1638 88 88 public : 89 89 // Initialize server or client 90 void initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer = 0);91 void initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient = 0);90 void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 91 void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 92 92 bool isInitialized(void); 93 93 … … 263 263 StdString idServer_; 264 264 CGarbageCollector garbageCollector; 265 std::list< MPI_Comm> comms; //!< Communicators allocated internally265 std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 266 266 267 267 public: // Some function maybe removed in the near future -
XIOS/trunk/src/node/domain.cpp
r1637 r1638 99 99 \return the number of indexes written by each server 100 100 */ 101 int CDomain::getNumberWrittenIndexes( MPI_Comm writtenCom)101 int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 102 102 TRY 103 103 { 104 104 int writtenSize; 105 MPI_Comm_size(writtenCom, &writtenSize);105 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 106 106 return numberWrittenIndexes_[writtenSize]; 107 107 } … … 112 112 \return the total number of indexes written by the servers 113 113 */ 114 int CDomain::getTotalNumberWrittenIndexes( MPI_Comm writtenCom)114 int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 115 115 TRY 116 116 { 117 117 int writtenSize; 118 MPI_Comm_size(writtenCom, &writtenSize);118 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 119 119 return totalNumberWrittenIndexes_[writtenSize]; 120 120 } … … 125 125 \return the offset of indexes written by each server 126 126 */ 127 int CDomain::getOffsetWrittenIndexes( MPI_Comm writtenCom)127 int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 128 128 TRY 129 129 { 130 130 int writtenSize; 131 MPI_Comm_size(writtenCom, &writtenSize);131 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 132 132 return offsetWrittenIndexes_[writtenSize]; 133 133 } 134 134 CATCH_DUMP_ATTR 135 135 136 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer( MPI_Comm writtenCom)136 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 137 137 TRY 138 138 { 139 139 int writtenSize; 140 MPI_Comm_size(writtenCom, &writtenSize);140 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 141 141 return compressedIndexToWriteOnServer[writtenSize]; 142 142 } … … 690 690 int v ; 691 691 v=ibegin ; 692 MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ;692 ep_lib::MPI_Allgather(&v,1,EP_INT,ibegin_g,1,EP_INT,client->intraComm) ; 693 693 v=jbegin ; 694 MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ;694 ep_lib::MPI_Allgather(&v,1,EP_INT,jbegin_g,1,EP_INT,client->intraComm) ; 695 695 v=ni ; 696 MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ;696 ep_lib::MPI_Allgather(&v,1,EP_INT,ni_g,1,EP_INT,client->intraComm) ; 697 697 v=nj ; 698 MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ;699 700 MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ;701 MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ;698 ep_lib::MPI_Allgather(&v,1,EP_INT,nj_g,1,EP_INT,client->intraComm) ; 699 700 ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,EP_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,EP_DOUBLE,client->intraComm) ; 701 ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,EP_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,EP_DOUBLE,client->intraComm) ; 702 702 703 703 delete[] ibegin_g ; … … 1932 1932 displs[0] = 0; 1933 1933 int localCount = connectedServerRank_[nbServer].size() ; 1934 MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;1934 ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ; 1935 1935 for (int i = 0; i < clientSize-1; ++i) 1936 1936 { … … 1938 1938 } 1939 1939 std::vector<int> allConnectedServers(displs[clientSize-1]+counts[clientSize-1]); 1940 MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);1940 ep_lib::MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, EP_INT, 
&allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm); 1941 1941 1942 1942 if ((allConnectedServers.size() != nbServer) && (rank == 0)) … … 2003 2003 CATCH_DUMP_ATTR 2004 2004 2005 void CDomain::computeWrittenCompressedIndex( MPI_Comm writtenComm)2005 void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 2006 2006 TRY 2007 2007 { 2008 2008 int writtenCommSize; 2009 MPI_Comm_size(writtenComm, &writtenCommSize);2009 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 2010 2010 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 2011 2011 return; … … 2064 2064 { 2065 2065 2066 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);2067 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);2066 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm); 2067 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm); 2068 2068 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 2069 2069 } -
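
The Gather/Gatherv sequence above collects each client's variable-length list of connected server ranks on rank 0 so the root can verify that every server is reached. A standalone sketch of the same collective sequence with made-up data:

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      // Each rank owns a list whose length differs from rank to rank.
      std::vector<int> connected(rank + 1, rank);
      int localCount = static_cast<int>(connected.size());

      std::vector<int> counts(size), displs(size);
      MPI_Gather(&localCount, 1, MPI_INT, counts.data(), 1, MPI_INT, 0, MPI_COMM_WORLD);

      int totalCount = 0;
      if (rank == 0)
        for (int i = 0; i < size; ++i) { displs[i] = totalCount; totalCount += counts[i]; }

      std::vector<int> all(rank == 0 ? totalCount : 0);
      MPI_Gatherv(connected.data(), localCount, MPI_INT,
                  all.data(), counts.data(), displs.data(), MPI_INT, 0, MPI_COMM_WORLD);

      if (rank == 0) std::printf("gathered %zu entries on root\n", all.size());
      MPI_Finalize();
      return 0;
    }
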
XIOS/trunk/src/node/domain.hpp
r1637 r1638 94 94 bool isWrittenCompressed(const StdString& filename) const; 95 95 96 int getNumberWrittenIndexes( MPI_Comm writtenCom);97 int getTotalNumberWrittenIndexes( MPI_Comm writtenCom);98 int getOffsetWrittenIndexes( MPI_Comm writtenCom);99 CArray<int,1>& getCompressedIndexToWriteOnServer( MPI_Comm writtenCom);96 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 97 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 98 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 99 CArray<int,1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 100 100 101 101 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); … … 116 116 117 117 void computeWrittenIndex(); 118 void computeWrittenCompressedIndex( MPI_Comm);118 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 119 119 120 120 void AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, -
XIOS/trunk/src/node/field.cpp
r1637 r1638
      if (!nstepMaxRead)
      {
+     #ifdef _usingMPI
        MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm);
+     #elif _usingEP
+       ep_lib::MPI_Allreduce(&nstepMax, &nstepMax, 1, EP_INT, EP_MAX, context->server->intraComm);
+     #endif
        nstepMaxRead = true;
      }
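
This is one of the few places where the two builds need different source: classic MPI reduces in place with MPI_IN_PLACE, while the endpoint build passes the same variable as send and receive buffer, presumably because the EP layer has no MPI_IN_PLACE equivalent. A sketch of the guarded call; the _usingEP branch is left as a comment because aliased buffers are only legal for the EP routine, not for standard MPI_Allreduce:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int nstepMax = rank;              // illustrative per-rank value
    #ifdef _usingEP
      // ep_lib::MPI_Allreduce(&nstepMax, &nstepMax, 1, EP_INT, EP_MAX, intraComm);
    #else
      MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    #endif

      if (rank == 0) std::printf("max step = %d\n", nstepMax);
      MPI_Finalize();
      return 0;
    }
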
XIOS/trunk/src/node/file.cpp
r1622 r1638 25 25 CFile::CFile(void) 26 26 : CObjectTemplate<CFile>(), CFileAttributes() 27 , vFieldGroup(), data_out(), enabledFields(), fileComm( MPI_COMM_NULL)27 , vFieldGroup(), data_out(), enabledFields(), fileComm(EP_COMM_NULL) 28 28 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 29 29 { … … 34 34 CFile::CFile(const StdString & id) 35 35 : CObjectTemplate<CFile>(id), CFileAttributes() 36 , vFieldGroup(), data_out(), enabledFields(), fileComm( MPI_COMM_NULL)36 , vFieldGroup(), data_out(), enabledFields(), fileComm(EP_COMM_NULL) 37 37 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 38 38 { … … 307 307 308 308 int color = allZoneEmpty ? 0 : 1; 309 MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);310 if (allZoneEmpty) MPI_Comm_free(&fileComm);309 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 310 if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm); 311 311 } 312 312 CATCH_DUMP_ATTR … … 554 554 { 555 555 int commSize, commRank; 556 MPI_Comm_size(fileComm, &commSize);557 MPI_Comm_rank(fileComm, &commRank);556 ep_lib::MPI_Comm_size(fileComm, &commSize); 557 ep_lib::MPI_Comm_rank(fileComm, &commRank); 558 558 559 559 if (server->intraCommSize > 1) … … 634 634 CContext* context = CContext::getCurrent(); 635 635 CContextServer* server = context->server; 636 MPI_Comm readComm = this->fileComm;636 ep_lib::MPI_Comm readComm = this->fileComm; 637 637 638 638 if (!allZoneEmpty) … … 677 677 { 678 678 int commSize, commRank; 679 MPI_Comm_size(readComm, &commSize);680 MPI_Comm_rank(readComm, &commRank);679 ep_lib::MPI_Comm_size(readComm, &commSize); 680 ep_lib::MPI_Comm_rank(readComm, &commRank); 681 681 682 682 if (server->intraCommSize > 1) … … 722 722 isOpen = false; 723 723 } 724 if (fileComm != MPI_COMM_NULL)MPI_Comm_free(&fileComm);724 if (fileComm != EP_COMM_NULL) ep_lib::MPI_Comm_free(&fileComm); 725 725 } 726 726 CATCH_DUMP_ATTR -
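
The color split above groups only the server processes that actually hold data for the file, and the processes on the empty side free their communicator immediately. A minimal sketch, with hasData standing in for !allZoneEmpty:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      bool hasData = (rank % 2 == 0);   // illustrative: pretend odd ranks hold nothing
      int color = hasData ? 1 : 0;

      MPI_Comm fileComm;
      MPI_Comm_split(MPI_COMM_WORLD, color, rank, &fileComm);

      if (!hasData)
      {
        MPI_Comm_free(&fileComm);       // ranks with nothing to write drop their communicator
      }
      else
      {
        int writers;
        MPI_Comm_size(fileComm, &writers);
        std::printf("rank %d writes with %d peers\n", rank, writers);
        MPI_Comm_free(&fileComm);
      }
      MPI_Finalize();
      return 0;
    }
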
XIOS/trunk/src/node/file.hpp
r1622 r1638 175 175 int nbAxis, nbDomains; 176 176 bool isOpen; 177 MPI_Comm fileComm;177 ep_lib::MPI_Comm fileComm; 178 178 179 179 private: -
XIOS/trunk/src/node/grid.cpp
r1637 r1638 661 661 { 662 662 CContextServer* server = CContext::getCurrent()->server; 663 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);664 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);663 ep_lib::MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm); 664 ep_lib::MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm); 665 665 offsetWrittenIndexes_ -= numberWrittenIndexes_; 666 666 } … … 856 856 displs[0] = 0; 857 857 int localCount = connectedServerRank_[receiverSize].size() ; 858 MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ;858 ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ; 859 859 for (int i = 0; i < client->clientSize-1; ++i) 860 860 { … … 862 862 } 863 863 std::vector<int> allConnectedServers(displs[client->clientSize-1]+counts[client->clientSize-1]); 864 MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm);864 ep_lib::MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, EP_INT, &allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm); 865 865 866 866 if ((allConnectedServers.size() != receiverSize) && (client->clientRank == 0)) -
XIOS/trunk/src/node/mesh.cpp
r1542 r1638 414 414 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 415 415 */ 416 void CMesh::createMeshEpsilon(const MPI_Comm& comm,416 void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm, 417 417 const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 418 418 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) … … 421 421 int nvertex = (bounds_lon.numElements() == 0) ? 1 : bounds_lon.rows(); 422 422 int mpiRank, mpiSize; 423 MPI_Comm_rank(comm, &mpiRank);424 MPI_Comm_size(comm, &mpiSize);423 ep_lib::MPI_Comm_rank(comm, &mpiRank); 424 ep_lib::MPI_Comm_size(comm, &mpiSize); 425 425 double prec = 1e-11; // used in calculations of edge_lon/lat 426 426 … … 460 460 unsigned long nbEdgesOnProc = nbEdges_; 461 461 unsigned long nbEdgesAccum; 462 MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);462 ep_lib::MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 463 463 nbEdgesAccum -= nbEdges_; 464 464 … … 590 590 unsigned long nodeCount = nodeIdx2Idx.size(); 591 591 unsigned long nodeStart, nbNodes; 592 MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);592 ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 593 593 int nNodes = nodeStart; 594 MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);594 ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm); 595 595 nbNodesGlo = nNodes; 596 596 … … 683 683 unsigned long nbFacesOnProc = nbFaces_; 684 684 unsigned long nbFacesAccum; 685 MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);685 ep_lib::MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 686 686 nbFacesAccum -= nbFaces_; 687 687 … … 807 807 808 808 unsigned long edgeStart, nbEdges; 809 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);809 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 810 810 int nEdges = edgeStart; 811 MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);811 ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm); 812 812 nbEdgesGlo = nEdges; 813 813 … … 1028 1028 unsigned long edgeCount = edgeIdx2Idx.size(); 1029 1029 unsigned long edgeStart, nbEdges; 1030 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);1030 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 1031 1031 int nEdges = edgeStart; 1032 MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);1032 ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm); 1033 1033 nbEdgesGlo = nEdges; 1034 1034 … … 1298 1298 unsigned long nodeCount = nodeIdx2Idx.size(); 1299 1299 unsigned long nodeStart, nbNodes; 1300 MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);1300 ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 1301 1301 int nNodes = nodeStart; 1302 MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);1302 ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm); 1303 1303 nbNodesGlo = nNodes; 1304 1304 … … 1418 1418 unsigned long edgeCount = edgeIdx2Idx.size(); 1419 1419 unsigned long edgeStart, nbEdges; 1420 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm);1420 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm); 1421 1421 int nEdges = edgeStart; 1422 MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm);1422 ep_lib::MPI_Bcast(&nEdges, 1, 
EP_UNSIGNED_LONG, mpiSize-1, comm); 1423 1423 nbEdgesGlo = nEdges; 1424 1424 … … 1614 1614 */ 1615 1615 1616 void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,1616 void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 1617 1617 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1618 1618 CArray<int, 2>& nghbFaces) … … 1623 1623 1624 1624 int mpiRank, mpiSize; 1625 MPI_Comm_rank(comm, &mpiRank);1626 MPI_Comm_size(comm, &mpiSize);1625 ep_lib::MPI_Comm_rank(comm, &mpiRank); 1626 ep_lib::MPI_Comm_size(comm, &mpiSize); 1627 1627 1628 1628 // (1) Generating unique node indexes … … 1770 1770 */ 1771 1771 1772 void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,1772 void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx, 1773 1773 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1774 1774 CArray<int, 2>& nghbFaces) … … 1779 1779 1780 1780 int mpiRank, mpiSize; 1781 MPI_Comm_rank(comm, &mpiRank);1782 MPI_Comm_size(comm, &mpiSize);1781 ep_lib::MPI_Comm_rank(comm, &mpiRank); 1782 ep_lib::MPI_Comm_size(comm, &mpiSize); 1783 1783 1784 1784 // (1) Generating unique node indexes … … 1951 1951 */ 1952 1952 1953 void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm,1953 void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm, 1954 1954 const CArray<int, 1>& face_idx, 1955 1955 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, -
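
The Scan followed by a broadcast from the last rank, repeated for nodes and edges in this hunk, is how each process obtains both its starting global index and the global count. A simplified sketch with int counts instead of unsigned long:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      int localCount = 2 * rank + 1;    // illustrative number of new entities on this rank
      int inclusive = 0;
      MPI_Scan(&localCount, &inclusive, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

      int start = inclusive - localCount;   // first global index owned by this rank
      int globalCount = inclusive;          // correct only on the last rank so far...
      MPI_Bcast(&globalCount, 1, MPI_INT, size - 1, MPI_COMM_WORLD);  // ...now correct everywhere

      std::printf("rank %d: indices [%d, %d), global total %d\n",
                  rank, start, start + localCount, globalCount);
      MPI_Finalize();
      return 0;
    }
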
XIOS/trunk/src/node/mesh.hpp
r1542 r1638 60 60 const CArray<double, 2>&, const CArray<double, 2>& ); 61 61 62 void createMeshEpsilon(const MPI_Comm&,62 void createMeshEpsilon(const ep_lib::MPI_Comm&, 63 63 const CArray<double, 1>&, const CArray<double, 1>&, 64 64 const CArray<double, 2>&, const CArray<double, 2>& ); 65 65 66 void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,66 void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 67 67 const CArray<double, 2>&, const CArray<double, 2>&, 68 68 CArray<int, 2>&); … … 87 87 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo> 88 88 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo> 89 void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);90 void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);89 void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 90 void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 91 91 void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 92 92 void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); -
XIOS/trunk/src/policy.cpp
r855 r1638 49 49 //} 50 50 51 DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm)51 DivideAdaptiveComm::DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm) 52 52 : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false) 53 53 { … … 61 61 62 62 int mpiSize, mpiRank; 63 MPI_Comm_size(internalComm_,&mpiSize);64 MPI_Comm_rank(internalComm_,&mpiRank);63 ep_lib::MPI_Comm_size(internalComm_,&mpiSize); 64 ep_lib::MPI_Comm_rank(internalComm_,&mpiRank); 65 65 66 66 int maxChild=1; -
XIOS/trunk/src/policy.hpp
r855 r1638 31 31 { 32 32 protected: 33 DivideAdaptiveComm(const MPI_Comm& mpiComm);33 DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 34 34 35 35 void computeMPICommLevel(); … … 41 41 42 42 protected: 43 const MPI_Comm& internalComm_;43 const ep_lib::MPI_Comm& internalComm_; 44 44 std::vector<std::vector<int> > groupParentsBegin_; 45 45 std::vector<std::vector<int> > nbInGroupParents_; -
XIOS/trunk/src/registry.cpp
r696 r1638 191 191 { 192 192 int rank ; 193 MPI_Comm_rank(communicator,&rank);193 ep_lib::MPI_Comm_rank(communicator,&rank); 194 194 if (rank==0) 195 195 { … … 197 197 this->toBuffer(buffer) ; 198 198 int size=buffer.count() ; 199 MPI_Bcast(&size,1,MPI_INT,0,communicator) ;200 MPI_Bcast(buffer.start(),size,MPI_CHAR,0,communicator) ;199 ep_lib::MPI_Bcast(&size,1,EP_INT,0,communicator) ; 200 ep_lib::MPI_Bcast(buffer.start(),size,EP_CHAR,0,communicator) ; 201 201 } 202 202 else 203 203 { 204 204 int size ; 205 MPI_Bcast(&size,1,MPI_INT,0,communicator) ;205 ep_lib::MPI_Bcast(&size,1,EP_INT,0,communicator) ; 206 206 CBufferIn buffer(size) ; 207 MPI_Bcast(buffer.start(),size,MPI_CHAR,0,communicator) ;207 ep_lib::MPI_Bcast(buffer.start(),size,EP_CHAR,0,communicator) ; 208 208 this->fromBuffer(buffer) ; 209 209 } … … 214 214 } 215 215 216 void CRegistry::gatherRegistry(const MPI_Comm& comm)216 void CRegistry::gatherRegistry(const ep_lib::MPI_Comm& comm) 217 217 { 218 218 int rank,mpiSize ; 219 MPI_Comm_rank(comm,&rank);220 MPI_Comm_size(comm,&mpiSize);219 ep_lib::MPI_Comm_rank(comm,&rank); 220 ep_lib::MPI_Comm_size(comm,&mpiSize); 221 221 222 222 int* sizes=new int[mpiSize] ; … … 224 224 this->toBuffer(localBuffer) ; 225 225 int localSize=localBuffer.count() ; 226 MPI_Gather(&localSize,1,MPI_INT,sizes,1,MPI_INT,0,comm) ;226 ep_lib::MPI_Gather(&localSize,1,EP_INT,sizes,1,EP_INT,0,comm) ; 227 227 228 228 char* globalBuffer ; … … 240 240 241 241 globalBuffer=new char[globalBufferSize] ; 242 MPI_Gatherv(localBuffer.start(),localSize,MPI_CHAR,globalBuffer,sizes,displs,MPI_CHAR,0,comm) ;242 ep_lib::MPI_Gatherv(localBuffer.start(),localSize,EP_CHAR,globalBuffer,sizes,displs,EP_CHAR,0,comm) ; 243 243 for(int i=1;i<mpiSize;++i) 244 244 { … … 251 251 delete[] globalBuffer ; 252 252 } 253 else MPI_Gatherv(localBuffer.start(),localSize,MPI_CHAR,globalBuffer,sizes,displs,MPI_CHAR,0,comm) ;253 else ep_lib::MPI_Gatherv(localBuffer.start(),localSize,EP_CHAR,globalBuffer,sizes,displs,EP_CHAR,0,comm) ; 254 254 delete[] sizes ; 255 255 … … 261 261 } 262 262 263 void CRegistry::hierarchicalGatherRegistry(const MPI_Comm& comm)263 void CRegistry::hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) 264 264 { 265 265 int mpiRank,mpiSize ; 266 MPI_Comm_rank(comm,&mpiRank);267 MPI_Comm_size(comm,&mpiSize);266 ep_lib::MPI_Comm_rank(comm,&mpiRank); 267 ep_lib::MPI_Comm_size(comm,&mpiSize); 268 268 269 269 if (mpiSize>2) … … 272 272 if (mpiRank<mpiSize/2+mpiSize%2) color=0 ; 273 273 else color=1 ; 274 MPI_Comm commUp ;275 MPI_Comm_split(comm,color,mpiRank,&commUp) ,274 ep_lib::MPI_Comm commUp ; 275 ep_lib::MPI_Comm_split(comm,color,mpiRank,&commUp) , 276 276 hierarchicalGatherRegistry(commUp) ; 277 MPI_Comm_free(&commUp) ;277 ep_lib::MPI_Comm_free(&commUp) ; 278 278 } 279 279 280 280 if (mpiSize>1) 281 281 { 282 MPI_Comm commDown ;282 ep_lib::MPI_Comm commDown ; 283 283 int color ; 284 284 285 285 if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ; 286 286 else color=1 ; 287 MPI_Comm_split(comm,color,mpiRank,&commDown) ;287 ep_lib::MPI_Comm_split(comm,color,mpiRank,&commDown) ; 288 288 if (color==0) gatherRegistry(commDown) ; 289 MPI_Comm_free(&commDown) ;289 ep_lib::MPI_Comm_free(&commDown) ; 290 290 } 291 291 } -
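
The block at the top of this hunk serializes the registry on rank 0 and broadcasts it in two steps, first the byte count and then the bytes, while hierarchicalGatherRegistry further down splits the communicator recursively to gather in stages. A sketch of the two-step broadcast alone, with std::string standing in for CBufferOut/CBufferIn:

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      std::string payload;
      if (rank == 0) payload = "key=value;other=42";   // root-only serialized state

      int size = static_cast<int>(payload.size());
      MPI_Bcast(&size, 1, MPI_INT, 0, MPI_COMM_WORLD);            // step 1: length

      std::vector<char> buffer(size);
      if (rank == 0) std::copy(payload.begin(), payload.end(), buffer.begin());
      MPI_Bcast(buffer.data(), size, MPI_CHAR, 0, MPI_COMM_WORLD); // step 2: bytes

      if (rank != 0) payload.assign(buffer.begin(), buffer.end()); // "fromBuffer" on receivers
      std::printf("rank %d has registry: %s\n", rank, payload.c_str());
      MPI_Finalize();
      return 0;
    }
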
XIOS/trunk/src/registry.hpp
r700 r1638 23 23 24 24 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 25 CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}25 CRegistry(const ep_lib::MPI_Comm& comm=EP_COMM_WORLD) : communicator(comm) {} 26 26 27 27 /** Copy constructor */ … … 106 106 107 107 /** use internally for recursivity */ 108 void gatherRegistry(const MPI_Comm& comm) ;108 void gatherRegistry(const ep_lib::MPI_Comm& comm) ; 109 109 110 110 /** use internally for recursivity */ 111 void hierarchicalGatherRegistry(const MPI_Comm& comm) ;111 void hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) ; 112 112 113 113 … … 120 120 121 121 /** MPI communicator used for broadcast and gather operation */ 122 MPI_Comm communicator ;122 ep_lib::MPI_Comm communicator ; 123 123 } ; 124 124 -
XIOS/trunk/src/server.cpp
r1587 r1638 18 18 namespace xios 19 19 { 20 MPI_Comm CServer::intraComm ;21 std::list< MPI_Comm> CServer::interCommLeft ;22 std::list< MPI_Comm> CServer::interCommRight ;23 std::list< MPI_Comm> CServer::contextInterComms;24 std::list< MPI_Comm> CServer::contextIntraComms;20 ep_lib::MPI_Comm CServer::intraComm ; 21 std::list<ep_lib::MPI_Comm> CServer::interCommLeft ; 22 std::list<ep_lib::MPI_Comm> CServer::interCommRight ; 23 std::list<ep_lib::MPI_Comm> CServer::contextInterComms; 24 std::list<ep_lib::MPI_Comm> CServer::contextIntraComms; 25 25 int CServer::serverLevel = 0 ; 26 26 int CServer::nbContexts = 0; … … 48 48 { 49 49 int initialized ; 50 MPI_Initialized(&initialized) ;50 ep_lib::MPI_Initialized(&initialized) ; 51 51 if (initialized) is_MPI_Initialized=true ; 52 52 else is_MPI_Initialized=false ; … … 59 59 if (!is_MPI_Initialized) 60 60 { 61 MPI_Init(NULL, NULL);61 ep_lib::MPI_Init(NULL, NULL); 62 62 } 63 63 CTimer::get("XIOS").resume() ; … … 72 72 int myColor ; 73 73 int i,c ; 74 MPI_Comm newComm;75 76 MPI_Comm_size(CXios::globalComm, &size) ;77 MPI_Comm_rank(CXios::globalComm, &rank_);74 ep_lib::MPI_Comm newComm; 75 76 ep_lib::MPI_Comm_size(CXios::globalComm, &size) ; 77 ep_lib::MPI_Comm_rank(CXios::globalComm, &rank_); 78 78 79 79 hashAll=new unsigned long[size] ; 80 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ;80 ep_lib::MPI_Allgather(&hashServer, 1, EP_LONG, hashAll, 1, EP_LONG, CXios::globalComm) ; 81 81 82 82 map<unsigned long, int> colors ; … … 174 174 // (2) Create intraComm 175 175 if (serverLevel != 2) myColor=colors[hashServer]; 176 MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;176 ep_lib::MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ; 177 177 178 178 // (3) Create interComm … … 186 186 clientLeader=it->second ; 187 187 int intraCommSize, intraCommRank ; 188 MPI_Comm_size(intraComm,&intraCommSize) ;189 MPI_Comm_rank(intraComm,&intraCommRank) ;188 ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ; 189 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ; 190 190 info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 191 191 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 192 192 193 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;193 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 194 194 interCommLeft.push_back(newComm) ; 195 195 } … … 207 207 clientLeader=it->second ; 208 208 int intraCommSize, intraCommRank ; 209 MPI_Comm_size(intraComm, &intraCommSize) ;210 MPI_Comm_rank(intraComm, &intraCommRank) ;209 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ; 210 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ; 211 211 info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 212 212 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 213 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;213 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 214 214 interCommLeft.push_back(newComm) ; 215 215 } … … 219 219 { 220 220 int intraCommSize, intraCommRank ; 221 MPI_Comm_size(intraComm, &intraCommSize) ;222 MPI_Comm_rank(intraComm, &intraCommRank) ;221 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ; 222 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ; 223 223 info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" 
intraCommSize : "<<intraCommSize 224 224 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ; 225 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;225 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ; 226 226 interCommRight.push_back(newComm) ; 227 227 } … … 232 232 clientLeader = leaders[hashString(CXios::xiosCodeId)]; 233 233 int intraCommSize, intraCommRank ; 234 MPI_Comm_size(intraComm, &intraCommSize) ;235 MPI_Comm_rank(intraComm, &intraCommRank) ;234 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ; 235 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ; 236 236 info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 237 237 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 238 238 239 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;239 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ; 240 240 interCommLeft.push_back(newComm) ; 241 241 } … … 253 253 254 254 CTimer::get("XIOS").resume() ; 255 MPI_Comm localComm;255 ep_lib::MPI_Comm localComm; 256 256 oasis_get_localcomm(localComm); 257 MPI_Comm_rank(localComm,&rank_) ;257 ep_lib::MPI_Comm_rank(localComm,&rank_) ; 258 258 259 259 // (1) Create server intraComm 260 260 if (!CXios::usingServer2) 261 261 { 262 MPI_Comm_dup(localComm, &intraComm);262 ep_lib::MPI_Comm_dup(localComm, &intraComm); 263 263 } 264 264 else 265 265 { 266 266 int globalRank; 267 MPI_Comm_size(localComm,&size) ;268 MPI_Comm_rank(CXios::globalComm,&globalRank) ;267 ep_lib::MPI_Comm_size(localComm,&size) ; 268 ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ; 269 269 srvGlobalRanks = new int[size] ; 270 MPI_Allgather(&globalRank, 1, MPI_INT, srvGlobalRanks, 1, MPI_INT, localComm) ;270 ep_lib::MPI_Allgather(&globalRank, 1, EP_INT, srvGlobalRanks, 1, EP_INT, localComm) ; 271 271 272 272 int reqNbProc = size*CXios::ratioServer2/100.; … … 276 276 << "It is impossible to dedicate the requested number of processes = "<<reqNbProc 277 277 <<" to secondary server. 
XIOS will run in the classical server mode."<<endl; 278 MPI_Comm_dup(localComm, &intraComm);278 ep_lib::MPI_Comm_dup(localComm, &intraComm); 279 279 } 280 280 else … … 339 339 } 340 340 if (serverLevel != 2) myColor=0; 341 MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;341 ep_lib::MPI_Comm_split(localComm, myColor, rank_, &intraComm) ; 342 342 } 343 343 } … … 348 348 vector<string>::iterator it ; 349 349 350 MPI_Comm newComm ;350 ep_lib::MPI_Comm newComm ; 351 351 int globalRank ; 352 MPI_Comm_rank(CXios::globalComm,&globalRank);352 ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank); 353 353 354 354 // (2) Create interComms with models … … 359 359 { 360 360 interCommLeft.push_back(newComm) ; 361 if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ;361 if (rank_==0) ep_lib::MPI_Send(&globalRank,1,EP_INT,0,0,newComm) ; 362 362 } 363 363 } … … 365 365 // (3) Create interComms between primary and secondary servers 366 366 int intraCommSize, intraCommRank ; 367 MPI_Comm_size(intraComm,&intraCommSize) ;368 MPI_Comm_rank(intraComm, &intraCommRank) ;367 ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ; 368 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ; 369 369 370 370 if (serverLevel == 1) … … 375 375 info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<intraCommSize 376 376 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ; 377 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;377 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ; 378 378 interCommRight.push_back(newComm) ; 379 379 } … … 383 383 info(50)<<"intercommCreate::server (server level 2)"<<globalRank<<" intraCommSize : "<<intraCommSize 384 384 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvGlobalRanks[0] <<endl ; 385 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;385 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ; 386 386 interCommLeft.push_back(newComm) ; 387 387 } … … 393 393 394 394 395 MPI_Comm_rank(intraComm, &rank) ;395 ep_lib::MPI_Comm_rank(intraComm, &rank) ; 396 396 if (rank==0) isRoot=true; 397 397 else isRoot=false; … … 406 406 delete eventScheduler ; 407 407 408 for (std::list< MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)409 MPI_Comm_free(&(*it));410 411 for (std::list< MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)412 MPI_Comm_free(&(*it));408 for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 409 ep_lib::MPI_Comm_free(&(*it)); 410 411 for (std::list<ep_lib::MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++) 412 ep_lib::MPI_Comm_free(&(*it)); 413 413 414 414 // for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++) … … 418 418 // MPI_Comm_free(&(*it)); 419 419 420 for (std::list< MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)421 MPI_Comm_free(&(*it));422 423 MPI_Comm_free(&intraComm);420 for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++) 421 ep_lib::MPI_Comm_free(&(*it)); 422 423 ep_lib::MPI_Comm_free(&intraComm); 424 424 425 425 if (!is_MPI_Initialized) 426 426 { 427 427 if (CXios::usingOasis) oasis_finalize(); 428 else MPI_Finalize() ;428 else ep_lib::MPI_Finalize() ; 429 
429 } 430 430 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ; … … 465 465 void CServer::listenFinalize(void) 466 466 { 467 list< MPI_Comm>::iterator it, itr;467 list<ep_lib::MPI_Comm>::iterator it, itr; 468 468 int msg ; 469 469 int flag ; … … 471 471 for(it=interCommLeft.begin();it!=interCommLeft.end();it++) 472 472 { 473 MPI_Status status ;473 ep_lib::MPI_Status status ; 474 474 traceOff() ; 475 MPI_Iprobe(0,0,*it,&flag,&status) ;475 ep_lib::MPI_Iprobe(0,0,*it,&flag,&status) ; 476 476 traceOn() ; 477 477 if (flag==true) 478 478 { 479 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;479 ep_lib::MPI_Recv(&msg,1,EP_INT,0,0,*it,&status) ; 480 480 info(20)<<" CServer : Receive client finalize"<<endl ; 481 481 // Sending server finalize message to secondary servers (if any) 482 482 for(itr=interCommRight.begin();itr!=interCommRight.end();itr++) 483 483 { 484 MPI_Send(&msg,1,MPI_INT,0,0,*itr) ;484 ep_lib::MPI_Send(&msg,1,EP_INT,0,0,*itr) ; 485 485 } 486 MPI_Comm_free(&(*it));486 ep_lib::MPI_Comm_free(&(*it)); 487 487 interCommLeft.erase(it) ; 488 488 break ; … … 493 493 { 494 494 int i,size ; 495 MPI_Comm_size(intraComm,&size) ;496 MPI_Request* requests= newMPI_Request[size-1] ;497 MPI_Status* status= newMPI_Status[size-1] ;498 499 for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ;500 MPI_Waitall(size-1,requests,status) ;495 ep_lib::MPI_Comm_size(intraComm,&size) ; 496 ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size-1] ; 497 ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size-1] ; 498 499 for(int i=1;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,4,intraComm,&requests[i-1]) ; 500 ep_lib::MPI_Waitall(size-1,requests,status) ; 501 501 502 502 finished=true ; … … 510 510 { 511 511 int flag ; 512 MPI_Status status ;512 ep_lib::MPI_Status status ; 513 513 int msg ; 514 514 515 515 traceOff() ; 516 MPI_Iprobe(0,4,intraComm, &flag, &status) ;516 ep_lib::MPI_Iprobe(0,4,intraComm, &flag, &status) ; 517 517 traceOn() ; 518 518 if (flag==true) 519 519 { 520 MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ;520 ep_lib::MPI_Recv(&msg,1,EP_INT,0,4,intraComm,&status) ; 521 521 finished=true ; 522 522 } … … 534 534 { 535 535 int flag ; 536 MPI_Status status ;537 list< MPI_Comm>::iterator it;536 ep_lib::MPI_Status status ; 537 list<ep_lib::MPI_Comm>::iterator it; 538 538 int msg ; 539 539 static int nbCompound=0 ; 540 540 int size ; 541 541 static bool sent=false ; 542 static MPI_Request* allRequests ;543 static MPI_Status* allStatus ;542 static ep_lib::MPI_Request* allRequests ; 543 static ep_lib::MPI_Status* allStatus ; 544 544 545 545 546 546 if (sent) 547 547 { 548 MPI_Comm_size(intraComm,&size) ;549 MPI_Testall(size,allRequests, &flag, allStatus) ;548 ep_lib::MPI_Comm_size(intraComm,&size) ; 549 ep_lib::MPI_Testall(size,allRequests, &flag, allStatus) ; 550 550 if (flag==true) 551 551 { … … 559 559 for(it=interCommLeft.begin();it!=interCommLeft.end();it++) 560 560 { 561 MPI_Status status ;561 ep_lib::MPI_Status status ; 562 562 traceOff() ; 563 MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5563 ep_lib::MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5 564 564 traceOn() ; 565 565 if (flag==true) 566 566 { 567 MPI_Recv(&msg,1,MPI_INT,0,5,*it,&status) ; // tags oasis_endded = 5567 ep_lib::MPI_Recv(&msg,1,EP_INT,0,5,*it,&status) ; // tags oasis_endded = 5 568 568 nbCompound++ ; 569 569 if (nbCompound==interCommLeft.size()) 570 570 { 571 for (std::list< MPI_Comm>::iterator it = 
interCommRight.begin(); it != interCommRight.end(); it++)571 for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++) 572 572 { 573 MPI_Send(&msg,1,MPI_INT,0,5,*it) ; // tags oasis_endded = 5573 ep_lib::MPI_Send(&msg,1,EP_INT,0,5,*it) ; // tags oasis_endded = 5 574 574 } 575 MPI_Comm_size(intraComm,&size) ;576 allRequests= new MPI_Request[size] ;577 allStatus= new MPI_Status[size] ;578 for(int i=0;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5575 ep_lib::MPI_Comm_size(intraComm,&size) ; 576 allRequests= new ep_lib::MPI_Request[size] ; 577 allStatus= new ep_lib::MPI_Status[size] ; 578 for(int i=0;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5 579 579 sent=true ; 580 580 } … … 590 590 { 591 591 int flag ; 592 MPI_Status status ;592 ep_lib::MPI_Status status ; 593 593 const int root=0 ; 594 594 int msg ; … … 607 607 608 608 traceOff() ; 609 MPI_Iprobe(root,5,intraComm, &flag, &status) ;609 ep_lib::MPI_Iprobe(root,5,intraComm, &flag, &status) ; 610 610 traceOn() ; 611 611 if (flag==true) 612 612 { 613 MPI_Recv(&msg,1,MPI_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5613 ep_lib::MPI_Recv(&msg,1,EP_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5 614 614 boost::hash<string> hashString; 615 615 size_t hashId = hashString("oasis_enddef"); … … 626 626 { 627 627 628 MPI_Status status ;628 ep_lib::MPI_Status status ; 629 629 int flag ; 630 630 static char* buffer ; 631 static MPI_Request request ;631 static ep_lib::MPI_Request request ; 632 632 static bool recept=false ; 633 633 int rank ; … … 637 637 { 638 638 traceOff() ; 639 MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 639 #ifdef _usingMPI 640 ep_lib::MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 641 #elif _usingEP 642 ep_lib::MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ; 643 #endif 640 644 traceOn() ; 641 645 if (flag==true) 642 646 { 647 #ifdef _usingMPI 643 648 rank=status.MPI_SOURCE ; 644 MPI_Get_count(&status,MPI_CHAR,&count) ; 649 #elif _usingEP 650 rank=status.ep_src ; 651 #endif 652 ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ; 645 653 buffer=new char[count] ; 646 MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ;654 ep_lib::MPI_Irecv((void*)buffer,count,EP_CHAR,rank,1,CXios::globalComm,&request) ; 647 655 recept=true ; 648 656 } … … 651 659 { 652 660 traceOff() ; 653 MPI_Test(&request,&flag,&status) ;661 ep_lib::MPI_Test(&request,&flag,&status) ; 654 662 traceOn() ; 655 663 if (flag==true) 656 664 { 665 #ifdef _usingMPI 657 666 rank=status.MPI_SOURCE ; 658 MPI_Get_count(&status,MPI_CHAR,&count) ; 667 #elif _usingEP 668 rank=status.ep_src ; 669 #endif 670 ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ; 659 671 recvContextMessage((void*)buffer,count) ; 660 672 delete [] buffer ; … … 689 701 { 690 702 int size ; 691 MPI_Comm_size(intraComm,&size) ;703 ep_lib::MPI_Comm_size(intraComm,&size) ; 692 704 // MPI_Request* requests= new MPI_Request[size-1] ; 693 705 // MPI_Status* status= new MPI_Status[size-1] ; 694 MPI_Request* requests= newMPI_Request[size] ;695 MPI_Status* status= newMPI_Status[size] ;706 ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size] ; 707 ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size] ; 696 708 697 709 CMessage msg ; … … 705 717 for(int i=0; i<size; i++) 706 718 { 707 MPI_Isend(sendBuff,sendBuffer.count(),MPI_CHAR,i,2,intraComm,&requests[i]) ;719 
ep_lib::MPI_Isend(sendBuff,sendBuffer.count(),EP_CHAR,i,2,intraComm,&requests[i]) ; 708 720 } 709 721 … … 717 729 void CServer::listenRootContext(void) 718 730 { 719 MPI_Status status ;731 ep_lib::MPI_Status status ; 720 732 int flag ; 721 733 static std::vector<void*> buffers; 722 static std::vector< MPI_Request> requests ;734 static std::vector<ep_lib::MPI_Request> requests ; 723 735 static std::vector<int> counts ; 724 736 static std::vector<bool> isEventRegistered ; 725 737 static std::vector<bool> isEventQueued ; 726 MPI_Request request;738 ep_lib::MPI_Request request; 727 739 728 740 int rank ; … … 733 745 // (1) Receive context id from the root, save it into a buffer 734 746 traceOff() ; 735 MPI_Iprobe(root,2,intraComm, &flag, &status) ;747 ep_lib::MPI_Iprobe(root,2,intraComm, &flag, &status) ; 736 748 traceOn() ; 737 749 if (flag==true) 738 750 { 739 751 counts.push_back(0); 740 MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ;752 ep_lib::MPI_Get_count(&status,EP_CHAR,&(counts.back())) ; 741 753 buffers.push_back(new char[counts.back()]) ; 742 754 requests.push_back(request); 743 MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ;755 ep_lib::MPI_Irecv((void*)(buffers.back()),counts.back(),EP_CHAR,root,2,intraComm,&(requests.back())) ; 744 756 isEventRegistered.push_back(false); 745 757 isEventQueued.push_back(false); … … 750 762 { 751 763 // (2) If context id is received, register an event 752 MPI_Test(&requests[ctxNb],&flag,&status) ;764 ep_lib::MPI_Test(&requests[ctxNb],&flag,&status) ; 753 765 if (flag==true && !isEventRegistered[ctxNb]) 754 766 { … … 787 799 // (1) create interComm (with a client) 788 800 // (2) initialize client and server (contextClient and contextServer) 789 MPI_Comm inter;801 ep_lib::MPI_Comm inter; 790 802 if (serverLevel < 2) 791 803 { 792 MPI_Comm contextInterComm;793 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);794 MPI_Intercomm_merge(contextInterComm,1,&inter);795 MPI_Barrier(inter);796 MPI_Comm_free(&inter);804 ep_lib::MPI_Comm contextInterComm; 805 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm); 806 ep_lib::MPI_Intercomm_merge(contextInterComm,1,&inter); 807 ep_lib::MPI_Barrier(inter); 808 ep_lib::MPI_Comm_free(&inter); 797 809 context->initServer(intraComm,contextInterComm); 798 810 contextInterComms.push_back(contextInterComm); … … 807 819 else if (serverLevel == 2) 808 820 { 809 MPI_Comm_dup(interCommLeft.front(), &inter);821 ep_lib::MPI_Comm_dup(interCommLeft.front(), &inter); 810 822 contextInterComms.push_back(inter); 811 823 context->initServer(intraComm, contextInterComms.back()); … … 818 830 { 819 831 int i = 0, size; 820 MPI_Comm_size(intraComm, &size) ;821 for (std::list< MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)832 ep_lib::MPI_Comm_size(intraComm, &size) ; 833 for (std::list<ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i) 822 834 { 823 835 StdString str = contextId +"_server_" + boost::lexical_cast<string>(i); … … 829 841 CBufferOut buffer(buff,messageSize) ; 830 842 buffer<<msg ; 831 MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;832 MPI_Comm_dup(*it, &inter);843 ep_lib::MPI_Send(buff, buffer.count(), EP_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ; 844 ep_lib::MPI_Comm_dup(*it, &inter); 833 845 contextInterComms.push_back(inter); 834 
MPI_Comm_dup(intraComm, &inter);846 ep_lib::MPI_Comm_dup(intraComm, &inter); 835 847 contextIntraComms.push_back(inter); 836 848 context->initClient(contextIntraComms.back(), contextInterComms.back()) ; … … 862 874 { 863 875 int rank; 864 MPI_Comm_rank(intraComm,&rank);876 ep_lib::MPI_Comm_rank(intraComm,&rank); 865 877 return rank; 866 878 } … … 885 897 int size = 0; 886 898 int id; 887 MPI_Comm_size(CXios::globalComm, &size);899 ep_lib::MPI_Comm_size(CXios::globalComm, &size); 888 900 while (size) 889 901 { -
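
The listening code in this hunk follows one shape throughout: probe for a pending message without blocking, size a buffer from the status, post a non-blocking receive, and test it on later passes of the event loop; the #ifdef _usingEP lines exist only because the endpoint library spells the wildcard source and the status source field differently (-2 and status.ep_src in the hunk). A standalone sketch of that loop in plain MPI (run with at least two processes):

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      if (rank == 1)
      {
        const char msg[] = "register context";
        MPI_Send(msg, (int)sizeof(msg), MPI_CHAR, 0, 1, MPI_COMM_WORLD);
      }
      else if (rank == 0)
      {
        MPI_Status status;
        int flag = 0;
        // In XIOS each probe/test happens once per event-loop turn; here we just spin.
        while (!flag) MPI_Iprobe(MPI_ANY_SOURCE, 1, MPI_COMM_WORLD, &flag, &status);

        int count = 0;
        MPI_Get_count(&status, MPI_CHAR, &count);        // size the buffer from the probe
        std::vector<char> buffer(count);

        MPI_Request request;
        MPI_Irecv(buffer.data(), count, MPI_CHAR, status.MPI_SOURCE, 1, MPI_COMM_WORLD, &request);
        flag = 0;
        while (!flag) MPI_Test(&request, &flag, &status);
        std::printf("server received: %s\n", buffer.data());
      }
      MPI_Finalize();
      return 0;
    }
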
XIOS/trunk/src/server.hpp
r1587 r1638 26 26 static void registerContext(void* buff,int count, int leaderRank=0); 27 27 28 static MPI_Comm intraComm;29 static std::list< MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server)30 static std::list< MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool)31 static std::list< MPI_Comm> contextInterComms; // list of context intercomms32 static std::list< MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers)28 static ep_lib::MPI_Comm intraComm; 29 static std::list<ep_lib::MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server) 30 static std::list<ep_lib::MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool) 31 static std::list<ep_lib::MPI_Comm> contextInterComms; // list of context intercomms 32 static std::list<ep_lib::MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers) 33 33 static CEventScheduler* eventScheduler; 34 34 -
XIOS/trunk/src/timer.cpp
r1158 r1638
      #include "timer.hpp"
+     //#include "mpi_std.hpp"
      #include "mpi.hpp"
      #include <string>
      …
      #include <sstream>
      #include "tracer.hpp"
+     //extern ::MPI_Comm MPI_COMM_WORLD;

      namespace xios
      …
      double CTimer::getTime(void)
      {
+       MPI_COMM_WORLD;
        return MPI_Wtime();
      }
XIOS/trunk/src/tracer.cpp
r501 r1638 1 1 #include "tracer.hpp" 2 2 #ifdef VTRACE 3 #include <vt_user.h> 3 //#include <vt_user.h> 4 #include <VT.h> 4 5 #endif 5 6 #include <string> … … 12 13 { 13 14 #ifdef VTRACE 14 VT_ON() ; 15 //VT_ON() ; 16 VT_traceon() ; 15 17 #endif 16 18 } … … 19 21 { 20 22 #ifdef VTRACE 21 VT_OFF() ; 23 //VT_OFF() ; 24 VT_traceoff() ; 22 25 #endif 23 26 } … … 26 29 { 27 30 #ifdef VTRACE 28 VT_USER_START(name.c_str()) ;31 //VT_USER_START(name.c_str()) ; 29 32 #endif 30 33 } … … 33 36 { 34 37 #ifdef VTRACE 35 VT_USER_END(name.c_str()) ;38 //VT_USER_END(name.c_str()) ; 36 39 #endif 37 40 } -
XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp
r1622 r1638 272 272 273 273 int* recvCount=new int[nbClient]; 274 MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);274 ep_lib::MPI_Allgather(&numValue,1,EP_INT,recvCount,1,EP_INT,client->intraComm); 275 275 276 276 int* displ=new int[nbClient]; … … 279 279 280 280 // Each client have enough global info of axis 281 MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,client->intraComm);282 MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,client->intraComm);281 ep_lib::MPI_Allgatherv(sendIndexBuff,numValue,EP_INT,recvIndexBuff,recvCount,displ,EP_INT,client->intraComm); 282 ep_lib::MPI_Allgatherv(sendValueBuff,numValue,EP_DOUBLE,&(recvBuff[0]),recvCount,displ,EP_DOUBLE,client->intraComm); 283 283 284 284 for (int idx = 0; idx < srcSize; ++idx) -
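
The sequence above is the classic Allgather-then-Allgatherv idiom: gather every client's element count, build displacements from the counts, then gather the unevenly sized value arrays so each client holds the complete source axis. A minimal sketch with illustrative data:

    #include <mpi.h>
    #include <vector>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, nbClient;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &nbClient);

      std::vector<double> localValues(rank + 1, double(rank));  // uneven local pieces
      int numValue = static_cast<int>(localValues.size());

      std::vector<int> recvCount(nbClient), displ(nbClient);
      MPI_Allgather(&numValue, 1, MPI_INT, recvCount.data(), 1, MPI_INT, MPI_COMM_WORLD);

      displ[0] = 0;
      for (int i = 1; i < nbClient; ++i) displ[i] = displ[i-1] + recvCount[i-1];
      int srcSize = displ[nbClient-1] + recvCount[nbClient-1];

      std::vector<double> recvBuff(srcSize);
      MPI_Allgatherv(localValues.data(), numValue, MPI_DOUBLE,
                     recvBuff.data(), recvCount.data(), displ.data(), MPI_DOUBLE, MPI_COMM_WORLD);

      if (rank == 0) std::printf("every rank now holds %d axis values\n", srcSize);
      MPI_Finalize();
      return 0;
    }
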
XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp
r1622 r1638 161 161 sendRankSizeMap[itIndex->first] = sendSize; 162 162 } 163 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);163 ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm); 164 164 165 165 displ[0]=0 ; … … 168 168 int* recvRankBuff=new int[recvSize]; 169 169 int* recvSizeBuff=new int[recvSize]; 170 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);171 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);170 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm); 171 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm); 172 172 for (int i = 0; i < nbClient; ++i) 173 173 { … … 181 181 182 182 // Sending global index of grid source to corresponding process as well as the corresponding mask 183 std::vector< MPI_Request> requests;184