Changeset 256 for XMLIO_V2/dev/dev_rv/src4
- Timestamp: 07/12/11 16:32:19
- Location: XMLIO_V2/dev/dev_rv/src4/xmlio
- Files: 2 added, 5 edited
XMLIO_V2/dev/dev_rv/src4/xmlio/exception.hpp (r216 → r256)

     #ifndef __EXCEPTION_HPP__
    -#define
    +#define __EXCEPTION_HPP__

     #ifndef __XIOS_NO_EXTERN
    …
     // XMLIOServer headers
     #include "object.hpp"
    -

     /**
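The substantive fix here is the include guard: the bare #define named no macro, which most preprocessors reject outright, and which in any case would leave __EXCEPTION_HPP__ undefined so the guard could never suppress repeated inclusion. For reference, the corrected pattern (header body elided):

    #ifndef __EXCEPTION_HPP__
    #define __EXCEPTION_HPP__   // must define the same macro the #ifndef tests

    // ... header contents ...

    #endif // __EXCEPTION_HPP__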
XMLIO_V2/dev/dev_rv/src4/xmlio/main_server.cpp (r253 → r256)

     #include "mpi_interface.hpp"
    -#include "onetcdf4.hpp"
    -#include "onetcdf4_impl.hpp"
    +//~ #include "onetcdf4.hpp"
    +//~ #include "onetcdf4_impl.hpp"
    +#include "array_util.hpp"
    +#include "array_util_impl.hpp"

     namespace xios = xmlioserver;
     //~ using namespace xios::data;
    -using namespace xios::io;
    +//~ using namespace xios::io;
     //~ using namespace xios::tree;
     //~ using namespace xios::xml;
XMLIO_V2/dev/dev_rv/src4/xmlio/mpi/mpi_interface.cpp (r242 → r256)

     /**
    - * \file mpi_interface.hpp
    + * \file mpi_interface.cpp
      * \brief Gestion des communications MPI via une surcouche interne (implémentation).
      * \author Hervé Ozdoba
    …
        XIOS_ERROR("CMPIManager::Barrier(comm)", << " MPI Error !");
     }
    +
    +bool CMPIManager::DispatchClient(bool _is_server,
    +                                 MPI_Comm & _comm_client,
    +                                 MPI_Comm & _comm_client_server,
    +                                 MPI_Comm & _comm_server,
    +                                 MPI_Comm   _comm_parent)
    +{
    +   int value = (_is_server) ? 1 : 2;
    +   std::size_t nbClient = 0, nbServer = 0, nbClientByServer = 0;
    +   std::vector<int> info, rank_client, rank_server;
    +   CMPIManager::AllGather(value, info, _comm_parent);
    +
    +   for (std::size_t s = 0; s < info.size(); s++)
    +   {
    +      if (info[s] == 1) rank_server.push_back(s);
    +      else              rank_client.push_back(s);
    +   }
    +   nbClient = rank_client.size();
    +   nbServer = rank_server.size();
    +
    +   _comm_client = CMPIManager::CreateComm(CMPIManager::CreateSubGroup(
    +                     CMPIManager::GetGroupWorld(), rank_client), _comm_parent);
    +
    +   if (nbServer != 0)
    +   {
    +      std::size_t currentServer = 0;
    +      nbClientByServer = nbClient / nbServer;
    +      _comm_server = CMPIManager::CreateComm(CMPIManager::CreateSubGroup(
    +                        CMPIManager::GetGroupWorld(), rank_server), _comm_parent);
    +
    +      //std::cout << nbClient << "," << nbServer << "," << nbClientByServer << std::endl;
    +
    +      for (std::size_t mm = 0; mm < nbClient; mm += nbClientByServer)
    +      {
    +         std::vector<int> group_rank;
    +         group_rank.push_back(rank_server[currentServer++]);
    +         for (std::size_t nn = 0; nn < nbClientByServer; nn++)
    +            group_rank.push_back(rank_client[nn + mm]);
    +         MPI_Comm comm_client_server_ = CMPIManager::CreateComm(CMPIManager::CreateSubGroup(
    +                                           CMPIManager::GetGroupWorld(), group_rank), _comm_parent);
    +
    +         if (std::find(group_rank.begin(), group_rank.end(),
    +                       CMPIManager::GetCommRank(_comm_parent)) != group_rank.end())
    +         {
    +            _comm_client_server = comm_client_server_;
    +         }
    +
    +         group_rank.clear();
    +      }
    +      return (true);
    +   }
    +   else
    +   {
    +      _comm_server = _comm_client;
    +      return (false);
    +   }
    +}
    +
     // --------------------------------- Groupes --------------------------------
    …
        CMPIManager::Wait(request); // Temporaire
     }
    +
    +void CMPIManager::AllGather(int _indata, std::vector<int> & _outdata, MPI_Comm _comm)
    +{
    +   std::vector<int> data; data.push_back(_indata);
    +   CMPIManager::AllGather(data, _outdata, _comm);
    +}
    +
    +void CMPIManager::AllGather(const std::vector<int> & _indata,
    +                            std::vector<int> & _outdata, MPI_Comm _comm)
    +{
    +   int sendcount = _indata.size(),
    +       recvcount = _indata.size() * CMPIManager::GetCommSize(_comm);
    +   _outdata.resize(recvcount);
    +   if (MPI_Allgather(const_cast<int*>(&(_indata[0])), sendcount, MPI_INTEGER,
    +                     &(_outdata[0]), recvcount, MPI_INTEGER, _comm) != MPI_SUCCESS)
    +      XIOS_ERROR("CMPIManager::AllGather (indata, outdata, comm)", << " MPI Error !");
    +}

     // ------------------------- Communication 'complexe' -----------------------
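Two details of the new AllGather overload are worth flagging against the MPI standard: MPI_Allgather's receive count is the number of elements received from each rank, not the total across the communicator, and MPI_INTEGER names the Fortran INTEGER datatype while MPI_INT is the C counterpart for int buffers. A minimal standalone sketch of the standard calling convention (the helper name allGatherInts is hypothetical, not part of this changeset):

    #include <mpi.h>
    #include <vector>

    // Gather an equal-sized vector<int> from every rank of 'comm'.
    std::vector<int> allGatherInts(const std::vector<int> & in, MPI_Comm comm)
    {
       int size = 0;
       MPI_Comm_size(comm, &size);
       std::vector<int> out(in.size() * size);   // room for count * nb_ranks
       // Both counts are per-rank element counts; MPI_INT matches C 'int'.
       // const_cast kept for MPI-2 era prototypes taking void* send buffers.
       MPI_Allgather(const_cast<int*>(in.data()), static_cast<int>(in.size()), MPI_INT,
                     out.data(),                  static_cast<int>(in.size()), MPI_INT,
                     comm);
       return out;
    }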
XMLIO_V2/dev/dev_rv/src4/xmlio/mpi/mpi_interface.hpp (r242 → r256)

     typedef MPI_Fint MPI_F_DataType; /*!< \brief Identifiant de type de données MPI (Fortran). */

    +/**
    + * \class CMPIManager
    + * \brief Surcouche interne de la bibliothèque M(essage) P(assing) I(nterface).
    + */
     class CMPIManager
     {
    …
     public : // Communicateurs

    -   static int GetCommRank(MPI_Comm comm = CMPIManager::GetCommWorld());
    -   static int GetCommSize(MPI_Comm comm = CMPIManager::GetCommWorld());
    -   static MPI_Comm CreateComm(MPI_Group group, MPI_Comm pcomm = CMPIManager::GetCommWorld());
    +   static int GetCommRank(MPI_Comm _comm = CMPIManager::GetCommWorld());
    +   static int GetCommSize(MPI_Comm _comm = CMPIManager::GetCommWorld());
    +   static MPI_Comm CreateComm(MPI_Group _group, MPI_Comm _pcomm = CMPIManager::GetCommWorld());
        static MPI_Comm GetCommWorld(void);

     public : // Autre

    -   static void Barrier(MPI_Comm comm = CMPIManager::GetCommWorld());
    +   static void Barrier(MPI_Comm _comm = CMPIManager::GetCommWorld());
    +
    +   static bool DispatchClient(bool _is_server,
    +                              MPI_Comm & _comm_client,
    +                              MPI_Comm & _comm_client_server,
    +                              MPI_Comm & _comm_server,
    +                              MPI_Comm   _comm_parent = CMPIManager::GetCommWorld());

     public : // Groupes

        static MPI_Group GetGroupWorld(void);
    -   static MPI_Group CreateSubGroup(MPI_Group pgroup, const std::vector<int> & ranks);
    -   static MPI_Group CreateSubGroup(MPI_Group pgroup, int min_rank, int max_rank, int intval = 1);
    +   static MPI_Group CreateSubGroup(MPI_Group _pgroup, const std::vector<int> & _ranks);
    +   static MPI_Group CreateSubGroup(MPI_Group _pgroup, int _min_rank, int _max_rank, int _intval = 1);

     public : // Tests

    -   static bool IsMaster(MPI_Comm comm = CMPIManager::GetCommWorld());
    -   static bool IsRank(int rank, MPI_Comm comm = CMPIManager::GetCommWorld());
    +   static bool IsMaster(MPI_Comm _comm = CMPIManager::GetCommWorld());
    +   static bool IsRank(int _rank, MPI_Comm _comm = CMPIManager::GetCommWorld());

     public : // Communication simple

    -   static void Send (MPI_Comm comm, int dest_rank, char * data,
    -                     std::size_t size, MPI_Request & request);
    -   static void Wait (MPI_Request & request);
    -   static bool Test (MPI_Request & request);
    +   static void Send (MPI_Comm _comm, int _dest_rank, char * _data,
    +                     std::size_t _size, MPI_Request & _request);
    +   static void Wait (MPI_Request & _request);
    +   static bool Test (MPI_Request & _request);

    -   static bool HasReceivedData(MPI_Comm comm, int src_rank);
    -   static std::size_t GetReceivedDataSize(MPI_Comm comm, int src_rank);
    -   static void Receive(MPI_Comm comm, int src_rank, char * data);
    +   static bool HasReceivedData(MPI_Comm _comm, int _src_rank);
    +   static std::size_t GetReceivedDataSize(MPI_Comm _comm, int _src_rank);
    +   static void Receive(MPI_Comm _comm, int _src_rank, char * _data);
    +
    +   static void AllGather(int _indata, std::vector<int> & _outdata,
    +                         MPI_Comm _comm = CMPIManager::GetCommWorld());
    +
    +   static void AllGather(const std::vector<int> & _indata,
    +                         std::vector<int> & _outdata,
    +                         MPI_Comm _comm = CMPIManager::GetCommWorld());

     public : // Communication 'complexe'
    …
     public : // Mémoire (non fonctionnel ....)

    -   static void AllocMemory(void * data, std::size_t size);
    -   static void FreeMemory (void * data);
    +   static void AllocMemory(void * _data, std::size_t _size);
    +   static void FreeMemory (void * _data);

     }; // class CMPIManager
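The new DispatchClient entry point partitions the parent communicator by role: each rank announces via AllGather whether it is a server, server and client ranks receive their own communicators, and clients are dealt to servers in contiguous blocks of nbClient/nbServer, each block sharing a mixed client-server communicator with its server. A hedged usage sketch follows; the rank-0-as-server policy and the enclosing namespace are assumptions for illustration only:

    #include <mpi.h>
    #include "mpi_interface.hpp"

    // Assumed namespace; adjust to wherever CMPIManager is actually declared.
    using namespace xmlioserver::comm;

    int main(int argc, char ** argv)
    {
       MPI_Init(&argc, &argv);

       // Illustrative policy: let rank 0 of MPI_COMM_WORLD act as the I/O server.
       bool isServer = CMPIManager::IsMaster();

       MPI_Comm commClient, commClientServer, commServer;
       bool hasServer = CMPIManager::DispatchClient(isServer, commClient,
                                                    commClientServer, commServer);

       if (hasServer && isServer)
       {
          // Server ranks exchange data with their block of clients
          // through commClientServer.
       }
       else if (!hasServer)
       {
          // No server present: DispatchClient falls back to commServer == commClient.
       }

       MPI_Finalize();
       return 0;
    }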
XMLIO_V2/dev/dev_rv/src4/xmlio/netcdf/onetcdf4.hpp (r249 → r256)

     #define MPI_INCLUDED
     #include <netcdf.h>
    +extern "C" {
    +#include <netcdf_par.h>
    +}

     // C++ standard headers
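The extern "C" wrapper matters here, presumably because netcdf_par.h of that era does not add C++ linkage guards itself; including it exposes the parallel entry points nc_create_par and nc_open_par. A minimal sketch of creating a NetCDF-4 file for parallel access; the NC_MPIIO flag reflects the NetCDF 4.1-era API (later releases fold it into the default) and the file name is illustrative:

    #include <mpi.h>
    extern "C" {
    #include <netcdf.h>
    #include <netcdf_par.h>
    }

    // Create a NetCDF-4 file that all ranks of 'comm' may write collectively.
    int createParallelFile(MPI_Comm comm, int * ncid)
    {
       // NC_MPIIO requests MPI-IO based parallel access (NetCDF 4.1-era flag).
       return nc_create_par("output.nc", NC_NETCDF4 | NC_MPIIO,
                            comm, MPI_INFO_NULL, ncid);
    }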