Changeset 267
- Timestamp: 09/07/11 14:25:56 (13 years ago)
- Location: XMLIO_V2/dev/dev_rv
- Files: 2 added, 4 deleted, 16 edited

XMLIO_V2/dev/dev_rv/Makefile.wk (r264 → r267)
- NSOURCE (xml node sources in the node directory) gains the new "variable" entry: axis domain field file grid variable context.
- FSOURCE (field functors in the functor directory) gains "maximum" and "minimum": once instant average maximum minimum.
- $(OBJ_PATH)/impi_interface.o is dropped from the default OBJECTS definition; the VERSION 4 branch is unchanged.

XMLIO_V2/dev/dev_rv/src/xmlio/buffer_pair.cpp (r204 → r267)
- The include of "impi_interface.hpp" is replaced by "mpi_manager.hpp".
- Every use of the Fortran-style mpi_request_null constant (constructor initialisers, the current-buffer readiness tests, wait(), sendCurrentBuffer()) now uses the native MPI_REQUEST_NULL.
- CBufferPair::wait() now returns MPI_Request instead of int.
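
The CBufferPair logic alternates between two send buffers so one can be refilled while the other is still in flight. Below is a minimal, self-contained sketch of that double-buffering pattern using the native MPI handles this changeset adopts; the DoubleBuffer name and its layout are illustrative only, not the actual CBufferPair interface.

    // Minimal sketch of a double-buffered non-blocking send; names are illustrative.
    #include <mpi.h>
    #include <vector>

    struct DoubleBuffer                     // hypothetical stand-in for CBufferPair
    {
        std::vector<char> buf[2];
        MPI_Request       req[2] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL };
        int               current = 0;

        // A slot can be refilled once its previous send has completed
        // (MPI_Test resets the request to MPI_REQUEST_NULL on completion).
        bool slotFree(int slot)
        {
            if (req[slot] == MPI_REQUEST_NULL) return true;
            int done = 0;
            MPI_Test(&req[slot], &done, MPI_STATUS_IGNORE);
            return done != 0;
        }

        // Post a non-blocking synchronous send of the current buffer, then swap
        // to the other slot, waiting for it if it is still in flight.
        void sendCurrent(MPI_Comm comm, int dest)
        {
            MPI_Issend(buf[current].data(), static_cast<int>(buf[current].size()),
                       MPI_CHAR, dest, 0, comm, &req[current]);
            current = 1 - current;
            if (req[current] != MPI_REQUEST_NULL)
                MPI_Wait(&req[current], MPI_STATUS_IGNORE);
            buf[current].clear();
        }
    };

The changeset keeps the same control flow but stores MPI_Request directly, so MPI_REQUEST_NULL can serve as the "no send in flight" marker instead of the Fortran constant.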

XMLIO_V2/dev/dev_rv/src/xmlio/buffer_pair.hpp (r199 → r267)
- wait() is declared as returning MPI_Request instead of int.
- The members first_request and second_request change type from MPIRequest to the native MPI_Request.

XMLIO_V2/dev/dev_rv/src/xmlio/config/node_type.conf (r152 → r267)
- The DECLARE_NODE(Method, method) block, guarded by __XMLIO_CMethod__, is commented out.
- DECLARE_NODE(Var, var), guarded by __XMLIO_CVar__, becomes DECLARE_NODE(Variable, variable), guarded by __XMLIO_CVariable__, matching the new variable node type added to the build.
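
node_type.conf is included several times with different definitions of DECLARE_NODE, so a rename such as Var → Variable here propagates automatically to every expansion site (context.cpp below does exactly that with #include "node_type.conf"). A minimal sketch of that X-macro technique follows; the NODE_LIST macro and the two helper functions are illustrative stand-ins, not the actual XMLIOServer code.

    // Minimal X-macro sketch; NODE_LIST stands in for the contents of node_type.conf.
    #include <iostream>
    #include <string>

    #define NODE_LIST \
        DECLARE_NODE(Axis, axis)     \
        DECLARE_NODE(Domain, domain) \
        DECLARE_NODE(Variable, variable)

    // First expansion: list the XML tag of every known node type.
    void printKnownTags()
    {
    #define DECLARE_NODE(Name_, name_) std::cout << #name_ << '\n';
        NODE_LIST
    #undef DECLARE_NODE
    }

    // Second expansion: map an XML tag back to its class name.
    std::string classFor(const std::string & tag)
    {
    #define DECLARE_NODE(Name_, name_) if (tag == #name_) return "C" #Name_;
        NODE_LIST
    #undef DECLARE_NODE
        return "<unknown>";
    }

    int main()
    {
        printKnownTags();                          // axis, domain, variable
        std::cout << classFor("variable") << '\n'; // CVariable
        return 0;
    }

With the list kept in one configuration file, enabling the new variable node only requires touching node_type.conf and node_type.hpp, both edited in this changeset.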

XMLIO_V2/dev/dev_rv/src/xmlio/data_treatment.hpp (r265 → r267)
- A commented-out debug trace (// std::cout << ">> " << fieldId << ", " << file->getId() << std::endl;) is added just before the field->updateData(currDate, timestep, data) call.

XMLIO_V2/dev/dev_rv/src/xmlio/exception.hpp (r171 → r267)
- The guard around the DEBUG(x) macro changes from #ifdef XIOS_DEBUG to #ifdef __XIOS_DEBUG.

XMLIO_V2/dev/dev_rv/src/xmlio/iface/interface.cpp.in (r265 → r267)
- In the comm_client_server branch, the local request is declared as MPI_Request instead of MPIRequest.

XMLIO_V2/dev/dev_rv/src/xmlio/main_server.cpp (r256 → r267)
- Commented-out calls to CTreeManager::ParseFile("test/iodef_simple_test.xml") and CTreeManager::PrintTreeToFile("wk/def/test.xml") are added next to the commented-out MPI Initialise/Finalize calls (connected mode only).
- The commented-out CXIOSManager::AddClient lines (nemo, orchidee, lmdz) are regrouped under a separator; the active CLIENT_SERVER startup path (Initialise, DispatchClient, RunServer("Nemo", ...), Finalize) is unchanged.

XMLIO_V2/dev/dev_rv/src/xmlio/manager/mpi_manager.cpp (r254 → r267)
The file is rewritten to call the C MPI API directly instead of the Fortran-style wrappers from impi_interface.hpp:
- A copyright banner (IPSL/LSCE, XMLIOServer, Avril 2010 - Octobre 2011) and a Doxygen file header (mpi_interface.cpp, Hervé Ozdoba, version 0.4, 28 Juin 2011) are added; the include of impi_interface.hpp is removed.
- All operations now use MPI_Comm, MPI_Group, MPI_Request and MPI_Status and check return codes against MPI_SUCCESS: MPI_Initialized/MPI_Init, MPI_Finalize, MPI_Comm_rank, MPI_Comm_size, MPI_Comm_create, MPI_Barrier, MPI_Comm_group, MPI_Group_incl, MPI_Issend, MPI_Wait, MPI_Test, MPI_Iprobe, MPI_Get_count, MPI_Irecv, MPI_Allgather, MPI_Alloc_mem and MPI_Free_mem. Parameters are renamed with a leading underscore and StdSize becomes std::size_t.
- Initialise() now forwards argc/argv to MPI_Init instead of marking them UNUSED; Wait(), Test(), HasReceivedData() and GetReceivedDataSize() use a stack MPI_Status instead of allocating an int[mpi_status_size] array.
- GetCommWorld() is commented out of the implementation (the header now converts MPI_COMM_WORLD itself); IsMaster()/IsRank() move to a "Tests" section and IsRank() now takes (rank, comm).
- DispatchClient() reports an error when no client rank is available ("Aucun client disponible !") and looks up the caller's rank in _comm_parent, rather than in the world communicator, when selecting the client-server communicator.
- AllGather() takes the input vector by const reference and passes explicit send and receive counts with MPI_INTEGER.
- AllocMem()/FreeMem() are renamed AllocMemory()/FreeMemory(), and the code is regrouped under section banners (Initialisation & Finalisation, Communicateurs, Autre, Groupes, Tests, Communication simple, Communication 'complexe', Mémoire).
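
DispatchClient() derives the client/server split from a single MPI_Allgather of each rank's role, then builds the sub-communicators with MPI_Group_incl and MPI_Comm_create. A minimal stand-alone sketch of that role-gathering scheme follows; the role assignment (rank 0 as server) and all variable names are assumptions made for the example, not XIOS defaults.

    // Minimal sketch of role gathering and sub-communicator creation; the role
    // assignment below (rank 0 = server, others = clients) is an assumption.
    #include <mpi.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char ** argv)
    {
        MPI_Init(&argc, &argv);
        int rank = 0, size = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        // Every rank publishes its role: 1 = server, 2 = client.
        int role = (rank == 0) ? 1 : 2;
        std::vector<int> roles(size);
        MPI_Allgather(&role, 1, MPI_INT, roles.data(), 1, MPI_INT, MPI_COMM_WORLD);

        // Partition the world ranks according to the gathered roles.
        std::vector<int> clients, servers;
        for (int r = 0; r < size; ++r)
            (roles[r] == 1 ? servers : clients).push_back(r);

        // Build a communicator containing only the client ranks.
        MPI_Group world_group, client_group;
        MPI_Comm_group(MPI_COMM_WORLD, &world_group);
        MPI_Group_incl(world_group, static_cast<int>(clients.size()),
                       clients.data(), &client_group);
        MPI_Comm comm_client;   // MPI_COMM_NULL on ranks outside the group
        MPI_Comm_create(MPI_COMM_WORLD, client_group, &comm_client);

        std::printf("rank %d: role=%d, %zu client(s), %zu server(s)\n",
                    rank, role, clients.size(), servers.size());

        if (comm_client != MPI_COMM_NULL) MPI_Comm_free(&comm_client);
        if (client_group != MPI_GROUP_EMPTY) MPI_Group_free(&client_group);
        MPI_Group_free(&world_group);
        MPI_Finalize();
        return 0;
    }

In the real implementation the server ranks are additionally paired with nbClient/nbServer client ranks each to form the per-server comm_client_server communicators.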

XMLIO_V2/dev/dev_rv/src/xmlio/manager/mpi_manager.hpp (r254 → r267)
The header is reorganised around native MPI handles, keeping the Fortran handles as a thin compatibility layer:
- The include guard changes from __XMLIO_CMPIManager__ to __MPI_INTERFACE_HPP__; the copyright banner and Doxygen comments are added; <mpi.h> and <vector> are wrapped in #ifndef __XIOS_NO_EXTERN; the exception.hpp include is dropped.
- The MPI_Fint typedefs (MPIComm, MPIGroup, MPIRequest, MPIStatus, MPIDataType) remain, now documented, but the core declarations use MPI_Comm, MPI_Group and MPI_Request with MPI_COMM_WORLD as the default communicator.
- Each operation taking an MPIComm (GetCommRank, GetCommSize, GetCommWorld, CreateComm, DispatchClient, IsMaster, IsRank, HasReceivedData, GetReceivedDataSize, SendLinearBuffer, ReceiveLinearBuffer, ReceiveCircularBuffer) becomes an inline wrapper that converts through MPI_Comm_f2c / MPI_Comm_c2f and forwards to the native overload.
- IsRank() changes signature from (comm, rank) to (rank, comm); AllGather() takes const std::vector<int>&; StdSize becomes std::size_t; AllocMem/FreeMem become AllocMemory/FreeMemory.
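
The new header keeps the MPI_Fint-based MPIComm handle for existing callers while the real work is done on native MPI_Comm handles, bridged with MPI_Comm_f2c / MPI_Comm_c2f. A minimal sketch of that bridging pattern follows; the free-function names are illustrative (the real code uses overloaded static members of CMPIManager).

    // Minimal sketch of bridging Fortran-style and native MPI communicator
    // handles; function names are illustrative, not the CMPIManager interface.
    #include <mpi.h>
    #include <cstdio>

    typedef MPI_Fint MPIComm;   // Fortran-side handle, as in mpi_manager.hpp

    // Native-handle version: performs the actual MPI call.
    int commRankNative(MPI_Comm comm)
    {
        int rank = 0;
        MPI_Comm_rank(comm, &rank);
        return rank;
    }

    // Fortran-handle version: a thin conversion wrapper (inline in the header).
    int commRankFortran(MPIComm comm)
    {
        return commRankNative(MPI_Comm_f2c(comm));
    }

    // Hand the world communicator out as a Fortran handle, as GetCommWorld() now does.
    MPIComm worldAsFortranHandle()
    {
        return MPI_Comm_c2f(MPI_COMM_WORLD);
    }

    int main(int argc, char ** argv)
    {
        MPI_Init(&argc, &argv);
        MPIComm fworld = worldAsFortranHandle();
        std::printf("rank (via Fortran handle) = %d\n", commRankFortran(fworld));
        MPI_Finalize();
        return 0;
    }

Keeping all conversions in inline wrappers lets the rest of the library migrate to native handles file by file, which is what buffer_pair.cpp and interface.cpp.in do in this same changeset.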

XMLIO_V2/dev/dev_rv/src/xmlio/node/context.cpp (r206 → r267)
- A debug trace std::cout << name << std::endl; is added after the #include "node_type.conf" expansion, before the DEBUG message reporting an element name unknown in the current context.

XMLIO_V2/dev/dev_rv/src/xmlio/node/field.hpp (r265 → r267)
- The parameter lists of updateData() and updateDataServer() are rewrapped over several lines; no functional change.

XMLIO_V2/dev/dev_rv/src/xmlio/node/node_type.hpp (r152 → r267)
- The commented-out includes of method.hpp and var.hpp are replaced by #include "variable.hpp".

XMLIO_V2/dev/dev_rv/src/xmlio/output/nc4_data_output.cpp (r265 → r267)
- An unused local variable (int dvm = 1;) is removed just after the domain->isEmpty() early return in the domain-writing path.

XMLIO_V2/dev/dev_rv/src4/xmlio/mpi/mpi_interface.cpp (r256 → r267)
- The previously commented-out "complex communication" helpers are implemented with native MPI types: SendLinearBuffer(), both ReceiveLinearBuffer() overloads and ReceiveCircularBuffer(), mirroring src/xmlio/manager/mpi_manager.cpp. (As committed, a few statements in SendLinearBuffer() and ReceiveCircularBuffer() still refer to the old names buff, comm and src_rank rather than the new _lbuffer, _cbuffer and _comm parameters.)

XMLIO_V2/dev/dev_rv/src4/xmlio/mpi/mpi_interface.hpp (r256 → r267)
- The matching declarations of SendLinearBuffer(), ReceiveLinearBuffer() and ReceiveCircularBuffer() are uncommented and switched to MPI_Comm / MPI_Request with the new parameter names.