- Timestamp: 11/19/18 15:52:54
- Location: XIOS/dev/dev_trunk_omp/src
- Files: 127 edited
XIOS/dev/dev_trunk_omp/src/attribute_map.hpp
Diff r1158:r1601 — adds #pragma omp threadprivate(Current) after the static member CAttributeMap * Current, so each OpenMP thread keeps its own current attribute map.
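This threadprivate-on-a-static pattern recurs throughout the changeset (attribute_map, indent, indent_xml, cxios, client below). A minimal self-contained sketch, with a hypothetical class name rather than XIOS code, of what the directive buys: each OpenMP thread gets its own copy of the static, so "current object" globals stop being shared state.

// Minimal sketch (hypothetical CCurrentLike class, not XIOS code) of the
// threadprivate-static pattern used throughout this changeset.
#include <omp.h>
#include <cstdio>

class CCurrentLike
{
public:
    static CCurrentLike* Current;          // one "current" pointer...
#pragma omp threadprivate(Current)         // ...but one copy per OpenMP thread
    int id;
};

CCurrentLike* CCurrentLike::Current = nullptr;

int main()
{
#pragma omp parallel
    {
        CCurrentLike mine{omp_get_thread_num()};
        CCurrentLike::Current = &mine;     // no race: each thread writes its own copy
        std::printf("thread %d sees id %d\n", omp_get_thread_num(), CCurrentLike::Current->id);
    }
    return 0;
}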
XIOS/dev/dev_trunk_omp/src/attribute_template.hpp
Diff r1478:r1601 — blank-line removal only, between checkEmpty() and setInheritedValue(); no functional change.
XIOS/dev/dev_trunk_omp/src/attribute_template_impl.hpp
Diff r1478:r1601 — whitespace/indentation changes only in CAttributeTemplate<T>::getValue() and setValue(); no functional change.
XIOS/dev/dev_trunk_omp/src/buffer_client.cpp
Diff r1227:r1601 — adds using namespace ep_lib; and wraps the buffer-allocation info(10) message in #pragma omp critical (_output).
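The #pragma omp critical (_output) guard added here (and in calendar.cpp, client.cpp, context_client.cpp, context_server.cpp, event_client.cpp below) always uses the same section name, so every log write in the process is serialized against every other one. A small sketch, with std::cout standing in for the XIOS info()/report() streams:

// Sketch of the shared-log-guard pattern: all writers use the same named
// critical section, so lines from different OpenMP threads never interleave.
#include <iostream>
#include <omp.h>

void logAllocation(int serverRank, long bufferSize)
{
#pragma omp critical (_output)
    {
        std::cout << "CClientBuffer: allocated 2 x " << bufferSize
                  << " bytes for server " << serverRank << std::endl;
    }
}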
XIOS/dev/dev_trunk_omp/src/buffer_client.hpp
Diff r1227:r1601 — adds #pragma omp threadprivate(maxRequestSize) and switches the constructor argument and the request/interComm members from MPI_Comm / MPI_Request to ep_lib::MPI_Comm / ep_lib::MPI_Request.
XIOS/dev/dev_trunk_omp/src/calendar.cpp
Diff r1357:r1601 — lowers the "update step" message from info(20) to info(80) and protects it with #pragma omp critical (_output).
XIOS/dev/dev_trunk_omp/src/client.cpp
Diff r1587:r1601 — adds using namespace ep_lib;. The static std::list<MPI_Comm> contextInterComms becomes a lazily allocated pointer contextInterComms_ptr, created on first use in registerContext() and iterated in finalize(). A per-thread array StdOFStream array_infoStream[16] is introduced, and openInfoStream() now routes the info/report streams through info_FB[omp_get_thread_num()] and array_infoStream[omp_get_thread_num()] instead of the single m_infoStream. The intercommCreate debug message, the "Register new Context" message, the finalize message and the whole performance/memory report block are wrapped in #pragma omp critical (_output). In finalize(), the oasis_finalize() branch is commented out so MPI_Finalize() is always called when XIOS initialized MPI.
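The per-thread log files come from the new array_infoStream: each thread opens its own ofstream slot and hands its rdbuf() to the logger. A self-contained sketch of that idea; the "<base>_<thread>.out" naming below is an assumption for illustration, the real openStream() naming is not visible in this hunk.

// Sketch of per-thread info streams: one ofstream per OpenMP thread, and the
// thread's own filebuf is returned so the logger writes to that thread's file.
#include <fstream>
#include <string>
#include <omp.h>

static std::ofstream array_infoStream[16];             // fixed-size slot table, as in the diff

std::filebuf* openThreadInfoStream(const std::string& baseName)
{
    int t = omp_get_thread_num();
    array_infoStream[t].open(baseName + "_" + std::to_string(t) + ".out");
    return array_infoStream[t].rdbuf();                 // logger writes through this buffer
}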
XIOS/dev/dev_trunk_omp/src/client.hpp
Diff r1587:r1601 — the CClient class declaration is re-indented and updated: initialize(), registerContext() and getInterComm() now take or return ep_lib::MPI_Comm, contextInterComms becomes the pointer contextInterComms_ptr, a static StdOFStream array_infoStream[16] is added, and the static members (intraComm, interComm, contextInterComms_ptr, serverLeader, is_MPI_Initialized, rank_, m_infoStream, m_errorStream) are each marked #pragma omp threadprivate.
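contextInterComms is turned into a pointer because a threadprivate copy is easiest to manage when the variable itself is trivial: the list is created lazily by whichever thread first registers a context. A minimal sketch of that lazy-pointer pattern (the same idea as CurrContext_ptr and engines_ptr further down), with a hypothetical stand-in handle type instead of ep_lib::MPI_Comm:

// Sketch of the threadprivate lazy-pointer pattern used for contextInterComms_ptr.
// CommHandle is a hypothetical stand-in for ep_lib::MPI_Comm.
#include <list>

struct CommHandle { int value; };

static std::list<CommHandle>* contextInterComms_ptr = nullptr;
#pragma omp threadprivate(contextInterComms_ptr)

void rememberInterComm(const CommHandle& comm)
{
    if (contextInterComms_ptr == nullptr)               // first use on this thread
        contextInterComms_ptr = new std::list<CommHandle>;
    contextInterComms_ptr->push_back(comm);              // each thread fills its own list
}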
XIOS/dev/dev_trunk_omp/src/client_client_dht_template.hpp
Diff r1542:r1601 — all MPI_Comm / MPI_Request parameters become ep_lib::MPI_Comm / ep_lib::MPI_Request, and each of sendIndexToClients, recvIndexFromClients, sendInfoToClients and recvInfoFromClients gains an overload taking a single ep_lib::MPI_Request* in addition to the std::vector-of-requests version.
XIOS/dev/dev_trunk_omp/src/client_client_dht_template_impl.hpp
Diff r1542:r1601 — every MPI call and type is prefixed with ep_lib:: (MPI_Comm_size, MPI_Comm_rank, MPI_Isend, MPI_Irecv, MPI_Waitall, MPI_Request, MPI_Status). In computeIndexInfoMappingLevel() and computeDistributedIndex(), the request vectors are no longer grown with push_back: the number of requests is counted first, the std::vector<ep_lib::MPI_Request> is sized once, and each send/receive writes into &request[request_position++] (likewise for the requestOnReturn array). New overloads of sendIndexToClients, recvIndexFromClients, sendInfoToClients and recvInfoFromClients taking an ep_lib::MPI_Request* are added, each with its own documentation comment, and sendRecvRank / sendRecvOnReturn use the ep_lib calls with the same pre-sized request arrays.
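The recurring change in this file is the replacement of push_back on a std::vector<MPI_Request> by a pre-counted, fixed-size vector indexed with request_position++, so a request handle is never copied or moved after MPI_Isend/MPI_Irecv has been given its address. A sketch of the pattern against plain MPI (the ep_lib wrappers keep the same call shapes here):

// Sketch of the pre-sized request array pattern: count the non-blocking calls,
// allocate the request vector once, then hand each Irecv a stable &request[pos].
#include <mpi.h>
#include <vector>

void postReceives(const std::vector<int>& recvRanks,
                  const std::vector<int>& recvCounts,
                  std::vector<double>& recvBuff, MPI_Comm comm)
{
    int requestCount = 0;
    for (std::size_t i = 0; i < recvCounts.size(); ++i)
        if (recvCounts[i] != 0) ++requestCount;           // count before allocating

    std::vector<MPI_Request> request(requestCount);
    int position = 0, offset = 0;
    for (std::size_t i = 0; i < recvRanks.size(); ++i)
    {
        if (recvCounts[i] != 0)
            MPI_Irecv(&recvBuff[offset], recvCounts[i], MPI_DOUBLE,
                      recvRanks[i], 12, comm, &request[position++]);
        offset += recvCounts[i];
    }

    std::vector<MPI_Status> status(request.size());
    MPI_Waitall(static_cast<int>(request.size()), request.data(), status.data());
}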
XIOS/dev/dev_trunk_omp/src/client_server_mapping.cpp
Diff r1025:r1601 — adds using namespace ep_lib;.
XIOS/dev/dev_trunk_omp/src/client_server_mapping.hpp
Diff r1542:r1601 — computeConnectedClients() now takes an ep_lib::MPI_Comm& instead of MPI_Comm&.
XIOS/dev/dev_trunk_omp/src/client_server_mapping_distributed.cpp
Diff r1542:r1601 — adds using namespace ep_lib;.
XIOS/dev/dev_trunk_omp/src/client_server_mapping_distributed.hpp
Diff r1542:r1601 — the CClientServerMappingDistributed constructor takes an ep_lib::MPI_Comm&.
XIOS/dev/dev_trunk_omp/src/context_client.cpp
Diff r1475:r1601 — adds using namespace ep_lib;. The old commented-out MPI_UINT64_T variant of the timeline MPI_Allreduce is removed, and the buffer-size MPI_Allreduce no longer uses MPI_IN_PLACE (the same variable is passed as both send and receive buffer). The "Sent context Finalize event" debug message and the per-connection/total memory reports are wrapped in #pragma omp critical (_output).
XIOS/dev/dev_trunk_omp/src/context_client.hpp
Diff r1232:r1601 — the CContextClient constructor and the interComm/intraComm members use ep_lib::MPI_Comm.
XIOS/dev/dev_trunk_omp/src/context_server.cpp
Diff r1230:r1601 — adds using namespace ep_lib;. MPI_Iprobe(MPI_ANY_SOURCE, 20, ...) becomes MPI_Iprobe(-2, 20, ...) (ep_lib's any-source value), and the source rank is read from status.MPI_SOURCE or status.ep_src depending on _usingMPI / _usingEP. The blocking MPI_Recv of the buffer size is replaced by MPI_Irecv + MPI_Wait. The context finalize message and the memory reports are wrapped in #pragma omp critical (_output).
XIOS/dev/dev_trunk_omp/src/context_server.hpp
Diff r1228:r1601 — the constructor, listenPendingRequest() and the intraComm/interComm/pendingRequest members use ep_lib::MPI_Comm, ep_lib::MPI_Status and ep_lib::MPI_Request.
XIOS/dev/dev_trunk_omp/src/cxios.cpp
Diff r1519:r1601 — adds using namespace ep_lib;. The static path strings (rootFile, xiosCodeId, clientFile, serverFile, serverPrmFile, serverSndFile) become const. initialize() now parses the root file inside an omp critical section (with temporary debug prints of the parsing thread) followed by a barrier. globalComm is no longer MPI_COMM_WORLD: the master thread calls MPI_Comm_create_endpoints() on MPI_COMM_WORLD with num_ep = omp_get_num_threads() for clients (1 for servers), publishes the resulting array through passage, and after a barrier each thread takes passage[omp_get_thread_num()]. initClientSide() sets isClient/isServer before initialize(), opens the client log streams inside omp critical, and the registry dump message goes through critical (_output); initServerSide() likewise sets the flags before calling initServer().
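The master/barrier hand-off that replaces globalComm = MPI_COMM_WORLD is the structural core of the endpoint approach: one thread creates num_ep endpoint communicators, publishes the array, and after a barrier every thread takes its own slot. The sketch below is self-contained OpenMP only; Endpoint and make_endpoints are hypothetical stand-ins for ep_lib::MPI_Comm and the MPI_Comm_create_endpoints call used in the diff.

// Self-contained sketch of the master/barrier hand-off used to give each
// OpenMP thread its own communicator.
#include <omp.h>
#include <vector>

struct Endpoint { int index; };

static std::vector<Endpoint> make_endpoints(int num_ep)
{
    std::vector<Endpoint> eps(num_ep);
    for (int i = 0; i < num_ep; ++i) eps[i].index = i;    // placeholder for real endpoint creation
    return eps;
}

Endpoint acquireGlobalComm()                               // intended to be called inside a parallel region
{
    static std::vector<Endpoint> passage;                  // shared slot, filled once

#pragma omp master
    {
        passage = make_endpoints(omp_get_num_threads());   // one endpoint per client thread
    }
#pragma omp barrier                                        // nobody reads passage before it is filled

    return passage[omp_get_thread_num()];                  // each thread takes its own endpoint
}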
XIOS/dev/dev_trunk_omp/src/cxios.hpp
Diff r1377:r1601 — initClientSide() takes ep_lib::MPI_Comm&, globalComm becomes ep_lib::MPI_Comm, the path-string statics become const, and the remaining static flags and parameters (isClient, isServer, globalComm, printLogs2Files, usingOasis, usingServer, usingServer2, ratioServer2, nbPoolsServer2, bufferSizeFactor, minBufferSize, maxBufferSize, isOptPerformance, globalRegistry, recvFieldTimeout) each get a #pragma omp threadprivate declaration (as written, the pragma following maxBufferSize repeats minBufferSize).
XIOS/dev/dev_trunk_omp/src/dht_auto_indexing.cpp
Diff r1158:r1601 — adds using namespace ep_lib;.
XIOS/dev/dev_trunk_omp/src/dht_auto_indexing.hpp
Diff r924:r1601 — both CDHTAutoIndexing constructors take an ep_lib::MPI_Comm&.
XIOS/dev/dev_trunk_omp/src/event_client.cpp
Diff r1377:r1601 — the checkEventSync debug message ("Send event ...") is wrapped in #pragma omp critical(_output).
XIOS/dev/dev_trunk_omp/src/event_scheduler.cpp
Diff r1224:r1601 — adds using namespace ep_lib; and replaces MPI_ANY_SOURCE with -2 (ep_lib's any-source value) in the MPI_Iprobe / MPI_Irecv calls that poll parent and child requests.
XIOS/dev/dev_trunk_omp/src/event_scheduler.hpp
Diff r591:r1601 — the CEventScheduler constructor takes an ep_lib::MPI_Comm&, SPendingRequest::request becomes ep_lib::MPI_Request, and the internal communicator becomes ep_lib::MPI_Comm.
XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.cpp
Diff r1542:r1601 — adds #include "mpi.hpp" and using namespace ep_lib;. The static engines map becomes a lazily allocated pointer engines_ptr, created on the first call to CSpatialTransformFilterEngine::get(). In the data exchange, sendRecvRequest is sized up front (localIndexToSend.size() + itListRecv->size()) and MPI_Isend/MPI_Irecv write into sendRecvRequest[position++] instead of push_back.
XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.hpp
Diff r1542:r1601 — the static engines map is replaced by the pointer engines_ptr marked #pragma omp threadprivate.
XIOS/dev/dev_trunk_omp/src/filter/temporal_filter.cpp
Diff r1523:r1601 — removes a commented-out initializer for nextSamplingDate; no functional change.
XIOS/dev/dev_trunk_omp/src/group_factory.cpp
Diff r501:r1601 — the static StdString CurrContext becomes the pointer CurrContext_ptr, allocated on first call to SetCurrentContextId() and dereferenced in GetCurrentContextId().
XIOS/dev/dev_trunk_omp/src/group_factory.hpp
Diff r1542:r1601 — declares the static CurrContext_ptr pointer with #pragma omp threadprivate.
XIOS/dev/dev_trunk_omp/src/indent.hpp
Diff r501:r1601 — marks the static members defaultIncSize and index as #pragma omp threadprivate.
XIOS/dev/dev_trunk_omp/src/indent_xml.hpp
Diff r591:r1601 — marks the static members Indent, Increm and WithLine as #pragma omp threadprivate.
XIOS/dev/dev_trunk_omp/src/interface/c/icdata.cpp
Diff r1587:r1601 — includes mpi_std.hpp, comments out the oasis_cinterface.hpp include, and uses ep_lib::MPI_Comm for the local and return communicators. The Fortran/C handle conversions become conditional: with _usingMPI they go through MPI_Comm_f2c / MPI_Comm_c2f, with _usingEP through ep_lib::EP_Comm_f2c / ep_lib::EP_Comm_c2f (clearing ep_lib::fc_comm_map at initialization).
XIOS/dev/dev_trunk_omp/src/interface/c/oasis_cinterface.cpp
Diff r501:r1601 — replaces the mpi.hpp include with using namespace ep_lib; and comments out the MPI_Comm_f2c conversions in oasis_get_localcomm, oasis_get_intracomm and oasis_get_intercomm.
XIOS/dev/dev_trunk_omp/src/interface/c/oasis_cinterface.hpp
Diff r501:r1601 — the Fortran and C communicator arguments become ep_lib::MPI_Fint* and ep_lib::MPI_Comm&.
XIOS/dev/dev_trunk_omp/src/io/inetcdf4.cpp
r1534 r1601 2 2 #include "netCdfInterface.hpp" 3 3 #include "netCdf_cf_constant.hpp" 4 4 #include "ep_mpi.hpp" 5 5 #include <boost/algorithm/string.hpp> 6 6 7 7 namespace xios 8 8 { 9 CINetCDF4::CINetCDF4(const StdString& filename, const MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/,9 CINetCDF4::CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, 10 10 bool readMetaDataPar /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 11 11 { … … 14 14 { 15 15 int commSize = 0; 16 MPI_Comm_size(*comm, &commSize);16 ep_lib::MPI_Comm_size(*comm, &commSize); 17 17 if (commSize <= 1) 18 18 comm = NULL; … … 23 23 // even if Parallel NetCDF ends up being used. 24 24 if (mpi) 25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp);25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 26 26 else 27 27 CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); -
XIOS/dev/dev_trunk_omp/src/io/inetcdf4.hpp
r1485 r1601 7 7 #include "array_new.hpp" 8 8 9 #include "mpi .hpp"9 #include "mpi_std.hpp" 10 10 #include "netcdf.hpp" 11 11 … … 22 22 public: 23 23 /// Constructors /// 24 CINetCDF4(const StdString& filename, const MPI_Comm* comm = NULL, bool multifile = true,24 CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 25 25 bool readMetaDataPar = false, const StdString& timeCounterName = "time_counter"); 26 26 -
XIOS/dev/dev_trunk_omp/src/io/nc4_data_input.cpp
r1582 r1601 10 10 namespace xios 11 11 { 12 CNc4DataInput::CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/,12 CNc4DataInput::CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, 13 13 bool readMetaDataPar /*= false*/, bool ugridConvention /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 14 14 : SuperClass() … … 55 55 CArray<double,1> fieldData(grid->getWrittenDataSize()); 56 56 if (!field->default_value.isEmpty()) fieldData = field->default_value; 57 57 #ifdef _usingEP 58 SuperClass::type = ONE_FILE; 59 printf("SuperClass::type = %d\n", SuperClass::type); 60 #endif 61 58 62 switch (SuperClass::type) 59 63 { -
XIOS/dev/dev_trunk_omp/src/io/nc4_data_input.hpp
r1486 r1601 3 3 4 4 /// XIOS headers /// 5 #include "mpi_std.hpp" 5 6 #include "xios_spl.hpp" 6 7 #include "data_input.hpp" … … 23 24 24 25 /// Constructors /// 25 CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective = true,26 CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 26 27 bool readMetaDataPar = false, bool ugridConvention = false, const StdString& timeCounterName = "time_counter"); 27 28 CNc4DataInput(const CNc4DataInput& dataInput); // Not implemented. … … 70 71 private: 71 72 /// Private attributes /// 72 MPI_Comm comm_file;73 ep_lib::MPI_Comm comm_file; 73 74 const StdString filename; 74 75 bool isCollective; -
XIOS/dev/dev_trunk_omp/src/io/nc4_data_output.cpp
r1559 r1601 28 28 CNc4DataOutput::CNc4DataOutput 29 29 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 30 MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 31 31 : SuperClass() 32 32 , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) -
XIOS/dev/dev_trunk_omp/src/io/nc4_data_output.hpp
r1542 r1601 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "onetcdf4.hpp" 7 8 #include "data_output.hpp" … … 27 28 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, 28 29 bool useCFConvention, 29 MPI_Comm comm_file, bool multifile, bool isCollective = true,30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 30 31 const StdString& timeCounterName = "time_counter"); 31 32 … … 117 118 118 119 /// Propriétés privées /// 119 MPI_Comm comm_file;120 ep_lib::MPI_Comm comm_file; 120 121 const StdString filename; 121 122 std::map<Time, StdSize> timeToRecordCache; -
XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.cpp
r1454 r1601 10 10 #include "netCdfInterface.hpp" 11 11 #include "netCdfException.hpp" 12 12 #include "ep_mpi.hpp" 13 13 namespace xios 14 14 { … … 22 22 int CNetCdfInterface::create(const StdString& fileName, int cMode, int& ncId) 23 23 { 24 int status = nc_create(fileName.c_str(), cMode, &ncId); 24 int status; 25 #pragma omp critical (_netcdf) 26 { 27 info(100)<<"start nc_create"<<std::endl; 28 status = nc_create(fileName.c_str(), cMode, &ncId); 29 info(100)<<"end nc_create"<<std::endl; 30 } 25 31 if (NC_NOERR != status) 26 32 { … … 49 55 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, MPI_Comm comm, MPI_Info info, int& ncId) 50 56 { 51 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId); 57 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, to_mpi_info(MPI_INFO_NULL), &ncId); 58 52 59 if (NC_NOERR != status) 53 60 { … … 74 81 int CNetCdfInterface::open(const StdString& fileName, int oMode, int& ncId) 75 82 { 76 int status = nc_open(fileName.c_str(), oMode, &ncId); 83 int status; 84 #pragma omp critical (_netcdf) 85 { 86 info(100)<<"start nc_open"<<std::endl; 87 status = nc_open(fileName.c_str(), oMode, &ncId); 88 info(100)<<"end nc_open"<<std::endl; 89 } 77 90 if (NC_NOERR != status) 78 91 { … … 102 115 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, MPI_Comm comm, MPI_Info info, int& ncId) 103 116 { 104 int status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); 117 int status = xios::nc_open_par(fileName.c_str(), oMode, comm, to_mpi_info(MPI_INFO_NULL), &ncId); 118 105 119 if (NC_NOERR != status) 106 120 { … … 125 139 int CNetCdfInterface::close(int ncId) 126 140 { 127 int status = nc_close(ncId); 141 int status = NC_NOERR; 142 #pragma omp critical (_netcdf) 143 { 144 info(100)<<"start nc_close"<<std::endl; 145 status = nc_close(ncId); 146 info(100)<<"end nc_close"<<std::endl; 147 } 148 128 149 if (NC_NOERR != status) 129 150 { … … 147 168 int CNetCdfInterface::reDef(int ncId) 148 169 { 149 int status = nc_redef(ncId); 170 int status; 171 #pragma omp critical (_netcdf) 172 { 173 info(100)<<"start nc_reDef"<<std::endl; 174 status = nc_redef(ncId); 175 info(100)<<"end nc_reDef"<<std::endl; 176 } 177 150 178 if (NC_NOERR != status) 151 179 { … … 169 197 int CNetCdfInterface::endDef(int ncId) 170 198 { 171 int status = nc_enddef(ncId); 199 int status; 200 #pragma omp critical (_netcdf) 201 { 202 info(100)<<"start nc_enddef"<<std::endl; 203 status = nc_enddef(ncId); 204 info(100)<<"end nc_enddef"<<std::endl; 205 } 172 206 if (NC_NOERR != status) 173 207 { … … 194 228 int CNetCdfInterface::inqNcId(int ncid, const StdString& grpName, int& grpId) 195 229 { 196 int status = nc_inq_ncid(ncid, grpName.c_str(), &grpId); 230 int status; 231 #pragma omp critical (_netcdf) 232 { 233 info(100)<<"start nc_inq_ncid"<<std::endl; 234 status = nc_inq_ncid(ncid, grpName.c_str(), &grpId); 235 info(100)<<"end nc_inq_ncid"<<std::endl; 236 } 237 197 238 if (NC_NOERR != status) 198 239 { … … 220 261 int CNetCdfInterface::inqVarId(int ncid, const StdString& varName, int& varId) 221 262 { 222 int status = nc_inq_varid(ncid, varName.c_str(), &varId); 263 int status; 264 #pragma omp critical (_netcdf) 265 { 266 info(100)<<"start nc_inq_varid"<<std::endl; 267 status = nc_inq_varid(ncid, varName.c_str(), &varId); 268 info(100)<<"end nc_inq_varid"<<std::endl; 269 } 223 270 if (NC_NOERR != status) 224 271 { … … 245 292 int CNetCdfInterface::inqDimId(int ncid, const StdString& dimName, int& dimId) 246 293 { 247 int status = 
nc_inq_dimid(ncid, dimName.c_str(), &dimId); 294 int status; 295 #pragma omp critical (_netcdf) 296 { 297 info(100)<<"start nc_inq_dimid"<<std::endl; 298 status = nc_inq_dimid(ncid, dimName.c_str(), &dimId); 299 info(100)<<"end nc_inq_dimid"<<std::endl; 300 } 301 248 302 if (NC_NOERR != status) 249 303 { … … 271 325 { 272 326 char varNameBuff[NC_MAX_NAME + 1]; 273 int status = nc_inq_varname(ncid, varId, varNameBuff); 327 int status; 328 #pragma omp critical (_netcdf) 329 { 330 info(100)<<"start nc_inq_varname"<<std::endl; 331 status = nc_inq_varname(ncid, varId, varNameBuff); 332 info(100)<<"end nc_inq_varname"<<std::endl; 333 } 274 334 if (NC_NOERR != status) 275 335 { … … 295 355 int CNetCdfInterface::inqUnLimDim(int ncid, int& dimId) 296 356 { 297 int status = nc_inq_unlimdim(ncid, &dimId); 357 int status; 358 #pragma omp critical (_netcdf) 359 { 360 info(100)<<"start nc_inq_unlimdim"<<std::endl; 361 status = nc_inq_unlimdim(ncid, &dimId); 362 info(100)<<"end nc_inq_unlimdim"<<std::endl; 363 } 298 364 if (NC_NOERR != status) 299 365 { … … 321 387 { 322 388 char fullNameIn[NC_MAX_NAME + 1]; 323 int status = nc_inq_dimname(ncid, dimId, fullNameIn); 389 int status; 390 #pragma omp critical (_netcdf) 391 { 392 info(100)<<"start nc_inq_dimname"<<std::endl; 393 status = nc_inq_dimname(ncid, dimId, fullNameIn); 394 info(100)<<"end nc_inq_dimname"<<std::endl; 395 } 324 396 if (NC_NOERR != status) 325 397 { … … 346 418 int CNetCdfInterface::inqDimLen(int ncid, int dimId, StdSize& dimLen) 347 419 { 348 int status = nc_inq_dimlen(ncid, dimId, &dimLen); 420 int status; 421 #pragma omp critical (_netcdf) 422 { 423 info(100)<<"start nc_inq_dimlen"<<std::endl; 424 status = nc_inq_dimlen(ncid, dimId, &dimLen); 425 info(100)<<"end nc_inq_dimlen"<<std::endl; 426 } 349 427 if (NC_NOERR != status) 350 428 { … … 371 449 int CNetCdfInterface::inqVarNDims(int ncid, int varId, int& nDims) 372 450 { 373 int status = nc_inq_varndims(ncid, varId, &nDims); 451 int status; 452 #pragma omp critical (_netcdf) 453 { 454 info(100)<<"start nc_inq_varndims"<<std::endl; 455 status = nc_inq_varndims(ncid, varId, &nDims); 456 info(100)<<"end nc_inq_varndims"<<std::endl; 457 } 374 458 if (NC_NOERR != status) 375 459 { … … 396 480 int CNetCdfInterface::inqVarDimId(int ncid, int varId, int* dimIds) 397 481 { 398 int status = nc_inq_vardimid(ncid, varId, dimIds); 482 int status; 483 #pragma omp critical (_netcdf) 484 { 485 info(100)<<"start nc_inq_vardimid"<<std::endl; 486 status = nc_inq_vardimid(ncid, varId, dimIds); 487 info(100)<<"end nc_inq_vardimid"<<std::endl; 488 } 399 489 if (NC_NOERR != status) 400 490 { … … 422 512 int CNetCdfInterface::inqDimIds(int ncid, int& nDims, int* dimIds, int includeParents) 423 513 { 424 int status = nc_inq_dimids(ncid, &nDims, dimIds, includeParents); 514 int status; 515 #pragma omp critical (_netcdf) 516 { 517 info(100)<<"start nc_inq_dimids"<<std::endl; 518 status = nc_inq_dimids(ncid, &nDims, dimIds, includeParents); 519 info(100)<<"end nc_inq_dimids"<<std::endl; 520 } 425 521 if (NC_NOERR != status) 426 522 { … … 449 545 StdSize strlen = 0; 450 546 std::vector<char> buff; 451 int status = nc_inq_grpname_full(ncid, &strlen, NULL); 452 if (NC_NOERR == status) 453 { 454 buff.resize(strlen + 1); 455 status = nc_inq_grpname_full(ncid, NULL, &buff[0]); 456 } 457 547 int status; 548 #pragma omp critical (_netcdf) 549 { 550 info(100)<<"start nc_inq_grpname_full"<<std::endl; 551 status = nc_inq_grpname_full(ncid, &strlen, NULL); 552 info(100)<<"end nc_inq_grpname_full"<<std::endl; 553 554 if 
(NC_NOERR == status) 555 { 556 buff.resize(strlen + 1); 557 status = nc_inq_grpname_full(ncid, NULL, &buff[0]); 558 } 559 info(100)<<"start nc_inq_grpname_full"<<std::endl; 560 } 458 561 if (NC_NOERR != status) 459 562 { … … 482 585 int CNetCdfInterface::inqGrpIds(int ncid, int& numgrps, int* ncids) 483 586 { 484 int status = nc_inq_grps(ncid, &numgrps, ncids); 587 int status; 588 #pragma omp critical (_netcdf) 589 { 590 info(100)<<"start nc_inq_grps"<<std::endl; 591 status = nc_inq_grps(ncid, &numgrps, ncids); 592 info(100)<<"end nc_inq_grps"<<std::endl; 593 } 485 594 if (NC_NOERR != status) 486 595 { … … 507 616 int CNetCdfInterface::inqVarIds(int ncid, int& nvars, int* varids) 508 617 { 509 int status = nc_inq_varids(ncid, &nvars, varids); 618 int status; 619 #pragma omp critical (_netcdf) 620 { 621 info(100)<<"start nc_inq_varids"<<std::endl; 622 status = nc_inq_varids(ncid, &nvars, varids); 623 info(100)<<"end nc_inq_varids"<<std::endl; 624 } 510 625 if (NC_NOERR != status) 511 626 { … … 534 649 int CNetCdfInterface::inqAtt(int ncid, int varid, const StdString& name, nc_type& type, size_t& len) 535 650 { 536 int status = nc_inq_att(ncid, varid, name.c_str(), &type, &len); 651 int status; 652 #pragma omp critical (_netcdf) 653 { 654 info(100)<<"start nc_inq_att"<<std::endl; 655 status = nc_inq_att(ncid, varid, name.c_str(), &type, &len); 656 info(100)<<"end nc_inq_att"<<std::endl; 657 } 658 537 659 if (NC_NOERR != status) 538 660 { … … 558 680 int CNetCdfInterface::inqNAtts(int ncid, int& ngatts) 559 681 { 560 int status = nc_inq_natts(ncid, &ngatts); 682 int status; 683 #pragma omp critical (_netcdf) 684 { 685 info(100)<<"start nc_inq_natts"<<std::endl; 686 status = nc_inq_natts(ncid, &ngatts); 687 info(100)<<"end nc_inq_natts"<<std::endl; 688 } 561 689 if (NC_NOERR != status) 562 690 { … … 583 711 int CNetCdfInterface::inqVarNAtts(int ncid, int varid, int& natts) 584 712 { 585 int status = nc_inq_varnatts(ncid, varid, &natts); 713 int status; 714 #pragma omp critical (_netcdf) 715 { 716 info(100)<<"start nc_inq_varnatts"<<std::endl; 717 status = nc_inq_varnatts(ncid, varid, &natts); 718 info(100)<<"end nc_inq_varnatts"<<std::endl; 719 } 586 720 if (NC_NOERR != status) 587 721 { … … 604 738 { 605 739 std::vector<char> attName(NC_MAX_NAME + 1,' '); 606 int status = nc_inq_attname(ncid, varid, attnum, &attName[0]); 740 int status; 741 #pragma omp critical (_netcdf) 742 { 743 info(100)<<"start nc_inq_attname"<<std::endl; 744 status = nc_inq_attname(ncid, varid, attnum, &attName[0]); 745 info(100)<<"end nc_inq_attname"<<std::endl; 746 } 607 747 if (NC_NOERR != status) 608 748 { … … 635 775 int CNetCdfInterface::defGrp(int parentNcid, const StdString& grpName, int& grpId) 636 776 { 637 int status = nc_def_grp(parentNcid, grpName.c_str(), &grpId); 777 int status; 778 #pragma omp critical (_netcdf) 779 { 780 info(100)<<"start nc_def_grp"<<std::endl; 781 status = nc_def_grp(parentNcid, grpName.c_str(), &grpId); 782 info(100)<<"end nc_def_grp"<<std::endl; 783 } 638 784 if (NC_NOERR != status) 639 785 { … … 660 806 int CNetCdfInterface::defDim(int ncid, const StdString& dimName, StdSize dimLen, int& dimId) 661 807 { 662 int status = nc_def_dim(ncid, dimName.c_str(), dimLen, &dimId); 808 int status; 809 #pragma omp critical (_netcdf) 810 { 811 info(100)<<"start nc_def_dim"<<std::endl; 812 status = nc_def_dim(ncid, dimName.c_str(), dimLen, &dimId); 813 info(100)<<"end nc_def_dim"<<std::endl; 814 } 663 815 if (NC_NOERR != status) 664 816 { … … 691 843 int nDims, const int dimIds[], int& varId) 692 
844 { 693 int status = nc_def_var(ncid, varName.c_str(), xtype, nDims, dimIds, &varId); 845 int status; 846 #pragma omp critical (_netcdf) 847 { 848 info(100)<<"start nc_def_var"<<std::endl; 849 status = nc_def_var(ncid, varName.c_str(), xtype, nDims, dimIds, &varId); 850 info(100)<<"end nc_def_var"<<std::endl; 851 } 694 852 if (NC_NOERR != status) 695 853 { … … 720 878 int CNetCdfInterface::defVarChunking(int ncid, int varId, int storage, StdSize chunkSize[]) 721 879 { 722 int status = nc_def_var_chunking(ncid, varId, storage, chunkSize); 880 int status; 881 #pragma omp critical (_netcdf) 882 { 883 info(100)<<"start nc_def_var_chunking"<<std::endl; 884 status = nc_def_var_chunking(ncid, varId, storage, chunkSize); 885 info(100)<<"end nc_def_var_chunking"<<std::endl; 886 } 723 887 if (NC_NOERR != status) 724 888 { … … 748 912 749 913 if (compressionLevel == 0) return NC_NOERR ; 750 int status = nc_def_var_deflate(ncid, varId, (compressionLevel > 0), (compressionLevel > 0), compressionLevel); 914 int status; 915 #pragma omp critical (_netcdf) 916 { 917 info(100)<<"start nc_def_var_deflate"<<std::endl; 918 status = nc_def_var_deflate(ncid, varId, (compressionLevel > 0), (compressionLevel > 0), compressionLevel); 919 info(100)<<"end nc_def_var_deflate"<<std::endl; 920 } 751 921 if (NC_NOERR != status) 752 922 { … … 774 944 { 775 945 int old_fill_mode; 776 int status = nc_set_fill(ncid, fill ? NC_FILL: NC_NOFILL, &old_fill_mode); 946 int status; 947 #pragma omp critical (_netcdf) 948 { 949 info(100)<<"start nc_set_fill"<<std::endl; 950 status = nc_set_fill(ncid, fill ? NC_FILL: NC_NOFILL, &old_fill_mode); 951 info(100)<<"end nc_set_fill"<<std::endl; 952 } 777 953 if (NC_NOERR != status) 778 954 { … … 801 977 int CNetCdfInterface::defVarFill(int ncid, int varId, int noFill, void* fillValue) 802 978 { 803 int status = nc_def_var_fill(ncid, varId, noFill, fillValue); 979 int status; 980 #pragma omp critical (_netcdf) 981 { 982 info(100)<<"start nc_def_var_fill"<<std::endl; 983 status = nc_def_var_fill(ncid, varId, noFill, fillValue); 984 info(100)<<"end nc_def_var_fill"<<std::endl; 985 } 804 986 if (NC_NOERR != status) 805 987 { … … 829 1011 int CNetCdfInterface::varParAccess(int ncid, int varId, int access) 830 1012 { 831 int status = nc_var_par_access(ncid, varId, access); 1013 int status; 1014 #pragma omp critical (_netcdf) 1015 { 1016 info(100)<<"start nc_var_par_access"<<std::endl; 1017 status = nc_var_par_access(ncid, varId, access); 1018 info(100)<<"end nc_var_par_access"<<std::endl; 1019 } 832 1020 if (NC_NOERR != status) 833 1021 { … … 852 1040 int CNetCdfInterface::sync(int ncid) 853 1041 { 854 int status = nc_sync(ncid); 1042 int status; 1043 #pragma omp critical (_netcdf) 1044 { 1045 info(100)<<"start nc_sync"<<std::endl; 1046 status = nc_sync(ncid); 1047 info(100)<<"end nc_sync"<<std::endl; 1048 } 855 1049 if (NC_NOERR != status) 856 1050 { … … 872 1066 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, double* data) 873 1067 { 874 return nc_get_att_double(ncid, varid, attrName, data); 1068 int status; 1069 #pragma omp critical (_netcdf) 1070 { 1071 info(100)<<"start nc_get_att_double"<<std::endl; 1072 status = nc_get_att_double(ncid, varid, attrName, data); 1073 info(100)<<"end nc_get_att_double"<<std::endl; 1074 } 1075 return status; 875 1076 } 876 1077 … … 878 1079 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, float* data) 879 1080 { 880 return nc_get_att_float(ncid, varid, attrName, data); 1081 int status; 1082 #pragma 
omp critical (_netcdf) 1083 { 1084 info(100)<<"start nc_get_att_float"<<std::endl; 1085 status = nc_get_att_float(ncid, varid, attrName, data); 1086 info(100)<<"end nc_get_att_float"<<std::endl; 1087 } 1088 return status; 881 1089 } 882 1090 … … 884 1092 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, int* data) 885 1093 { 886 return nc_get_att_int(ncid, varid, attrName, data); 1094 int status; 1095 #pragma omp critical (_netcdf) 1096 { 1097 info(100)<<"start nc_get_att_int"<<std::endl; 1098 status = nc_get_att_int(ncid, varid, attrName, data); 1099 info(100)<<"end nc_get_att_int"<<std::endl; 1100 } 1101 return status; 887 1102 } 888 1103 … … 890 1105 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, long* data) 891 1106 { 892 return nc_get_att_long(ncid, varid, attrName, data); 1107 int status; 1108 #pragma omp critical (_netcdf) 1109 { 1110 info(100)<<"start nc_get_att_long"<<std::endl; 1111 status = nc_get_att_long(ncid, varid, attrName, data); 1112 info(100)<<"end nc_get_att_long"<<std::endl; 1113 } 1114 return status; 893 1115 } 894 1116 … … 896 1118 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, short* data) 897 1119 { 898 return nc_get_att_short(ncid, varid, attrName, data); 1120 int status; 1121 #pragma omp critical (_netcdf) 1122 { 1123 info(100)<<"start nc_get_att_short"<<std::endl; 1124 status = nc_get_att_short(ncid, varid, attrName, data); 1125 info(100)<<"end nc_get_att_short"<<std::endl; 1126 } 1127 return status; 899 1128 } 900 1129 … … 902 1131 int CNetCdfInterface::ncGetAttType(int ncid, int varid, const char* attrName, char* data) 903 1132 { 904 return nc_get_att_text(ncid, varid, attrName, data); 1133 int status; 1134 #pragma omp critical (_netcdf) 1135 { 1136 info(100)<<"start nc_get_att_text"<<std::endl; 1137 status = nc_get_att_text(ncid, varid, attrName, data); 1138 info(100)<<"end nc_get_att_text"<<std::endl; 1139 } 1140 return status; 905 1141 } 906 1142 … … 910 1146 StdSize numVal, const double* data) 911 1147 { 912 return nc_put_att_double(ncid, varid, attrName, NC_DOUBLE, numVal, data); 1148 int status; 1149 #pragma omp critical (_netcdf) 1150 { 1151 info(100)<<"start nc_put_att_double"<<std::endl; 1152 status = nc_put_att_double(ncid, varid, attrName, NC_DOUBLE, numVal, data); 1153 info(100)<<"end nc_put_att_double"<<std::endl; 1154 } 1155 return status; 913 1156 } 914 1157 … … 917 1160 StdSize numVal, const float* data) 918 1161 { 919 return nc_put_att_float(ncid, varid, attrName, NC_FLOAT, numVal, data); 1162 int status; 1163 #pragma omp critical (_netcdf) 1164 { 1165 info(100)<<"start nc_put_att_float"<<std::endl; 1166 status = nc_put_att_float(ncid, varid, attrName, NC_FLOAT, numVal, data); 1167 info(100)<<"end nc_put_att_float"<<std::endl; 1168 } 1169 return status; 920 1170 } 921 1171 … … 924 1174 StdSize numVal, const int* data) 925 1175 { 926 return nc_put_att_int(ncid, varid, attrName, NC_INT, numVal, data); 1176 int status; 1177 #pragma omp critical (_netcdf) 1178 { 1179 info(100)<<"start nc_put_att_int"<<std::endl; 1180 status = nc_put_att_int(ncid, varid, attrName, NC_INT, numVal, data); 1181 info(100)<<"end nc_put_att_int"<<std::endl; 1182 } 1183 return status; 927 1184 } 928 1185 … … 931 1188 StdSize numVal, const long* data) 932 1189 { 933 return nc_put_att_long(ncid, varid, attrName, NC_LONG, numVal, data); 1190 int status; 1191 #pragma omp critical (_netcdf) 1192 { 1193 info(100)<<"start nc_put_att_long"<<std::endl; 1194 status = nc_put_att_long(ncid, 
varid, attrName, NC_LONG, numVal, data); 1195 info(100)<<"end nc_put_att_long"<<std::endl; 1196 } 1197 return status; 934 1198 } 935 1199 … … 938 1202 StdSize numVal, const short* data) 939 1203 { 940 return nc_put_att_short(ncid, varid, attrName, NC_SHORT, numVal, data); 1204 int status; 1205 #pragma omp critical (_netcdf) 1206 { 1207 info(100)<<"start nc_put_att_short"<<std::endl; 1208 status = nc_put_att_short(ncid, varid, attrName, NC_SHORT, numVal, data); 1209 info(100)<<"end nc_put_att_short"<<std::endl; 1210 } 1211 return status; 941 1212 } 942 1213 … … 945 1216 StdSize numVal, const char* data) 946 1217 { 947 return nc_put_att_text(ncid, varid, attrName, numVal, data); 1218 int status; 1219 #pragma omp critical (_netcdf) 1220 { 1221 info(100)<<"start nc_put_att_text"<<std::endl; 1222 status = nc_put_att_text(ncid, varid, attrName, numVal, data); 1223 info(100)<<"end nc_put_att_text"<<std::endl; 1224 } 1225 return status; 948 1226 } 949 1227 … … 952 1230 int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, double* data) 953 1231 { 954 return nc_get_vara_double(ncid, varid, start, count, data); 1232 int status; 1233 #pragma omp critical (_netcdf) 1234 { 1235 info(100)<<"start nc_get_vara_double"<<std::endl; 1236 status = nc_get_vara_double(ncid, varid, start, count, data); 1237 info(100)<<"end nc_get_vara_double"<<std::endl; 1238 } 1239 return status; 955 1240 } 956 1241 … … 958 1243 int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, float* data) 959 1244 { 960 return nc_get_vara_float(ncid, varid, start, count, data); 1245 int status; 1246 #pragma omp critical (_netcdf) 1247 { 1248 info(100)<<"start nc_get_vara_float"<<std::endl; 1249 status = nc_get_vara_float(ncid, varid, start, count, data); 1250 info(100)<<"end nc_get_vara_float"<<std::endl; 1251 } 1252 return status; 961 1253 } 962 1254 … … 964 1256 int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, int* data) 965 1257 { 966 return nc_get_vara_int(ncid, varid, start, count, data); 1258 int status; 1259 #pragma omp critical (_netcdf) 1260 { 1261 info(100)<<"start nc_get_vara_int"<<std::endl; 1262 status = nc_get_vara_int(ncid, varid, start, count, data); 1263 info(100)<<"end nc_get_vara_int"<<std::endl; 1264 } 1265 return status; 967 1266 } 968 1267 … … 970 1269 int CNetCdfInterface::ncGetVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, char* data) 971 1270 { 972 return nc_get_vara_text(ncid, varid, start, count, data); 1271 int status; 1272 #pragma omp critical (_netcdf) 1273 { 1274 info(100)<<"start nc_get_vara_text"<<std::endl; 1275 status = nc_get_vara_text(ncid, varid, start, count, data); 1276 info(100)<<"end nc_get_vara_text"<<std::endl; 1277 } 1278 return status; 973 1279 } 974 1280 … … 977 1283 int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const double* data) 978 1284 { 979 return nc_put_vara_double(ncid, varid, start, count, data); 1285 int status; 1286 #pragma omp critical (_netcdf) 1287 { 1288 info(100)<<"start nc_put_vara_double"<<std::endl; 1289 status = nc_put_vara_double(ncid, varid, start, count, data); 1290 info(100)<<"end nc_put_vara_double"<<std::endl; 1291 } 1292 return status; 980 1293 } 981 1294 … … 983 1296 int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const float* data) 984 1297 { 985 return nc_put_vara_float(ncid, varid, 
start, count, data); 1298 int status; 1299 #pragma omp critical (_netcdf) 1300 { 1301 info(100)<<"start nc_put_vara_float"<<std::endl; 1302 status = nc_put_vara_float(ncid, varid, start, count, data); 1303 info(100)<<"end nc_put_vara_float"<<std::endl; 1304 } 1305 return status; 986 1306 } 987 1307 … … 989 1309 int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const int* data) 990 1310 { 991 return nc_put_vara_int(ncid, varid, start, count, data); 1311 int status; 1312 #pragma omp critical (_netcdf) 1313 { 1314 info(100)<<"start nc_put_vara_int"<<std::endl; 1315 status = nc_put_vara_int(ncid, varid, start, count, data); 1316 info(100)<<"end nc_put_vara_int"<<std::endl; 1317 } 1318 return status; 992 1319 } 993 1320 … … 995 1322 int CNetCdfInterface::ncPutVaraType(int ncid, int varid, const StdSize* start, const StdSize* count, const char* data) 996 1323 { 997 return nc_put_vara_text(ncid, varid, start, count, data); 1324 int status; 1325 #pragma omp critical (_netcdf) 1326 { 1327 info(100)<<"start nc_put_vara_text"<<std::endl; 1328 status = nc_put_vara_text(ncid, varid, start, count, data); 1329 info(100)<<"end nc_put_vara_text"<<std::endl; 1330 } 1331 return status; 998 1332 } 999 1333 … … 1008 1342 { 1009 1343 int varId = 0; 1010 return (NC_NOERR == (nc_inq_varid(ncId, varName.c_str(), &varId))); 1344 int status; 1345 #pragma omp critical (_netcdf) 1346 { 1347 info(100)<<"start isVarExisted"<<std::endl; 1348 status = nc_inq_varid(ncId, varName.c_str(), &varId); 1349 info(100)<<"end isVarExisted"<<std::endl; 1350 } 1351 return (NC_NOERR == status); 1011 1352 } 1012 1353 … … 1014 1355 { 1015 1356 int dimId = 0; 1016 return (NC_NOERR == (nc_inq_dimid(ncId, dimName.c_str(), &dimId))); 1357 int status; 1358 #pragma omp critical (_netcdf) 1359 { 1360 info(100)<<"start isDimExisted"<<std::endl; 1361 status = nc_inq_dimid(ncId, dimName.c_str(), &dimId); 1362 info(100)<<"end isDimExisted"<<std::endl; 1363 } 1364 return (NC_NOERR == status); 1017 1365 } 1018 1366 -
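The edits to netCdfInterface.cpp above all apply one pattern: each libnetcdf call is wrapped in the named OpenMP critical section _netcdf, with info(100) traces before and after, so that concurrent threads acting as endpoint MPI ranks never enter the non-thread-safe netCDF library at the same time. A minimal stand-alone sketch of that pattern, assuming only the standard netCDF C API (the helper name criticalNcOpen is illustrative, not part of XIOS):

    #include <netcdf.h>
    #include <string>

    // Sketch: serialize a netCDF call across OpenMP threads, mirroring the
    // wrapping applied to every CNetCdfInterface method in this changeset.
    static int criticalNcOpen(const std::string& fileName, int oMode, int& ncId)
    {
      int status;
      #pragma omp critical (_netcdf)      // same named section for every nc_* call
      {
        status = nc_open(fileName.c_str(), oMode, &ncId);
      }
      return status;                      // caller still compares against NC_NOERR
    }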
XIOS/dev/dev_trunk_omp/src/io/netCdfInterface.hpp
r811 r1601 10 10 #define __NETCDF_INTERFACE_HPP_ 11 11 12 #include "mpi_std.hpp" 12 13 #include "xios_spl.hpp" 13 14 … … 16 17 #endif 17 18 18 #include "mpi.hpp" 19 19 20 #include "netcdf.hpp" 20 21 -
XIOS/dev/dev_trunk_omp/src/io/netcdf.hpp
r685 r1601 1 1 #ifndef __XIOS_NETCDF_HPP__ 2 2 #define __XIOS_NETCDF_HPP__ 3 #include "mpi .hpp"3 #include "mpi_std.hpp" 4 4 #define MPI_INCLUDED 5 5 #include <netcdf.h> … … 33 33 { 34 34 #if defined(USING_NETCDF_PAR) 35 return ::nc_create_par(path, cmode, comm, info, ncidp) ; 35 int status; 36 #pragma omp critical (_netcdf) 37 { 38 status = ::nc_create_par(path, cmode, comm, info, ncidp) ; 39 } 40 return status; 36 41 #else 37 42 ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)", … … 44 49 { 45 50 #if defined(USING_NETCDF_PAR) 46 return ::nc_open_par(path, mode, comm, info, ncidp) ; 51 int status; 52 #pragma omp critical (_netcdf) 53 { 54 status = ::nc_open_par(path, mode, comm, info, ncidp) ; 55 } 56 return status; 47 57 #else 48 58 ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)", … … 55 65 { 56 66 #if defined(USING_NETCDF_PAR) 57 return ::nc_var_par_access(ncid, varid, par_access) ; 67 int status = ::nc_var_par_access(ncid, varid, par_access) ; 68 69 return status; 58 70 #else 59 71 ERROR("int nc_var_par_access(int ncid, int varid, int par_access)", -
XIOS/dev/dev_trunk_omp/src/io/onetcdf4.cpp
r1456 r1601 3 3 #include "onetcdf4.hpp" 4 4 #include "group_template.hpp" 5 #include " mpi.hpp"5 #include "ep_mpi.hpp" 6 6 #include "netcdf.hpp" 7 7 #include "netCdfInterface.hpp" … … 15 15 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 16 16 bool useCFConvention, 17 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)17 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 18 18 : path() 19 19 , wmpi(false) … … 33 33 34 34 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 35 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)35 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 36 36 { 37 37 this->useClassicFormat = useClassicFormat; … … 44 44 { 45 45 int commSize = 0; 46 MPI_Comm_size(*comm, &commSize);46 ep_lib::MPI_Comm_size(*comm, &commSize); 47 47 if (commSize <= 1) 48 48 comm = NULL; … … 58 58 CTimer::get("Files : create").resume(); 59 59 if (wmpi) 60 CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);60 CNetCdfInterface::createPar(filename, mode, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 61 61 else 62 62 CNetCdfInterface::create(filename, mode, this->ncidp); … … 70 70 CTimer::get("Files : open").resume(); 71 71 if (wmpi) 72 CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);72 CNetCdfInterface::openPar(filename, mode, to_mpi_comm((*comm)->mpi_comm), to_mpi_info(MPI_INFO_NULL), this->ncidp); 73 73 else 74 74 CNetCdfInterface::open(filename, mode, this->ncidp); -
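In the parallel branches above, the endpoint communicator can no longer be handed to netCDF directly: CONetCDF4 now unwraps it with to_mpi_comm((*comm)->mpi_comm) and to_mpi_info(MPI_INFO_NULL) before calling createPar/openPar. A hedged sketch of that unwrapping step, assuming (as the code above implies) that an ep_lib::MPI_Comm dereferences to a structure whose mpi_comm member holds the underlying handle; the function name openParallel is illustrative:

    // Sketch: open a NetCDF4 file for parallel read from an endpoint communicator.
    void openParallel(const StdString& name, const ep_lib::MPI_Comm& epComm, int& ncidp)
    {
      MPI_Comm rawComm = to_mpi_comm(epComm->mpi_comm);   // unwrap endpoint handle (ep_mpi.hpp)
      MPI_Info rawInfo = to_mpi_info(MPI_INFO_NULL);
      CNetCdfInterface::openPar(name, NC_NOWRITE | NC_MPIIO, rawComm, rawInfo, ncidp);
    }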
XIOS/dev/dev_trunk_omp/src/io/onetcdf4.hpp
r1456 r1601 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "exception.hpp" 7 8 #include "data_output.hpp" 8 9 #include "array_new.hpp" 9 #include "mpi.hpp"10 10 #include "netcdf.hpp" 11 11 … … 28 28 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 29 29 bool useCFConvention = true, 30 const MPI_Comm* comm = NULL, bool multifile = true,30 const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 31 31 const StdString& timeCounterName = "time_counter"); 32 32 … … 37 37 /// Initialisation /// 38 38 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 39 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);39 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 40 40 void close(void); 41 41 void sync(void); -
XIOS/dev/dev_trunk_omp/src/log.cpp
r523 r1601 1 1 #include "log.hpp" 2 #include <string> 3 #include <iostream> 4 #include <string> 2 5 3 6 namespace xios 4 7 { 8 std::filebuf* info_FB[16]; 9 10 5 11 CLog info("info") ; 6 12 CLog report("report") ; 7 13 CLog error("error", cerr.rdbuf()) ; 14 15 16 CLog& CLog::operator()(int l) 17 { 18 if (l<=level) 19 { 20 omp_set_lock( &mutex ); 21 rdbuf(strBuf_array[omp_get_thread_num()]); 22 *this<<"-> "<<name<<" : " ; 23 omp_unset_lock( &mutex ); 24 } 25 else rdbuf(NULL) ; 26 return *this; 27 } 8 28 } -
XIOS/dev/dev_trunk_omp/src/log.hpp
r523 r1601 5 5 #include <iostream> 6 6 #include <string> 7 #include <stdio.h> 8 #include <omp.h> 7 9 8 10 namespace xios … … 14 16 public : 15 17 CLog(const string& name_, std::streambuf* sBuff = cout.rdbuf()) 16 : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) {} 17 CLog& operator()(int l) 18 : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) 18 19 { 19 if (l<=level) 20 { 21 rdbuf(strBuf_); 22 *this<<"-> "<<name<<" : " ; 23 } 24 else rdbuf(NULL) ; 25 return *this; 20 omp_init_lock( &mutex ); 21 for(int i=0; i<16; i++) 22 strBuf_array[i] = sBuff; 26 23 } 24 25 ~CLog() 26 { 27 omp_destroy_lock( &mutex ); 28 } 29 30 CLog& operator()(int l); 27 31 void setLevel(int l) {level=l; } 28 32 int getLevel() {return level ;} … … 46 50 * \param [in] pointer to new streambuf 47 51 */ 48 void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); } 52 void changeStreamBuff(std::streambuf* sBuff) 53 { 54 strBuf_ = sBuff; 55 strBuf_array[omp_get_thread_num()] = sBuff; 56 rdbuf(sBuff); 57 } 49 58 50 59 int level ; 51 60 string name ; 52 61 std::streambuf* strBuf_; 62 std::streambuf* strBuf_array[16]; 63 omp_lock_t mutex; 53 64 }; 54 65 … … 56 67 extern CLog report; 57 68 extern CLog error; 69 70 extern std::filebuf* info_FB[16]; 58 71 } 59 72 #endif -
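The CLog changes above give each OpenMP thread its own streambuf slot (16 at most) and move the buffer selection into operator(), protected by an omp_lock_t, so that threads sharing one CLog instance write to their own log files. A reduced, self-contained sketch of the same mechanism (ThreadLog is an illustrative name; XIOS keeps this logic inside CLog):

    #include <iostream>
    #include <omp.h>

    class ThreadLog : public std::ostream
    {
    public:
      ThreadLog() : std::ostream(std::cout.rdbuf())
      {
        omp_init_lock(&mutex);
        for (int i = 0; i < 16; ++i) bufs[i] = std::cout.rdbuf();
      }
      ~ThreadLog() { omp_destroy_lock(&mutex); }

      // Select the calling thread's buffer before streaming, as CLog::operator() now does.
      ThreadLog& operator()()
      {
        omp_set_lock(&mutex);
        rdbuf(bufs[omp_get_thread_num()]);
        omp_unset_lock(&mutex);
        return *this;
      }

      // Equivalent of CLog::changeStreamBuff, but for the calling thread only.
      void setBuffer(std::streambuf* b) { bufs[omp_get_thread_num()] = b; rdbuf(b); }

    private:
      std::streambuf* bufs[16];
      omp_lock_t mutex;
    };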
XIOS/dev/dev_trunk_omp/src/mpi.hpp
r501 r1601 10 10 /* skip C++ Binding for OpenMPI */ 11 11 #define OMPI_SKIP_MPICXX 12 #ifdef _usingEP 13 #include <omp.h> 14 #include "../extern/src_ep_dev/ep_lib.hpp" 15 #include "../extern/src_ep_dev/ep_declaration.hpp" 16 //using namespace ep_lib; 17 #elif _usingMPI 18 #include <mpi.h> 19 #endif 12 20 13 #include <mpi.h>14 21 15 22 #endif -
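With this guard, compiling with -D_usingEP pulls in the endpoint library (ep_lib.hpp, ep_declaration.hpp) while -D_usingMPI keeps plain <mpi.h>; the rest of the sources now spell communicator types as ep_lib::MPI_Comm so a single tree builds in either mode. A hedged usage sketch, assuming ep_lib exposes MPI-style wrappers with the usual signatures in both modes (the function name is illustrative):

    #include "mpi.hpp"   // ep_lib under -D_usingEP, plain <mpi.h> under -D_usingMPI

    // Sketch: the same call compiles in both modes because ep_lib mirrors the MPI C API.
    void duplicateIntraComm(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm& copy)
    {
      ep_lib::MPI_Comm_dup(intraComm, &copy);
    }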
XIOS/dev/dev_trunk_omp/src/node/axis.cpp
r1566 r1601 14 14 #include "distribution_client.hpp" 15 15 16 using namespace ep_lib; 17 16 18 namespace xios { 17 19 … … 26 28 , transformationMap_(), hasValue(false), hasLabel(false) 27 29 , computedWrittenIndex_(false) 28 30 , clients() 29 31 { 30 32 } … … 38 40 , transformationMap_(), hasValue(false), hasLabel(false) 39 41 , computedWrittenIndex_(false) 40 42 , clients() 41 43 { 42 44 } … … 45 47 { /* Ne rien faire de plus */ } 46 48 47 std::map<StdString, ETranformationType> CAxis::transformationMapList_ = std::map<StdString, ETranformationType>();48 bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_); 49 std::map<StdString, ETranformationType> *CAxis::transformationMapList_ptr = 0; 50 49 51 bool CAxis::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 50 52 { … … 58 60 m["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING; 59 61 m["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS; 60 62 } 63 64 bool CAxis::initializeTransformationMap() 65 { 66 if(CAxis::transformationMapList_ptr == 0) CAxis::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 67 (*CAxis::transformationMapList_ptr)["zoom_axis"] = TRANS_ZOOM_AXIS; 68 (*CAxis::transformationMapList_ptr)["interpolate_axis"] = TRANS_INTERPOLATE_AXIS; 69 (*CAxis::transformationMapList_ptr)["extract_axis"] = TRANS_EXTRACT_AXIS; 70 (*CAxis::transformationMapList_ptr)["inverse_axis"] = TRANS_INVERSE_AXIS; 71 (*CAxis::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_AXIS; 72 (*CAxis::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS; 73 (*CAxis::transformationMapList_ptr)["reduce_axis"] = TRANS_REDUCE_AXIS_TO_AXIS; 74 (*CAxis::transformationMapList_ptr)["temporal_splitting"] = TRANS_TEMPORAL_SPLITTING; 75 (*CAxis::transformationMapList_ptr)["duplicate_scalar"] = TRANS_DUPLICATE_SCALAR_TO_AXIS; 61 76 } 62 77 … … 114 129 \return the number of indexes written by each server 115 130 */ 116 int CAxis::getNumberWrittenIndexes( MPI_Comm writtenCom)131 int CAxis::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 117 132 { 118 133 int writtenSize; 119 MPI_Comm_size(writtenCom, &writtenSize);134 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 120 135 return numberWrittenIndexes_[writtenSize]; 121 136 } … … 125 140 \return the total number of indexes written by the servers 126 141 */ 127 int CAxis::getTotalNumberWrittenIndexes( MPI_Comm writtenCom)142 int CAxis::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 128 143 { 129 144 int writtenSize; 130 MPI_Comm_size(writtenCom, &writtenSize);145 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 131 146 return totalNumberWrittenIndexes_[writtenSize]; 132 147 } … … 136 151 \return the offset of indexes written by each server 137 152 */ 138 int CAxis::getOffsetWrittenIndexes( MPI_Comm writtenCom)153 int CAxis::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 139 154 { 140 155 int writtenSize; 141 MPI_Comm_size(writtenCom, &writtenSize);156 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 142 157 return offsetWrittenIndexes_[writtenSize]; 143 158 } 144 159 145 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer( MPI_Comm writtenCom)160 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 146 161 { 147 162 int writtenSize; 148 MPI_Comm_size(writtenCom, &writtenSize);163 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 149 164 return compressedIndexToWriteOnServer[writtenSize]; 150 165 } … … 689 704 } 690 705 691 
void CAxis::computeWrittenCompressedIndex( MPI_Comm writtenComm)706 void CAxis::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 692 707 { 693 708 int writtenCommSize; 694 MPI_Comm_size(writtenComm, &writtenCommSize);709 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 695 710 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 696 711 return; … … 750 765 { 751 766 752 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);753 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);767 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 768 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 754 769 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 755 770 } … … 1346 1361 1347 1362 nodeElementName = node.getElementName(); 1348 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 1349 it = transformationMapList_.find(nodeElementName); 1363 1364 if(transformationMapList_ptr == 0) initializeTransformationMap(); 1365 std::map<StdString, ETranformationType>::const_iterator ite = (*CAxis::transformationMapList_ptr).end(), it; 1366 it = (*CAxis::transformationMapList_ptr).find(nodeElementName); 1350 1367 if (ite != it) 1351 1368 { -
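The static transformation registry of CAxis becomes a threadprivate pointer, allocated and filled on first use by each thread, instead of a map initialized once through the dummyTransformationMapList_ trick; the same change is applied to CDomain further down. A minimal sketch of that lazy, per-thread initialization pattern (registry entries abbreviated; the free function transformationMap is illustrative):

    #include <map>
    #include <string>

    enum ETranformationType { TRANS_ZOOM_AXIS, TRANS_INTERPOLATE_AXIS /* ... */ };

    // One pointer per OpenMP thread, allocated the first time that thread needs the map.
    static std::map<std::string, ETranformationType>* transformationMapList_ptr = 0;
    #pragma omp threadprivate(transformationMapList_ptr)

    static const std::map<std::string, ETranformationType>& transformationMap()
    {
      if (transformationMapList_ptr == 0)
      {
        transformationMapList_ptr = new std::map<std::string, ETranformationType>();
        (*transformationMapList_ptr)["zoom_axis"]        = TRANS_ZOOM_AXIS;
        (*transformationMapList_ptr)["interpolate_axis"] = TRANS_INTERPOLATE_AXIS;
        // ... remaining entries as in CAxis::initializeTransformationMap()
      }
      return *transformationMapList_ptr;
    }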
XIOS/dev/dev_trunk_omp/src/node/axis.hpp
r1562 r1601 16 16 #include "transformation.hpp" 17 17 #include "transformation_enum.hpp" 18 19 #include "mpi_std.hpp" 18 20 19 21 namespace xios { … … 68 70 const std::set<StdString> & getRelFiles(void) const; 69 71 70 int getNumberWrittenIndexes( MPI_Comm writtenCom);71 int getTotalNumberWrittenIndexes( MPI_Comm writtenCom);72 int getOffsetWrittenIndexes( MPI_Comm writtenCom);73 CArray<int, 1>& getCompressedIndexToWriteOnServer( MPI_Comm writtenCom);72 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 73 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 74 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 75 CArray<int, 1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 74 76 75 77 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid, … … 113 115 114 116 void computeWrittenIndex(); 115 void computeWrittenCompressedIndex( MPI_Comm);117 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 116 118 bool hasTransformation(); 117 119 void solveInheritanceTransformation(); … … 177 179 private: 178 180 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 179 static std::map<StdString, ETranformationType> transformationMapList_; 180 static bool dummyTransformationMapList_; 181 static bool initializeTransformationMap(); 182 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 183 #pragma omp threadprivate(transformationMapList_ptr) 184 //static bool dummyTransformationMapList_; 181 185 182 186 DECLARE_REF_FUNC(Axis,axis) -
XIOS/dev/dev_trunk_omp/src/node/compute_connectivity_domain.hpp
r934 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CComputeConnectivityDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/context.cpp
r1542 r1601 21 21 #include "distribute_file_server2.hpp" 22 22 23 using namespace ep_lib; 24 23 25 namespace xios { 24 26 25 std::shared_ptr<CContextGroup> CContext::root;27 std::shared_ptr<CContextGroup> * CContext::root_ptr = 0; 26 28 27 29 /// ////////////////////// Définitions ////////////////////// /// … … 66 68 CContextGroup* CContext::getRoot(void) 67 69 { 68 if (root.get()==NULL) root=std::shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName()));69 return root .get();70 if(root_ptr == 0) root_ptr = new std::shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 71 return root_ptr->get(); 70 72 } 71 73 … … 248 250 249 251 //! Initialize client side 250 void CContext::initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer /*= 0*/)252 void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/) 251 253 { 252 254 253 255 hasClient = true; 254 MPI_Comm intraCommServer, interCommServer;256 ep_lib::MPI_Comm intraCommServer, interCommServer; 255 257 256 258 257 if (CServer::serverLevel != 1) 258 // initClient is called by client 259 if (CServer::serverLevel != 1) // initClient is called by client 259 260 { 260 261 client = new CContextClient(this, intraComm, interComm, cxtServer); … … 266 267 else 267 268 { 268 MPI_Comm_dup(intraComm, &intraCommServer);269 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 269 270 comms.push_back(intraCommServer); 270 MPI_Comm_dup(interComm, &interCommServer);271 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 271 272 comms.push_back(interCommServer); 272 273 } … … 287 288 server = new CContextServer(this, intraCommServer, interCommServer); 288 289 } 289 else 290 // initClient is called by primary server290 291 else // initClient is called by primary server 291 292 { 292 293 clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 293 MPI_Comm_dup(intraComm, &intraCommServer);294 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 294 295 comms.push_back(intraCommServer); 295 MPI_Comm_dup(interComm, &interCommServer);296 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 296 297 comms.push_back(interCommServer); 297 298 serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); … … 361 362 } 362 363 363 void CContext::initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/)364 void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/) 364 365 { 365 366 hasServer=true; … … 379 380 registryOut->setPath(contextRegistryId) ; 380 381 381 MPI_Comm intraCommClient, interCommClient;382 ep_lib::MPI_Comm intraCommClient, interCommClient; 382 383 if (cxtClient) // Attached mode 383 384 { … … 387 388 else 388 389 { 389 MPI_Comm_dup(intraComm, &intraCommClient);390 ep_lib::MPI_Comm_dup(intraComm, &intraCommClient); 390 391 comms.push_back(intraCommClient); 391 MPI_Comm_dup(interComm, &interCommClient);392 ep_lib::MPI_Comm_dup(interComm, &interCommClient); 392 393 comms.push_back(interCommClient); 393 394 } … … 475 476 476 477 //! 
Free internally allocated communicators 477 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)478 MPI_Comm_free(&(*it));478 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 479 ep_lib::MPI_Comm_free(&(*it)); 479 480 comms.clear(); 480 481 482 #pragma omp critical (_output) 481 483 info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 482 484 } … … 494 496 { 495 497 // Blocking send of context finalize message to its client (e.g. primary server or model) 498 #pragma omp critical (_output) 496 499 info(100)<<"DEBUG: context "<<getId()<<" Send client finalize<<"<<endl ; 497 500 client->finalize(); … … 517 520 518 521 //! Free internally allocated communicators 519 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)520 MPI_Comm_free(&(*it));522 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 523 ep_lib::MPI_Comm_free(&(*it)); 521 524 comms.clear(); 522 525 526 #pragma omp critical (_output) 523 527 info(20)<<"CContext: Context <"<<getId()<<"> is finalized."<<endl; 524 528 } … … 531 535 void CContext::freeComms(void) 532 536 { 533 for (std::list< MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)534 MPI_Comm_free(&(*it));537 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 538 ep_lib::MPI_Comm_free(&(*it)); 535 539 comms.clear(); 536 540 } … … 1002 1006 } 1003 1007 1004 for (std::multimap<double,int>:: iterator it=poolDataSize.begin() ; it!=poolDataSize.end(); ++it) info(30)<<"Load Balancing for servers (perfect=1) : "<<it->second<<" : ratio "<<it->first*1./dataPerPool<<endl ; 1005 1008 for (std::multimap<double,int>:: iterator it=poolDataSize.begin() ; it!=poolDataSize.end(); ++it) 1009 { 1010 #pragma omp critical (_output) 1011 info(30)<<"Load Balancing for servers (perfect=1) : "<<it->second<<" : ratio "<<it->first*1./dataPerPool<<endl ; 1012 } 1013 1006 1014 for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) 1007 1015 { … … 1091 1099 } 1092 1100 1093 for (int i = 0; i < nbPools; ++i) info(100)<<"Pool server level2 "<<i<<" assigned file bandwith "<<bandwithSize[i]*86400.*4./1024/1024.<<" Mb / days"<<endl ; 1094 for (int i = 0; i < nbPools; ++i) info(100)<<"Pool server level2 "<<i<<" assigned grid memory "<<memorySize[i]*100/1024./1024.<<" Mb"<<endl ; 1095 1101 for (int i = 0; i < nbPools; ++i) 1102 { 1103 #pragma omp critical (_output) 1104 info(100)<<"Pool server level2 "<<i<<" assigned file bandwith "<<bandwithSize[i]*86400.*4./1024/1024.<<" Mb / days"<<endl ; 1105 } 1106 for (int i = 0; i < nbPools; ++i) 1107 { 1108 #pragma omp critical (_output) 1109 info(100)<<"Pool server level2 "<<i<<" assigned grid memory "<<memorySize[i]*100/1024./1024.<<" Mb"<<endl ; 1110 } 1096 1111 1097 1112 for (int i = 0; i < this->enabledReadModeFiles.size(); ++i) … … 1144 1159 for (; it != end; it++) 1145 1160 { 1161 #pragma omp critical (_output) 1146 1162 info(30)<<"Closing File : "<<(*it)->getId()<<endl; 1147 1163 (*it)->close(); … … 1831 1847 } 1832 1848 1849 #pragma omp critical (_output) 1833 1850 info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1834 1851 calendar->update(step); 1852 #pragma omp critical (_output) 1835 1853 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1836 1854 #ifdef XIOS_MEMTRACK_LIGHT 1855 #pragma omp critical (_output) 1837 1856 info(50) << " Current memory used by XIOS : "<< 
MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 1838 1857 #endif … … 1845 1864 } 1846 1865 else if (prevStep == step) 1866 { 1867 #pragma omp critical (_output) 1847 1868 info(50) << "updateCalendar: already at step " << step << ", no operation done." << endl; 1869 } 1848 1870 else // if (prevStep > step) 1849 1871 ERROR("void CContext::updateCalendar(int step)", … … 1901 1923 CContext* context = CObjectFactory::CreateObject<CContext>(id).get(); 1902 1924 getRoot(); 1903 if (!hasctxt) CGroupFactory::AddChild( root, context->getShared());1925 if (!hasctxt) CGroupFactory::AddChild(*root_ptr, context->getShared()); 1904 1926 1905 1927 #define DECLARE_NODE(Name_, name_) \ -
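Besides the ep_lib:: qualification of communicators, nearly every info()/report() statement in context.cpp that may execute inside an OpenMP region is now enclosed in the named critical section _output, keeping log lines from different threads contiguous. The idiom in isolation (self-contained; the message and variables are illustrative):

    #include <iostream>

    // Sketch: a named critical section keeps each thread's multi-part log line together,
    // as the _output sections above do for info()/report().
    void logRatio(int rank, double ratio)
    {
      #pragma omp critical (_output)
      {
        std::cout << "-> info : Load Balancing for servers (perfect=1) : "
                  << rank << " : ratio " << ratio << std::endl;
      }
    }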
XIOS/dev/dev_trunk_omp/src/node/context.hpp
r1542 r1601 5 5 #include "xios_spl.hpp" 6 6 //#include "node_type.hpp" 7 #include "mpi_std.hpp" 7 8 #include "calendar_wrapper.hpp" 8 9 … … 11 12 #include "garbage_collector.hpp" 12 13 #include "registry.hpp" 13 #include "mpi.hpp"14 14 15 15 … … 88 88 public : 89 89 // Initialize server or client 90 void initClient( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer = 0);91 void initServer( MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient = 0);90 void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0); 91 void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0); 92 92 bool isInitialized(void); 93 93 … … 239 239 240 240 // Context root 241 static std::shared_ptr<CContextGroup> root; 241 static std::shared_ptr<CContextGroup> *root_ptr; 242 #pragma omp threadprivate(root_ptr) 242 243 243 244 // Determine context on client or not … … 262 263 StdString idServer_; 263 264 CGarbageCollector garbageCollector; 264 std::list< MPI_Comm> comms; //!< Communicators allocated internally265 std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally 265 266 266 267 public: // Some function maybe removed in the near future -
XIOS/dev/dev_trunk_omp/src/node/domain.cpp
r1578 r1601 19 19 #include "client_server_mapping_distributed.hpp" 20 20 21 using namespace ep_lib; 22 21 23 #include <algorithm> 22 24 … … 68 70 } 69 71 70 std::map<StdString, ETranformationType> CDomain::transformationMapList_ = std::map<StdString, ETranformationType>(); 71 bool CDomain::_dummyTransformationMapList = CDomain::initializeTransformationMap(CDomain::transformationMapList_); 72 std::map<StdString, ETranformationType> *CDomain::transformationMapList_ptr = 0; 72 73 73 74 bool CDomain::initializeTransformationMap(std::map<StdString, ETranformationType>& m) … … 82 83 } 83 84 85 bool CDomain::initializeTransformationMap() 86 { 87 CDomain::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 88 (*CDomain::transformationMapList_ptr)["zoom_domain"] = TRANS_ZOOM_DOMAIN; 89 (*CDomain::transformationMapList_ptr)["interpolate_domain"] = TRANS_INTERPOLATE_DOMAIN; 90 (*CDomain::transformationMapList_ptr)["generate_rectilinear_domain"] = TRANS_GENERATE_RECTILINEAR_DOMAIN; 91 (*CDomain::transformationMapList_ptr)["compute_connectivity_domain"] = TRANS_COMPUTE_CONNECTIVITY_DOMAIN; 92 (*CDomain::transformationMapList_ptr)["expand_domain"] = TRANS_EXPAND_DOMAIN; 93 (*CDomain::transformationMapList_ptr)["reorder_domain"] = TRANS_REORDER_DOMAIN; 94 (*CDomain::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN; 95 } 96 84 97 const std::set<StdString> & CDomain::getRelFiles(void) const 85 98 { … … 92 105 \return the number of indexes written by each server 93 106 */ 94 int CDomain::getNumberWrittenIndexes( MPI_Comm writtenCom)107 int CDomain::getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 95 108 { 96 109 int writtenSize; 97 MPI_Comm_size(writtenCom, &writtenSize);110 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 98 111 return numberWrittenIndexes_[writtenSize]; 99 112 } … … 103 116 \return the total number of indexes written by the servers 104 117 */ 105 int CDomain::getTotalNumberWrittenIndexes( MPI_Comm writtenCom)118 int CDomain::getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom) 106 119 { 107 120 int writtenSize; 108 MPI_Comm_size(writtenCom, &writtenSize);121 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 109 122 return totalNumberWrittenIndexes_[writtenSize]; 110 123 } … … 114 127 \return the offset of indexes written by each server 115 128 */ 116 int CDomain::getOffsetWrittenIndexes( MPI_Comm writtenCom)129 int CDomain::getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom) 117 130 { 118 131 int writtenSize; 119 MPI_Comm_size(writtenCom, &writtenSize);132 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 120 133 return offsetWrittenIndexes_[writtenSize]; 121 134 } 122 135 123 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer( MPI_Comm writtenCom)136 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom) 124 137 { 125 138 int writtenSize; 126 MPI_Comm_size(writtenCom, &writtenSize);139 ep_lib::MPI_Comm_size(writtenCom, &writtenSize); 127 140 return compressedIndexToWriteOnServer[writtenSize]; 128 141 } … … 654 667 int v ; 655 668 v=ibegin ; 656 MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ;669 ep_lib::MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 657 670 v=jbegin ; 658 MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ;671 ep_lib::MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 659 672 v=ni ; 660 MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ;673 
ep_lib::MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 661 674 v=nj ; 662 MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ;663 664 MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ;665 MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ;675 ep_lib::MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 676 677 ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 678 ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 666 679 667 680 delete[] ibegin_g ; … … 1901 1914 } 1902 1915 1903 void CDomain::computeWrittenCompressedIndex( MPI_Comm writtenComm)1916 void CDomain::computeWrittenCompressedIndex(ep_lib::MPI_Comm writtenComm) 1904 1917 { 1905 1918 int writtenCommSize; 1906 MPI_Comm_size(writtenComm, &writtenCommSize);1919 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize); 1907 1920 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 1908 1921 return; … … 1961 1974 { 1962 1975 1963 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);1964 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm);1976 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 1977 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 1965 1978 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 1966 1979 } … … 3045 3058 3046 3059 nodeElementName = node.getElementName(); 3047 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 3048 it = transformationMapList_.find(nodeElementName); 3060 if(transformationMapList_ptr == 0) initializeTransformationMap(); 3061 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 3062 it = transformationMapList_ptr->find(nodeElementName); 3049 3063 if (ite != it) 3050 3064 { -
XIOS/dev/dev_trunk_omp/src/node/domain.hpp
r1578 r1601 17 17 #include "transformation_enum.hpp" 18 18 #include "server_distribution_description.hpp" 19 #include "mpi_std.hpp" 19 20 #include "mesh.hpp" 20 21 … … 94 95 bool isWrittenCompressed(const StdString& filename) const; 95 96 96 int getNumberWrittenIndexes( MPI_Comm writtenCom);97 int getTotalNumberWrittenIndexes( MPI_Comm writtenCom);98 int getOffsetWrittenIndexes( MPI_Comm writtenCom);99 CArray<int,1>& getCompressedIndexToWriteOnServer( MPI_Comm writtenCom);97 int getNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 98 int getTotalNumberWrittenIndexes(ep_lib::MPI_Comm writtenCom); 99 int getOffsetWrittenIndexes(ep_lib::MPI_Comm writtenCom); 100 CArray<int,1>& getCompressedIndexToWriteOnServer(ep_lib::MPI_Comm writtenCom); 100 101 101 102 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); … … 116 117 117 118 void computeWrittenIndex(); 118 void computeWrittenCompressedIndex( MPI_Comm);119 void computeWrittenCompressedIndex(ep_lib::MPI_Comm); 119 120 120 121 void AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, … … 234 235 private: 235 236 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 236 static std::map<StdString, ETranformationType> transformationMapList_; 237 static bool initializeTransformationMap(); 238 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 239 #pragma omp threadprivate(transformationMapList_ptr) 237 240 static bool _dummyTransformationMapList; 241 #pragma omp threadprivate(_dummyTransformationMapList) 238 242 239 243 DECLARE_REF_FUNC(Domain,domain) -
XIOS/dev/dev_trunk_omp/src/node/duplicate_scalar_to_axis.hpp
r1314 r1601 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CReduceAxisToAxis 63 64 -
XIOS/dev/dev_trunk_omp/src/node/expand_domain.hpp
r935 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CExpandDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/extract_axis.hpp
r1558 r1601 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CExtractAxis 63 64 -
XIOS/dev/dev_trunk_omp/src/node/extract_axis_to_scalar.hpp
r960 r1601 60 60 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CExtractAxisToScalar 63 64 -
XIOS/dev/dev_trunk_omp/src/node/extract_domain.hpp
r1549 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CExtractDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/extract_domain_to_axis.hpp
r895 r1601 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CExtractDomainToAxis 63 64 -
XIOS/dev/dev_trunk_omp/src/node/field.cpp
r1574 r1601 343 343 while (currentDate >= lastDataRequestedFromServer) 344 344 { 345 info(20) << "currentDate : " << currentDate << endl ; 346 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 347 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 348 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 345 #pragma omp critical (_output) 346 { 347 info(20) << "currentDate : " << currentDate << endl ; 348 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 349 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 350 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 351 } 349 352 350 353 dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); … … 502 505 if (!nstepMaxRead) 503 506 { 504 MPI_Allreduce( MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm);507 MPI_Allreduce(&nstepMax, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 505 508 nstepMaxRead = true; 506 509 } … … 919 922 { 920 923 areAllReferenceSolved = true; 921 924 922 925 if (context->hasClient && !context->hasServer) 923 926 { -
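One field.cpp change worth flagging: the collective that synchronises nstepMax now passes &nstepMax as both send and receive buffer instead of MPI_IN_PLACE. Standard MPI forbids aliased send/receive buffers (MPI_IN_PLACE exists precisely for this case), so the explicit form presumably relies on the ep_lib wrapper accepting it; that is an assumption, not something the diff states. For reference, a sketch of the standard in-place form:

  #include <mpi.h>

  // Sketch: reduce a local step count to the communicator-wide maximum, in place.
  int globalMaxStep(int localStep, MPI_Comm comm)
  {
    int nstepMax = localStep;
    // MPI_IN_PLACE tells MPI the receive buffer also holds this rank's input.
    MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, comm);
    return nstepMax;
    // The changeset instead writes MPI_Allreduce(&nstepMax, &nstepMax, ...),
    // which only works if the wrapper tolerates send == recv.
  }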
XIOS/dev/dev_trunk_omp/src/node/file.cpp
r1542 r1601 289 289 290 290 int color = allZoneEmpty ? 0 : 1; 291 MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);292 if (allZoneEmpty) MPI_Comm_free(&fileComm);291 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 292 if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm); 293 293 } 294 294 … … 524 524 { 525 525 int commSize, commRank; 526 MPI_Comm_size(fileComm, &commSize);527 MPI_Comm_rank(fileComm, &commRank);526 ep_lib::MPI_Comm_size(fileComm, &commSize); 527 ep_lib::MPI_Comm_rank(fileComm, &commRank); 528 528 529 529 if (server->intraCommSize > 1) … … 602 602 CContext* context = CContext::getCurrent(); 603 603 CContextServer* server = context->server; 604 MPI_Comm readComm = this->fileComm;604 ep_lib::MPI_Comm readComm = this->fileComm; 605 605 606 606 if (!allZoneEmpty) … … 645 645 { 646 646 int commSize, commRank; 647 MPI_Comm_size(readComm, &commSize);648 MPI_Comm_rank(readComm, &commRank);647 ep_lib::MPI_Comm_size(readComm, &commSize); 648 ep_lib::MPI_Comm_rank(readComm, &commRank); 649 649 650 650 if (server->intraCommSize > 1) … … 688 688 isOpen = false; 689 689 } 690 if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);690 //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 691 691 } 692 692 //---------------------------------------------------------------- … … 713 713 714 714 // Read necessary value from file 715 #pragma omp critical (_func) 715 716 this->data_in->readFieldAttributesValues(enabledFields[idx]); 716 717 -
XIOS/dev/dev_trunk_omp/src/node/file.hpp
r1542 r1601 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "field.hpp" 7 8 #include "data_output.hpp" … … 12 13 #include "attribute_enum_impl.hpp" 13 14 #include "context_client.hpp" 14 #include "mpi.hpp" 15 15 16 16 17 namespace xios { … … 173 174 int nbAxis, nbDomains; 174 175 bool isOpen; 175 MPI_Comm fileComm;176 ep_lib::MPI_Comm fileComm; 176 177 177 178 private: -
XIOS/dev/dev_trunk_omp/src/node/generate_rectilinear_domain.hpp
r836 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CGenerateRectilinearDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/interpolate_axis.hpp
r836 r1601 62 62 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 63 63 static bool _dummyRegistered; 64 #pragma omp threadprivate(_dummyRegistered) 64 65 }; // class CInterpolateAxis 65 66 -
XIOS/dev/dev_trunk_omp/src/node/interpolate_domain.hpp
r1021 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CInterpolateDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/inverse_axis.hpp
r836 r1601 59 59 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered) 61 62 62 63 }; // class CInverseAxis -
XIOS/dev/dev_trunk_omp/src/node/mesh.cpp
r1542 r1601 6 6 7 7 #include "mesh.hpp" 8 using namespace ep_lib; 8 9 #include <boost/functional/hash.hpp> 9 //#include <unordered_map>10 10 11 11 namespace xios { … … 33 33 } 34 34 35 std::map <StdString, CMesh> CMesh::meshList = std::map <StdString, CMesh>();36 std::map <StdString, vector<int> > CMesh::domainList = std::map <StdString, vector<int> >();35 std::map <StdString, CMesh> *CMesh::meshList_ptr = 0; 36 std::map <StdString, vector<int> > *CMesh::domainList_ptr = 0; 37 37 38 38 ///--------------------------------------------------------------- … … 45 45 CMesh* CMesh::getMesh (StdString meshName, int nvertex) 46 46 { 47 CMesh::domainList[meshName].push_back(nvertex); 48 49 if ( CMesh::meshList.begin() != CMesh::meshList.end() ) 50 { 51 for (std::map<StdString, CMesh>::iterator it=CMesh::meshList.begin(); it!=CMesh::meshList.end(); ++it) 47 if(CMesh::domainList_ptr == NULL) CMesh::domainList_ptr = new std::map <StdString, vector<int> >(); 48 if(CMesh::meshList_ptr == NULL) CMesh::meshList_ptr = new std::map <StdString, CMesh>(); 49 50 CMesh::domainList_ptr->at(meshName).push_back(nvertex); 51 52 if ( CMesh::meshList_ptr->begin() != CMesh::meshList_ptr->end() ) 53 { 54 for (std::map<StdString, CMesh>::iterator it=CMesh::meshList_ptr->begin(); it!=CMesh::meshList_ptr->end(); ++it) 52 55 { 53 56 if (it->first == meshName) 54 return &meshList [meshName];57 return &meshList_ptr->at(meshName); 55 58 else 56 59 { 57 60 CMesh newMesh; 58 CMesh::meshList .insert( make_pair(meshName, newMesh) );59 return &meshList [meshName];61 CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 62 return &meshList_ptr->at(meshName); 60 63 } 61 64 } … … 64 67 { 65 68 CMesh newMesh; 66 CMesh::meshList .insert( make_pair(meshName, newMesh) );67 return &meshList [meshName];69 CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 70 return &meshList_ptr->at(meshName); 68 71 } 69 72 } -
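A behavioural detail in the mesh.cpp rewrite: the original domainList[meshName].push_back(nvertex) default-constructs an empty vector when meshName is new, whereas domainList_ptr->at(meshName) throws std::out_of_range for an unknown key. Whether getMesh() is only ever called with names already present in the map is not visible in the diff. A short sketch of the difference, with hypothetical names:

  #include <map>
  #include <string>
  #include <vector>

  // Sketch of the operator[] vs at() semantics behind the getMesh() change.
  void recordVertexCount(std::map<std::string, std::vector<int> >& domains,
                         const std::string& meshName, int nvertex)
  {
    domains[meshName].push_back(nvertex);        // inserts an empty vector if meshName is new
    // domains.at(meshName).push_back(nvertex);  // would throw std::out_of_range for a new meshName
  }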
XIOS/dev/dev_trunk_omp/src/node/mesh.hpp
r1542 r1601 60 60 const CArray<double, 2>&, const CArray<double, 2>& ); 61 61 62 void createMeshEpsilon(const MPI_Comm&,62 void createMeshEpsilon(const ep_lib::MPI_Comm&, 63 63 const CArray<double, 1>&, const CArray<double, 1>&, 64 64 const CArray<double, 2>&, const CArray<double, 2>& ); 65 65 66 void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,66 void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&, 67 67 const CArray<double, 2>&, const CArray<double, 2>&, 68 68 CArray<int, 2>&); … … 83 83 int nbFaces_; 84 84 85 static std::map <StdString, CMesh> meshList; 86 static std::map <StdString, vector<int> > domainList; 85 static std::map <StdString, CMesh> *meshList_ptr; 86 #pragma omp threadprivate(meshList_ptr) 87 static std::map <StdString, vector<int> > *domainList_ptr; 88 #pragma omp threadprivate(domainList_ptr) 87 89 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo> 88 90 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo> 89 void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);90 void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);91 void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 92 void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 91 93 void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 92 94 void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); -
XIOS/dev/dev_trunk_omp/src/node/reduce_axis_to_axis.hpp
r1301 r1601 59 59 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered) 61 62 }; // class CReduceAxisToAxis 62 63 -
XIOS/dev/dev_trunk_omp/src/node/reduce_axis_to_scalar.hpp
r888 r1601 59 59 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered) 61 62 }; // class CReduceAxisToScalar 62 63 -
XIOS/dev/dev_trunk_omp/src/node/reduce_domain_to_axis.hpp
r895 r1601 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CReduceDomainToAxis 63 64 -
XIOS/dev/dev_trunk_omp/src/node/reduce_domain_to_scalar.hpp
r976 r1601 60 60 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CReduceDomainToScalar 63 64 -
XIOS/dev/dev_trunk_omp/src/node/reduce_scalar_to_scalar.hpp
r1314 r1601 59 59 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered) 61 62 }; // class CReduceScalarToScalar 62 63 -
XIOS/dev/dev_trunk_omp/src/node/reorder_domain.hpp
r1457 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CReorderDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/node/scalar.cpp
r1314 r1601 27 27 { /* Ne rien faire de plus */ } 28 28 29 std::map<StdString, ETranformationType> CScalar::transformationMapList_ = std::map<StdString, ETranformationType>(); 30 bool CScalar::dummyTransformationMapList_ = CScalar::initializeTransformationMap(CScalar::transformationMapList_); 29 30 std::map<StdString, ETranformationType> *CScalar::transformationMapList_ptr = 0; 31 31 32 bool CScalar::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 32 33 { … … 35 36 m["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 36 37 m["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 38 } 39 40 bool CScalar::initializeTransformationMap() 41 { 42 CScalar::transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 43 (*CScalar::transformationMapList_ptr)["reduce_axis"] = TRANS_REDUCE_AXIS_TO_SCALAR; 44 (*CScalar::transformationMapList_ptr)["extract_axis"] = TRANS_EXTRACT_AXIS_TO_SCALAR; 45 (*CScalar::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR; 46 (*CScalar::transformationMapList_ptr)["reduce_scalar"] = TRANS_REDUCE_SCALAR_TO_SCALAR; 37 47 } 38 48 … … 165 175 166 176 nodeElementName = node.getElementName(); 167 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 168 it = transformationMapList_.find(nodeElementName); 177 if(CScalar::transformationMapList_ptr == 0) initializeTransformationMap(); 178 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 179 it = transformationMapList_ptr->find(nodeElementName); 169 180 if (ite != it) 170 181 { -
XIOS/dev/dev_trunk_omp/src/node/scalar.hpp
r1436 r1601 83 83 TransMapTypes transformationMap_; 84 84 85 85 void setTransformations(const TransMapTypes&); 86 86 87 87 private: 88 88 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 89 static std::map<StdString, ETranformationType> transformationMapList_; 89 static bool initializeTransformationMap(); 90 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 91 #pragma omp threadprivate(transformationMapList_ptr) 90 92 static bool dummyTransformationMapList_; 93 #pragma omp threadprivate(dummyTransformationMapList_) 91 94 92 95 -
XIOS/dev/dev_trunk_omp/src/node/temporal_splitting.hpp
r1275 r1601 59 59 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered) 61 62 }; // class CTemporalSplitting 62 63 -
XIOS/dev/dev_trunk_omp/src/node/zoom_axis.hpp
r836 r1601 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CZoomAxis 63 64 -
XIOS/dev/dev_trunk_omp/src/node/zoom_domain.hpp
r836 r1601 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered) 62 63 }; // class CZoomDomain 63 64 -
XIOS/dev/dev_trunk_omp/src/object_factory.cpp
r501 r1601 5 5 /// ////////////////////// Définitions ////////////////////// /// 6 6 7 StdString CObjectFactory::CurrContext("");7 StdString *CObjectFactory::CurrContext_ptr = 0; 8 8 9 9 void CObjectFactory::SetCurrentContextId(const StdString & context) 10 { CObjectFactory::CurrContext = context; } 10 { 11 if(CObjectFactory::CurrContext_ptr == 0 ) CObjectFactory::CurrContext_ptr = new StdString; 12 CObjectFactory::CurrContext_ptr->assign(context); 13 } 11 14 12 15 StdString & CObjectFactory::GetCurrentContextId(void) 13 { return (CObjectFactory::CurrContext); } 16 { 17 return (*CObjectFactory::CurrContext_ptr); 18 } 14 19 15 20 } // namespace xios -
XIOS/dev/dev_trunk_omp/src/object_factory.hpp
r1542 r1601 58 58 59 59 /// Propriétés statiques /// 60 static StdString CurrContext; 60 static StdString *CurrContext_ptr; 61 #pragma omp threadprivate(CurrContext_ptr) 61 62 62 63 }; // class CObjectFactory -
XIOS/dev/dev_trunk_omp/src/object_factory_impl.hpp
r1542 r1601 10 10 int CObjectFactory::GetObjectNum(void) 11 11 { 12 if (CurrContext .size() == 0)12 if (CurrContext_ptr->size() == 0) 13 13 ERROR("CObjectFactory::GetObjectNum(void)", 14 14 << "please define current context id !"); 15 return (U::AllVectObj[CObjectFactory::CurrContext].size()); 15 if(U::AllVectObj_ptr == NULL) return 0; 16 return (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].size(); 16 17 } 17 18 … … 19 20 int CObjectFactory::GetObjectIdNum(void) 20 21 { 21 if (CurrContext .size() == 0)22 if (CurrContext_ptr->size() == 0) 22 23 ERROR("CObjectFactory::GetObjectIdNum(void)", 23 24 << "please define current context id !"); 24 return (U::AllMapObj[CObjectFactory::CurrContext].size()); 25 if(U::AllMapObj_ptr == NULL) return 0; 26 return (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].size(); 25 27 } 26 28 … … 28 30 bool CObjectFactory::HasObject(const StdString & id) 29 31 { 30 if (CurrContext .size() == 0)32 if (CurrContext_ptr->size() == 0) 31 33 ERROR("CObjectFactory::HasObject(const StdString & id)", 32 34 << "[ id = " << id << " ] please define current context id !"); 33 return (U::AllMapObj[CObjectFactory::CurrContext].find(id) != 34 U::AllMapObj[CObjectFactory::CurrContext].end()); 35 if(U::AllMapObj_ptr == NULL) return false; 36 return ((*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].find(id) != 37 (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr].end()); 38 35 39 } 36 40 … … 38 42 bool CObjectFactory::HasObject(const StdString & context, const StdString & id) 39 43 { 40 if (U::AllMapObj.find(context) == U::AllMapObj.end()) return false ; 41 else return (U::AllMapObj[context].find(id) != U::AllMapObj[context].end()); 44 if(U::AllMapObj_ptr == NULL) return false; 45 46 if (U::AllMapObj_ptr->find(context) == U::AllMapObj_ptr->end()) return false ; 47 else return ((*U::AllMapObj_ptr)[context].find(id) != (*U::AllMapObj_ptr)[context].end()); 42 48 } 43 49 … … 45 51 std::shared_ptr<U> CObjectFactory::GetObject(const U * const object) 46 52 { 47 if (CurrContext.size() == 0) 53 if(U::AllVectObj_ptr == NULL) return (std::shared_ptr<U>()); 54 if (CurrContext_ptr->size() == 0) 48 55 ERROR("CObjectFactory::GetObject(const U * const object)", 49 56 << "please define current context id !"); 50 std::vector<std::shared_ptr<U> > & vect = 51 U::AllVectObj[CObjectFactory::CurrContext]; 57 std::vector<std::shared_ptr<U> > & vect = (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr]; 52 58 53 59 typename std::vector<std::shared_ptr<U> >::const_iterator … … 70 76 std::shared_ptr<U> CObjectFactory::GetObject(const StdString & id) 71 77 { 72 if (CurrContext.size() == 0) 78 if(U::AllMapObj_ptr == NULL) return (std::shared_ptr<U>()); 79 if (CurrContext_ptr->size() == 0) 73 80 ERROR("CObjectFactory::GetObject(const StdString & id)", 74 81 << "[ id = " << id << " ] please define current context id !"); … … 77 84 << "[ id = " << id << ", U = " << U::GetName() << " ] " 78 85 << "object was not found."); 79 return ( U::AllMapObj[CObjectFactory::CurrContext][id]);86 return (*U::AllMapObj_ptr)[*CObjectFactory::CurrContext_ptr][id]; 80 87 } 81 88 … … 83 90 std::shared_ptr<U> CObjectFactory::GetObject(const StdString & context, const StdString & id) 84 91 { 92 if(U::AllMapObj_ptr == NULL) return (std::shared_ptr<U>()); 93 85 94 if (!CObjectFactory::HasObject<U>(context,id)) 86 95 ERROR("CObjectFactory::GetObject(const StdString & id)", 87 96 << "[ id = " << id << ", U = " << U::GetName() <<", context = "<<context<< " ] " 88 97 << "object was not found."); 89 return ( 
U::AllMapObj[context][id]);98 return (*U::AllMapObj_ptr)[context][id]; 90 99 } 91 100 … … 93 102 std::shared_ptr<U> CObjectFactory::CreateObject(const StdString& id) 94 103 { 95 if (CurrContext.empty()) 104 if(U::AllVectObj_ptr == NULL) U::AllVectObj_ptr = new xios_map<StdString, std::vector<std::shared_ptr<U> > >; 105 if(U::AllMapObj_ptr == NULL) U::AllMapObj_ptr = new xios_map<StdString, xios_map<StdString, std::shared_ptr<U> > >; 106 107 if (CurrContext_ptr->empty()) 96 108 ERROR("CObjectFactory::CreateObject(const StdString& id)", 97 109 << "[ id = " << id << " ] please define current context id !"); … … 105 117 std::shared_ptr<U> value(new U(id.empty() ? CObjectFactory::GenUId<U>() : id)); 106 118 107 U::AllVectObj[CObjectFactory::CurrContext].insert(U::AllVectObj[CObjectFactory::CurrContext].end(), value);108 U::AllMapObj[CObjectFactory::CurrContext].insert(std::make_pair(value->getId(), value));119 (*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].insert((*U::AllVectObj_ptr)[*CObjectFactory::CurrContext_ptr].end(), value); 120 (*U::AllMapObj_ptr) [*CObjectFactory::CurrContext_ptr].insert(std::make_pair(value->getId(), value)); 109 121 110 122 return value; … … 116 128 CObjectFactory::GetObjectVector(const StdString & context) 117 129 { 118 return ( U::AllVectObj[context]);130 return (*U::AllVectObj_ptr)[context]; 119 131 } 120 132 … … 130 142 { 131 143 StdOStringStream oss; 132 oss << GetUIdBase<U>() << U::GenId[CObjectFactory::CurrContext]++; 144 if(U::GenId_ptr == NULL) U::GenId_ptr = new xios_map< StdString, long int >; 145 oss << GetUIdBase<U>() << (*U::GenId_ptr)[*CObjectFactory::CurrContext_ptr]++; 133 146 return oss.str(); 134 147 } -
XIOS/dev/dev_trunk_omp/src/object_template.hpp
r1542 r1601 108 108 static xios_map<StdString, 109 109 xios_map<StdString, 110 std::shared_ptr<DerivedType> > > AllMapObj; 110 std::shared_ptr<DerivedType> > > *AllMapObj_ptr; 111 #pragma omp threadprivate(AllMapObj_ptr) 111 112 static xios_map<StdString, 112 std::vector<std::shared_ptr<DerivedType> > > AllVectObj; 113 std::vector<std::shared_ptr<DerivedType> > > *AllVectObj_ptr; 114 #pragma omp threadprivate(AllVectObj_ptr) 113 115 114 static xios_map< StdString, long int > GenId ; 116 static xios_map< StdString, long int > *GenId_ptr ; 117 #pragma omp threadprivate(GenId_ptr) 115 118 116 119 }; // class CObjectTemplate -
XIOS/dev/dev_trunk_omp/src/object_template_impl.hpp
r1542 r1601 24 24 xios_map<StdString, 25 25 xios_map<StdString, 26 std::shared_ptr<T> > > CObjectTemplate<T>::AllMapObj;26 std::shared_ptr<T> > > *CObjectTemplate<T>::AllMapObj_ptr = 0; 27 27 28 28 template <class T> 29 29 xios_map<StdString, 30 std::vector<std::shared_ptr<T> > > CObjectTemplate<T>::AllVectObj;31 32 template <class T> 33 xios_map<StdString,long int> CObjectTemplate<T>::GenId;30 std::vector<std::shared_ptr<T> > > *CObjectTemplate<T>::AllVectObj_ptr = 0; 31 32 template <class T> 33 xios_map<StdString,long int> *CObjectTemplate<T>::GenId_ptr = 0; 34 34 35 35 template <class T> … … 66 66 CObjectTemplate<T>::GetAllVectobject(const StdString & contextId) 67 67 { 68 return (CObjectTemplate<T>::AllVectObj[contextId]);68 return (CObjectTemplate<T>::AllVectObj_ptr->at(contextId)); 69 69 } 70 70 … … 426 426 const vector<T*> CObjectTemplate<T>::getAll() 427 427 { 428 const vector< std::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>( );428 const vector< std::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(CObjectFactory::GetCurrentContextId()); 429 429 vector<T*> vect; 430 430 -
XIOS/dev/dev_trunk_omp/src/parse_expr/yacc_parser.cpp
r1158 r1601 80 80 } 81 81 82 IFilterExprNode* parsed; 83 std::string globalInputText; 84 size_t globalReadOffset = 0; 82 static IFilterExprNode* parsed; 83 static std::string globalInputText; 84 static std::string *globalInputText_ptr = 0; 85 static size_t globalReadOffset = 0; 86 #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 85 87 86 88 int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 87 89 { 90 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 88 91 size_t numBytesToRead = maxBytesToRead; 89 size_t bytesRemaining = globalInputText.length()-globalReadOffset;92 size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset; 90 93 size_t i; 91 94 if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 92 for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i];95 for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i]; 93 96 *numBytesRead = numBytesToRead; 94 97 globalReadOffset += numBytesToRead; … … 2002 2005 IFilterExprNode* parseExpr(const string& strExpr) 2003 2006 { 2004 globalInputText = strExpr; 2005 globalReadOffset = 0; 2006 yyparse(); 2007 #pragma omp critical (_parser) 2008 { 2009 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 2010 (*globalInputText_ptr).assign (strExpr); 2011 globalReadOffset = 0; 2012 yyparse(); 2013 } 2007 2014 return parsed; 2008 2015 } -
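The generated expression parser keeps global state (parsed, the input text, the read offset), so the changeset makes that state threadprivate and serialises the whole parse under the _parser critical section. A minimal sketch of the wrapping pattern, assuming yyparse() itself stays non-reentrant; parseExprSketch, inputText_ptr and readOffset are hypothetical stand-ins for the names in the diff:

  #include <string>
  #include <cstddef>

  // Provided by the generated parser (declarations only; in yacc_parser.cpp
  // 'parsed' is static and threadprivate).
  class IFilterExprNode;
  extern IFilterExprNode* parsed;
  int yyparse(void);

  static std::string *inputText_ptr = 0;   // per-thread copy of the expression being parsed
  static size_t readOffset = 0;
  #pragma omp threadprivate(inputText_ptr, readOffset)

  IFilterExprNode* parseExprSketch(const std::string& strExpr)
  {
    IFilterExprNode* result = 0;
    #pragma omp critical (_parser)          // yyparse() relies on shared generated tables
    {
      if (inputText_ptr == 0) inputText_ptr = new std::string;
      inputText_ptr->assign(strExpr);
      readOffset = 0;
      yyparse();                            // the lexer reads from inputText_ptr at readOffset
      result = parsed;
    }
    return result;
  }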
XIOS/dev/dev_trunk_omp/src/parse_expr/yacc_parser.yacc
r1158 r1601 15 15 } 16 16 17 IFilterExprNode* parsed; 18 std::string globalInputText; 19 size_t globalReadOffset = 0; 17 static IFilterExprNode* parsed; 18 static std::string globalInputText; 19 static std::string *globalInputText_ptr = 0; 20 static size_t globalReadOffset = 0; 21 #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 20 22 21 23 int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 22 24 { 25 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 23 26 size_t numBytesToRead = maxBytesToRead; 24 size_t bytesRemaining = globalInputText.length()-globalReadOffset;27 size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset; 25 28 size_t i; 26 29 if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 27 for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i];30 for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i]; 28 31 *numBytesRead = numBytesToRead; 29 32 globalReadOffset += numBytesToRead; … … 145 148 IFilterExprNode* parseExpr(const string& strExpr) 146 149 { 147 globalInputText = strExpr; 148 globalReadOffset = 0; 149 yyparse(); 150 #pragma omp critical (_parser) 151 { 152 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 153 (*globalInputText_ptr).assign (strExpr); 154 globalReadOffset = 0; 155 yyparse(); 156 } 150 157 return parsed; 151 158 } -
XIOS/dev/dev_trunk_omp/src/policy.cpp
r855 r1601 10 10 #include "policy.hpp" 11 11 #include <cmath> 12 using namespace ep_lib; 12 13 13 14 namespace xios -
XIOS/dev/dev_trunk_omp/src/policy.hpp
r855 r1601 31 31 { 32 32 protected: 33 DivideAdaptiveComm(const MPI_Comm& mpiComm);33 DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm); 34 34 35 35 void computeMPICommLevel(); … … 41 41 42 42 protected: 43 const MPI_Comm& internalComm_;43 const ep_lib::MPI_Comm& internalComm_; 44 44 std::vector<std::vector<int> > groupParentsBegin_; 45 45 std::vector<std::vector<int> > nbInGroupParents_; -
XIOS/dev/dev_trunk_omp/src/registry.cpp
r696 r1601 4 4 #include <fstream> 5 5 #include <sstream> 6 using namespace ep_lib; 6 7 7 8 namespace xios … … 258 259 void CRegistry::hierarchicalGatherRegistry(void) 259 260 { 260 hierarchicalGatherRegistry(communicator) ; 261 //hierarchicalGatherRegistry(communicator) ; 262 gatherRegistry(communicator) ; 261 263 } 262 264 -
XIOS/dev/dev_trunk_omp/src/registry.hpp
r700 r1601 23 23 24 24 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 25 CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}25 CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 26 26 27 27 /** Copy constructor */ … … 106 106 107 107 /** use internally for recursivity */ 108 void gatherRegistry(const MPI_Comm& comm) ;108 void gatherRegistry(const ep_lib::MPI_Comm& comm) ; 109 109 110 110 /** use internally for recursivity */ 111 void hierarchicalGatherRegistry(const MPI_Comm& comm) ;111 void hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) ; 112 112 113 113 … … 120 120 121 121 /** MPI communicator used for broadcast and gather operation */ 122 MPI_Comm communicator ;122 ep_lib::MPI_Comm communicator ; 123 123 } ; 124 124 -
XIOS/dev/dev_trunk_omp/src/server.cpp
r1587 r1601 15 15 #include "event_scheduler.hpp" 16 16 #include "string_tools.hpp" 17 using namespace ep_lib; 17 18 18 19 namespace xios … … 47 48 void CServer::initialize(void) 48 49 { 49 int initialized ;50 MPI_Initialized(&initialized) ;51 if (initialized) is_MPI_Initialized=true ;52 else is_MPI_Initialized=false ;50 //int initialized ; 51 //MPI_Initialized(&initialized) ; 52 //if (initialized) is_MPI_Initialized=true ; 53 //else is_MPI_Initialized=false ; 53 54 int rank ; 54 55 … … 57 58 { 58 59 59 if (!is_MPI_Initialized)60 {61 MPI_Init(NULL, NULL);62 }60 //if (!is_MPI_Initialized) 61 //{ 62 // MPI_Init(NULL, NULL); 63 //} 63 64 CTimer::get("XIOS").resume() ; 64 65 … … 152 153 if (serverLevel==2) 153 154 { 155 #pragma omp critical (_output) 154 156 info(50)<<"The number of secondary server pools is "<< sndServerGlobalRanks.size() <<endl ; 155 157 for (i=0; i<sndServerGlobalRanks.size(); i++) … … 188 190 MPI_Comm_size(intraComm,&intraCommSize) ; 189 191 MPI_Comm_rank(intraComm,&intraCommRank) ; 190 info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 192 193 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 194 #pragma omp critical (_output) 195 { 196 info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 191 197 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 192 193 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;198 } 199 194 200 interCommLeft.push_back(newComm) ; 195 201 } … … 209 215 MPI_Comm_size(intraComm, &intraCommSize) ; 210 216 MPI_Comm_rank(intraComm, &intraCommRank) ; 211 info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 217 218 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 219 #pragma omp critical (_output) 220 { 221 info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 212 222 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 213 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;223 } 214 224 interCommLeft.push_back(newComm) ; 215 225 } … … 221 231 MPI_Comm_size(intraComm, &intraCommSize) ; 222 232 MPI_Comm_rank(intraComm, &intraCommRank) ; 223 info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 233 234 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ; 235 #pragma omp critical (_output) 236 { 237 info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 224 238 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ; 225 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;239 } 226 240 interCommRight.push_back(newComm) ; 227 241 } … … 234 248 MPI_Comm_size(intraComm, &intraCommSize) ; 235 249 MPI_Comm_rank(intraComm, &intraCommRank) ; 236 info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 250 251 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ; 252 #pragma omp critical (_output) 253 { 254 info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 237 255 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 238 239 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, 
clientLeader, 1, &newComm) ; 256 } 257 240 258 interCommLeft.push_back(newComm) ; 241 259 } … … 426 444 { 427 445 if (CXios::usingOasis) oasis_finalize(); 428 else MPI_Finalize() ;446 //else MPI_Finalize() ; 429 447 } 448 430 449 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ; 431 450 report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ; … … 637 656 { 638 657 traceOff() ; 639 MPI_Iprobe( MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;658 MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ; 640 659 traceOn() ; 641 660 if (flag==true) 642 661 { 662 #ifdef _usingMPI 643 663 rank=status.MPI_SOURCE ; 664 #elif _usingEP 665 rank=status.ep_src; 666 #endif 644 667 MPI_Get_count(&status,MPI_CHAR,&count) ; 645 668 buffer=new char[count] ; … … 655 678 if (flag==true) 656 679 { 680 #ifdef _usingMPI 657 681 rank=status.MPI_SOURCE ; 682 #elif _usingEP 683 rank=status.ep_src; 684 #endif 658 685 MPI_Get_count(&status,MPI_CHAR,&count) ; 659 686 recvContextMessage((void*)buffer,count) ; … … 740 767 MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ; 741 768 buffers.push_back(new char[counts.back()]) ; 769 MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&request) ; 742 770 requests.push_back(request); 743 MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ;744 771 isEventRegistered.push_back(false); 745 772 isEventQueued.push_back(false); … … 750 777 { 751 778 // (2) If context id is received, register an event 752 MPI_Test(&requests[ctxNb],&flag,&status) ;779 if(!isEventRegistered[ctxNb]) MPI_Test(&requests[ctxNb],&flag,&status) ; 753 780 if (flag==true && !isEventRegistered[ctxNb]) 754 781 { … … 794 821 MPI_Intercomm_merge(contextInterComm,1,&inter); 795 822 MPI_Barrier(inter); 796 MPI_Comm_free(&inter);797 823 context->initServer(intraComm,contextInterComm); 798 824 contextInterComms.push_back(contextInterComm); 799 825 826 MPI_Comm_free(&inter); 800 827 } 801 828 // Secondary server: create communication channel with a primary server -
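In server.cpp the probe for incoming context messages now passes -2 instead of MPI_ANY_SOURCE and reads the sender rank from either status.MPI_SOURCE or status.ep_src depending on the build. Reading the diff, -2 appears to be ep_lib's wildcard-source value; that interpretation is an assumption. A sketch of the dual-build probe-and-identify-sender step, with a hypothetical probeSourceRank() helper:

  // Sketch: probe any sender on 'tag' and return its rank, or -1 if nothing is pending.
  // Assumes -2 plays the role of MPI_ANY_SOURCE in ep_lib and that
  // ep_lib::MPI_Status exposes ep_src when built with _usingEP.
  int probeSourceRank(ep_lib::MPI_Comm comm, int tag)
  {
    ep_lib::MPI_Status status;
    int flag = 0;
    ep_lib::MPI_Iprobe(-2, tag, comm, &flag, &status);
    if (!flag) return -1;
    int src = -1;
  #ifdef _usingMPI
    src = status.MPI_SOURCE;
  #elif _usingEP
    src = status.ep_src;
  #endif
    return src;
  }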
XIOS/dev/dev_trunk_omp/src/server.hpp
r1587 r1601 26 26 static void registerContext(void* buff,int count, int leaderRank=0); 27 27 28 static MPI_Comm intraComm;29 static std::list< MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server)30 static std::list< MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool)31 static std::list< MPI_Comm> contextInterComms; // list of context intercomms32 static std::list< MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers)28 static ep_lib::MPI_Comm intraComm; 29 static std::list<ep_lib::MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server) 30 static std::list<ep_lib::MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool) 31 static std::list<ep_lib::MPI_Comm> contextInterComms; // list of context intercomms 32 static std::list<ep_lib::MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers) 33 33 static CEventScheduler* eventScheduler; 34 34 -
XIOS/dev/dev_trunk_omp/src/timer.cpp
r1158 r1601 6 6 #include <sstream> 7 7 #include "tracer.hpp" 8 using namespace ep_lib; 8 9 9 10 namespace xios 10 11 { 11 std::map<std::string,CTimer> CTimer::allTimer;12 std::map<std::string,CTimer> *CTimer::allTimer_ptr = 0; 12 13 13 14 CTimer::CTimer(const std::string& name_) : name(name_) … … 54 55 CTimer& CTimer::get(const std::string name) 55 56 { 56 std::map<std::string,CTimer>::iterator it = allTimer.find(name); 57 if (it == allTimer.end()) 58 it = allTimer.insert(std::make_pair(name, CTimer(name))).first; 57 if(allTimer_ptr == NULL) allTimer_ptr = new std::map<std::string,CTimer>; 58 59 std::map<std::string,CTimer>::iterator it = allTimer_ptr->find(name); 60 61 if (it == allTimer_ptr->end()) 62 it = allTimer_ptr->insert(std::make_pair(name, CTimer(name))).first; 63 59 64 return it->second; 60 65 } … … 63 68 { 64 69 std::ostringstream strOut ; 65 for(std::map<std::string,CTimer>::iterator it=allTimer.begin();it!=allTimer.end();++it) 70 if(allTimer_ptr == 0) allTimer_ptr = new std::map<std::string,CTimer>; 71 72 for(std::map<std::string,CTimer>::iterator it=allTimer_ptr->begin();it!=allTimer_ptr->end();++it) 66 73 strOut<<"Timer : "<<it->first<<" --> cumulated time : "<<it->second.getCumulatedTime()<<std::endl ; 67 74 return strOut.str() ; -
XIOS/dev/dev_trunk_omp/src/timer.hpp
r1158 r1601 20 20 void reset(void); 21 21 double getCumulatedTime(void); 22 static std::map<std::string,CTimer> allTimer; 22 static std::map<std::string,CTimer> *allTimer_ptr; 23 #pragma omp threadprivate(allTimer_ptr) 23 24 static double getTime(void); 24 25 static CTimer& get(std::string name); -
XIOS/dev/dev_trunk_omp/src/transformation/Functions/reduction.cpp
r979 r1601 9 9 10 10 CReductionAlgorithm::CallBackMap* CReductionAlgorithm::reductionCreationCallBacks_ = 0; 11 std::map<StdString,EReductionType> CReductionAlgorithm::ReductionOperations = std::map<StdString,EReductionType>(); 11 std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr = 0; 12 12 13 bool CReductionAlgorithm::initReductionOperation(std::map<StdString,EReductionType>& m) 13 14 { … … 29 30 } 30 31 31 bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(CReductionAlgorithm::ReductionOperations); 32 bool CReductionAlgorithm::initReductionOperation() 33 { 34 CReductionAlgorithm::ReductionOperations_ptr = new std::map<StdString,EReductionType>(); 35 // So so stupid way to intialize operation but it works ... 36 (*CReductionAlgorithm::ReductionOperations_ptr)["sum"] = TRANS_REDUCE_SUM; 37 CSumReductionAlgorithm::registerTrans(); 38 39 (*CReductionAlgorithm::ReductionOperations_ptr)["min"] = TRANS_REDUCE_MIN; 40 CMinReductionAlgorithm::registerTrans(); 41 42 (*CReductionAlgorithm::ReductionOperations_ptr)["max"] = TRANS_REDUCE_MAX; 43 CMaxReductionAlgorithm::registerTrans(); 44 45 (*CReductionAlgorithm::ReductionOperations_ptr)["extract"] = TRANS_REDUCE_EXTRACT; 46 CExtractReductionAlgorithm::registerTrans(); 47 48 (*CReductionAlgorithm::ReductionOperations_ptr)["average"] = TRANS_REDUCE_AVERAGE; 49 CAverageReductionAlgorithm::registerTrans(); 50 } 51 32 52 33 53 CReductionAlgorithm* CReductionAlgorithm::createOperation(EReductionType reduceType) -
XIOS/dev/dev_trunk_omp/src/transformation/Functions/reduction.hpp
r1260 r1601 23 23 { 24 24 public: 25 static std::map<StdString,EReductionType> ReductionOperations;26 25 static std::map<StdString,EReductionType> *ReductionOperations_ptr; 26 #pragma omp threadprivate(ReductionOperations_ptr) 27 27 public: 28 28 CReductionAlgorithm() {} … … 61 61 typedef std::map<EReductionType, CreateOperationCallBack> CallBackMap; 62 62 static CallBackMap* reductionCreationCallBacks_; 63 #pragma omp threadprivate(reductionCreationCallBacks_) 63 64 64 65 static bool registerOperation(EReductionType reduceType, CreateOperationCallBack createFn); … … 67 68 protected: 68 69 static bool initReductionOperation(std::map<StdString,EReductionType>& m); 70 static bool initReductionOperation(); 69 71 static bool _dummyInit; 72 #pragma omp threadprivate(_dummyInit) 70 73 }; 71 74 -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_extract_domain.cpp
r1260 r1601 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp"16 15 17 16 namespace xios { … … 62 61 63 62 pos_ = algo->position; 64 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 63 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 64 { 65 CReductionAlgorithm::initReductionOperation(); 66 } 67 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 65 68 } 66 69 -
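Every transformation algorithm that takes a reduction operator now repeats the same two call-site steps, visible above: guard the shared operation table with a per-thread lazy initialisation, then look the operator up with at(), which throws std::out_of_range for an unrecognised name where the old ReductionOperations[op] would have silently inserted a default entry. Condensed from the diff (not a standalone snippet; op and reduction_ belong to the surrounding constructor):

  // Per-thread lazy initialisation of the reduction-operation table.
  if (CReductionAlgorithm::ReductionOperations_ptr == 0)
    CReductionAlgorithm::initReductionOperation();

  // Checked lookup: at() throws for an unknown operator name.
  reduction_ = CReductionAlgorithm::createOperation(
                   CReductionAlgorithm::ReductionOperations_ptr->at(op));

The same block appears in axis_algorithm_reduce_axis.cpp and axis_algorithm_reduce_domain.cpp below.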
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_extract_domain.hpp
r1260 r1601 13 13 #include "transformation.hpp" 14 14 15 #include "reduction.hpp" 15 16 namespace xios { 16 17 … … 25 26 Extract a domain to an axis 26 27 */ 27 class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation 28 class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation, public CReductionAlgorithm 28 29 { 29 30 public: -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_inverse.cpp
r1542 r1601 15 15 #include "inverse_axis.hpp" 16 16 #include "client_client_dht_template.hpp" 17 using namespace ep_lib; 17 18 18 19 namespace xios { -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_axis.cpp
r1314 r1601 12 12 #include "grid.hpp" 13 13 #include "grid_transformation_factory_impl.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 68 67 69 68 } 69 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 70 { 71 CReductionAlgorithm::initReductionOperation(); 72 } 70 73 71 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations [op]);74 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 72 75 } 73 76 -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_axis.hpp
r1314 r1601 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp" 14 15 15 16 namespace xios { … … 23 24 Reduce a axis to an axis 24 25 */ 25 class CAxisAlgorithmReduceAxis : public CAxisAlgorithmTransformation 26 class CAxisAlgorithmReduceAxis : public CAxisAlgorithmTransformation, public CReductionAlgorithm 26 27 { 27 28 public: -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_domain.cpp
r1299 r1601 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp" 15 16 16 17 17 namespace xios { … … 70 70 71 71 dir_ = (CReduceDomainToAxis::direction_attr::iDir == algo->direction) ? iDir : jDir; 72 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 72 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 73 { 74 CReductionAlgorithm::initReductionOperation(); 75 } 76 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 73 77 local = algo->local ; 74 78 } -
XIOS/dev/dev_trunk_omp/src/transformation/axis_algorithm_reduce_domain.hpp
r1299 r1601 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp" 14 15 15 16 namespace xios { … … 24 25 Reduce a domain to an axis 25 26 */ 26 class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation 27 class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation, public CReductionAlgorithm 27 28 { 28 29 public: … … 46 47 jDir = 2 47 48 }; 48 49 49 50 ReduceDirection dir_; 50 51 bool local ; -
XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_expand.cpp
r1553 r1601 161 161 else domainDestination->domain_ref.setValue(domainDstRef); 162 162 163 164 163 // Here are attributes of source need tranfering 165 164 int niGloSrc = domainSource->ni_glo; -
XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.cpp
r1542 r1601 20 20 #include "interpolate_domain.hpp" 21 21 #include "grid.hpp" 22 using namespace ep_lib; 22 23 23 24 namespace xios { … … 406 407 CContextClient* client=context->client; 407 408 408 MPI_Comm poleComme(MPI_COMM_NULL);409 MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED: 1, 0, &poleComme);410 if ( MPI_COMM_NULL != poleComme)409 ep_lib::MPI_Comm poleComme = MPI_COMM_NULL; 410 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 411 if (poleComme!=MPI_COMM_NULL) 411 412 { 412 413 int nbClientPole; 413 MPI_Comm_size(poleComme, &nbClientPole);414 ep_lib::MPI_Comm_size(poleComme, &nbClientPole); 414 415 415 416 std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, … … 422 423 std::vector<int> recvCount(nbClientPole,0); 423 424 std::vector<int> displ(nbClientPole,0); 424 MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 425 425 ep_lib::MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 426 426 displ[0]=0; 427 427 for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ; … … 445 445 446 446 // Gather all index and weight for pole 447 MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme);448 MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme);447 ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 448 ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 449 449 450 450 std::map<int,double> recvTemp; … … 593 593 594 594 595 MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm);595 ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 596 596 597 597 int* sendIndexDestBuff = new int [sendBuffSize]; … … 599 599 double* sendWeightBuff = new double [sendBuffSize]; 600 600 601 std::vector< MPI_Request> sendRequest;601 std::vector<ep_lib::MPI_Request> sendRequest(3*globalIndexInterpSendToClient.size()); 602 602 603 603 int sendOffSet = 0, l = 0; 604 int position = 0; 604 605 for (itMap = itbMap; itMap != iteMap; ++itMap) 605 606 { … … 620 621 } 621 622 622 sendRequest.push_back(MPI_Request()); 623 MPI_Isend(sendIndexDestBuff + sendOffSet, 623 ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet, 624 624 k, 625 625 MPI_INT, … … 627 627 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 628 628 client->intraComm, 629 &sendRequest.back()); 630 sendRequest.push_back(MPI_Request()); 631 MPI_Isend(sendIndexSrcBuff + sendOffSet, 629 &sendRequest[position++]); 630 ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet, 632 631 k, 633 632 MPI_INT, … … 635 634 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, 636 635 client->intraComm, 637 &sendRequest.back()); 638 sendRequest.push_back(MPI_Request()); 639 MPI_Isend(sendWeightBuff + sendOffSet, 636 &sendRequest[position++]); 637 ep_lib::MPI_Isend(sendWeightBuff + sendOffSet, 640 638 k, 641 639 MPI_DOUBLE, … … 643 641 MPI_DOMAIN_INTERPOLATION_WEIGHT, 644 642 client->intraComm, 645 &sendRequest .back());643 &sendRequest[position++]); 646 644 sendOffSet += k; 647 645 } … … 655 653 while (receivedSize < recvBuffSize) 656 654 { 657 MPI_Status recvStatus;658 MPI_Recv((recvIndexDestBuff + receivedSize),655 ep_lib::MPI_Status recvStatus; 656 
ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize), 659 657 recvBuffSize, 660 658 MPI_INT, 661 MPI_ANY_SOURCE,659 -2, 662 660 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 663 661 client->intraComm, … … 665 663 666 664 int countBuff = 0; 667 MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 665 ep_lib::MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 666 #ifdef _usingMPI 668 667 clientSrcRank = recvStatus.MPI_SOURCE; 669 670 MPI_Recv((recvIndexSrcBuff + receivedSize), 668 #elif _usingEP 669 clientSrcRank = recvStatus.ep_src; 670 #endif 671 672 ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 671 673 recvBuffSize, 672 674 MPI_INT, … … 676 678 &recvStatus); 677 679 678 MPI_Recv((recvWeightBuff + receivedSize),680 ep_lib::MPI_Recv((recvWeightBuff + receivedSize), 679 681 recvBuffSize, 680 682 MPI_DOUBLE, … … 692 694 } 693 695 694 std::vector< MPI_Status> requestStatus(sendRequest.size());695 MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);696 std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 697 ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]); 696 698 697 699 delete [] sendIndexDestBuff; … … 706 708 707 709 /*! Redefined some functions of CONetCDF4 to make use of them */ 708 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm)710 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm) 709 711 : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {} 712 713 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, bool exist, const ep_lib::MPI_Comm comm) 714 : CNc4DataOutput(NULL, filename, exist, false, true, comm, false, true) {} 715 710 716 int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name, 711 717 const StdSize size) … … 785 791 } 786 792 787 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);788 MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);793 ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 794 ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 789 795 790 796 if (0 == globalNbWeight) … … 800 806 std::vector<StdSize> count(1, localNbWeight); 801 807 802 WriteNetCdf netCdfWriter(filename, client->intraComm); 803 804 // Define some dimensions 805 netCdfWriter.addDimensionWrite("n_src", n_src); 806 netCdfWriter.addDimensionWrite("n_dst", n_dst); 807 netCdfWriter.addDimensionWrite("n_weight", globalNbWeight); 808 809 std::vector<StdString> dims(1,"n_weight"); 810 811 // Add some variables 812 netCdfWriter.addVariableWrite("src_idx", NC_INT, dims); 813 netCdfWriter.addVariableWrite("dst_idx", NC_INT, dims); 814 netCdfWriter.addVariableWrite("weight", NC_DOUBLE, dims); 815 816 // End of definition 817 netCdfWriter.endDefinition(); 818 819 // // Write variables 820 if (0 != localNbWeight) 821 { 822 netCdfWriter.writeDataIndex(src_idx, "src_idx", false, 0, &start, &count); 823 netCdfWriter.writeDataIndex(dst_idx, "dst_idx", false, 0, &start, &count); 824 netCdfWriter.writeDataIndex(weights, "weight", false, 0, &start, &count); 825 } 826 827 netCdfWriter.closeFile(); 808 int my_rank_loc = client->intraComm->ep_comm_ptr->size_rank_info[1].first; 809 int my_rank = client->intraComm->ep_comm_ptr->size_rank_info[0].first; 810 811 812 813 WriteNetCdf *netCdfWriter; 814 815 MPI_Barrier_local(client->intraComm); 816 
817 if(my_rank_loc==0) 818 { 819 info(100)<<"rank "<< my_rank <<" create weight info file"<< std::endl; 820 821 WriteNetCdf my_writer(filename, client->intraComm); 822 info(100)<<"rank "<< my_rank <<" file created"<< std::endl; 823 netCdfWriter = &my_writer; 824 825 // Define some dimensions 826 netCdfWriter->addDimensionWrite("n_src", n_src); 827 netCdfWriter->addDimensionWrite("n_dst", n_dst); 828 netCdfWriter->addDimensionWrite("n_weight", globalNbWeight); 829 info(100)<<"rank "<< my_rank <<" addDimensionWrite : n_src, n_dst, n_weight"<< std::endl; 830 831 std::vector<StdString> dims(1,"n_weight"); 832 833 // Add some variables 834 netCdfWriter->addVariableWrite("src_idx", NC_INT, dims); 835 netCdfWriter->addVariableWrite("dst_idx", NC_INT, dims); 836 netCdfWriter->addVariableWrite("weight", NC_DOUBLE, dims); 837 838 info(100)<<"rank "<< my_rank <<" addVariableWrite : src_idx, dst_idx, weight"<< std::endl; 839 840 // End of definition 841 netCdfWriter->endDefinition(); 842 info(100)<<"rank "<< my_rank <<" endDefinition"<< std::endl; 843 844 netCdfWriter->closeFile(); 845 info(100)<<"rank "<< my_rank <<" file closed"<< std::endl; 846 } 847 848 MPI_Barrier_local(client->intraComm); 849 850 #pragma omp critical (write_weight_data) 851 { 852 // open file 853 info(100)<<"rank "<< my_rank <<" writing in weight info file"<< std::endl; 854 855 WriteNetCdf my_writer(filename, true, client->intraComm); 856 info(100)<<"rank "<< my_rank <<" file opened"<< std::endl; 857 netCdfWriter = &my_writer; 858 859 // // Write variables 860 if (0 != localNbWeight) 861 { 862 netCdfWriter->writeDataIndex(src_idx, "src_idx", false, 0, &start, &count); 863 netCdfWriter->writeDataIndex(dst_idx, "dst_idx", false, 0, &start, &count); 864 netCdfWriter->writeDataIndex(weights, "weight", false, 0, &start, &count); 865 866 info(100)<<"rank "<< my_rank <<" WriteDataIndex : src_idx, dst_idx, weight"<< std::endl; 867 } 868 869 netCdfWriter->closeFile(); 870 info(100)<<"rank "<< my_rank <<" file closed"<< std::endl; 871 872 } 873 874 MPI_Barrier_local(client->intraComm); 875 876 828 877 } 829 878 … … 855 904 } 856 905 857 nc_open(filename.c_str(),NC_NOWRITE, &ncid) ; 858 nc_inq_dimid(ncid,"n_weight",&weightDimId) ; 859 nc_inq_dimlen(ncid,weightDimId,&nbWeightGlo) ; 860 861 size_t nbWeight ; 862 size_t start ; 863 size_t div = nbWeightGlo/clientSize ; 864 size_t mod = nbWeightGlo%clientSize ; 865 if (clientRank < mod) 866 { 867 nbWeight=div+1 ; 868 start=clientRank*(div+1) ; 869 } 870 else 871 { 872 nbWeight=div ; 873 start= mod * (div+1) + (clientRank-mod) * div ; 874 } 875 876 double* weight=new double[nbWeight] ; 877 int weightId ; 878 nc_inq_varid (ncid, "weight", &weightId) ; 879 nc_get_vara_double(ncid, weightId, &start, &nbWeight, weight) ; 880 881 long* srcIndex=new long[nbWeight] ; 882 int srcIndexId ; 883 nc_inq_varid (ncid, "src_idx", &srcIndexId) ; 884 nc_get_vara_long(ncid, srcIndexId, &start, &nbWeight, srcIndex) ; 885 886 long* dstIndex=new long[nbWeight] ; 887 int dstIndexId ; 888 nc_inq_varid (ncid, "dst_idx", &dstIndexId) ; 889 nc_get_vara_long(ncid, dstIndexId, &start, &nbWeight, dstIndex) ; 890 891 int indexOffset=0 ; 892 if (fortranConvention) indexOffset=1 ; 893 for(size_t ind=0; ind<nbWeight;++ind) 894 interpMapValue[dstIndex[ind]-indexOffset].push_back(make_pair(srcIndex[ind]-indexOffset,weight[ind])); 895 } 906 #pragma omp critical (read_weight_data) 907 { 908 nc_open(filename.c_str(),NC_NOWRITE, &ncid) ; 909 nc_inq_dimid(ncid,"n_weight",&weightDimId) ; 910 
nc_inq_dimlen(ncid,weightDimId,&nbWeightGlo) ; 911 912 size_t nbWeight ; 913 size_t start ; 914 size_t div = nbWeightGlo/clientSize ; 915 size_t mod = nbWeightGlo%clientSize ; 916 if (clientRank < mod) 917 { 918 nbWeight=div+1 ; 919 start=clientRank*(div+1) ; 920 } 921 else 922 { 923 nbWeight=div ; 924 start= mod * (div+1) + (clientRank-mod) * div ; 925 } 926 927 double* weight=new double[nbWeight] ; 928 int weightId ; 929 nc_inq_varid (ncid, "weight", &weightId) ; 930 nc_get_vara_double(ncid, weightId, &start, &nbWeight, weight) ; 931 932 long* srcIndex=new long[nbWeight] ; 933 int srcIndexId ; 934 nc_inq_varid (ncid, "src_idx", &srcIndexId) ; 935 nc_get_vara_long(ncid, srcIndexId, &start, &nbWeight, srcIndex) ; 936 937 long* dstIndex=new long[nbWeight] ; 938 int dstIndexId ; 939 nc_inq_varid (ncid, "dst_idx", &dstIndexId) ; 940 nc_get_vara_long(ncid, dstIndexId, &start, &nbWeight, dstIndex) ; 941 942 int indexOffset=0 ; 943 if (fortranConvention) indexOffset=1 ; 944 for(size_t ind=0; ind<nbWeight;++ind) 945 interpMapValue[dstIndex[ind]-indexOffset].push_back(make_pair(srcIndex[ind]-indexOffset,weight[ind])); 946 } 947 } 896 948 897 949 void CDomainAlgorithmInterpolate::apply(const std::vector<std::pair<int,double> >& localIndex, -
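The weight-file handling above is restructured for threads sharing a process: the first thread of each process creates the file and defines its dimensions and variables, ep_lib's local barrier separates the phases, and every thread then reopens the file and writes its own slice inside the write_weight_data critical section (reading follows the symmetric read_weight_data path). A condensed sketch of that create-then-append shape; the netCDF definition calls are abbreviated and localThreadRank stands in for the size_rank_info lookup in the diff:

  // Sketch (condensed): one thread per process creates the file, all threads append.
  ep_lib::MPI_Barrier_local(comm);                 // line up the threads of this process

  if (localThreadRank == 0)                        // first thread: create file and define layout
  {
    WriteNetCdf writer(filename, comm);            // create mode
    writer.addDimensionWrite("n_weight", globalNbWeight);
    // ... addVariableWrite("src_idx"/"dst_idx"/"weight"), endDefinition() ...
    writer.closeFile();
  }

  ep_lib::MPI_Barrier_local(comm);                 // the file now exists for every thread

  #pragma omp critical (write_weight_data)         // one thread of the process writes at a time
  {
    WriteNetCdf writer(filename, true, comm);      // reopen the existing file
    writer.writeDataIndex(weights, "weight", false, 0, &start, &count);
    writer.closeFile();
  }

  ep_lib::MPI_Barrier_local(comm);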
XIOS/dev/dev_trunk_omp/src/transformation/domain_algorithm_interpolate.hpp
r1480 r1601 9 9 #ifndef __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 10 10 #define __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 11 11 #include "mpi_std.hpp" 12 12 #include "domain_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" … … 70 70 { 71 71 public: 72 WriteNetCdf(const StdString& filename, const MPI_Comm comm); 72 WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm); 73 WriteNetCdf(const StdString& filename, bool exist, const ep_lib::MPI_Comm comm); 73 74 int addDimensionWrite(const StdString& name, const StdSize size = UNLIMITED_DIM); 74 75 int addVariableWrite(const StdString& name, nc_type type, -
XIOS/dev/dev_trunk_omp/src/transformation/generic_algorithm_transformation.cpp
r1542 r1601 37 37 int nbLocalIndex = localIndex.size(); 38 38 double defaultValue = std::numeric_limits<double>::quiet_NaN(); 39 39 40 40 if (ignoreMissingValue) 41 41 { 42 42 if (firstPass) dataOut=defaultValue ; 43 43 44 44 for (int idx = 0; idx < nbLocalIndex; ++idx) 45 45 { … … 131 131 { 132 132 distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ; 133 MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;133 ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 134 134 135 135 } … … 137 137 { 138 138 distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ; 139 MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ;139 ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 140 140 } 141 141 else //it's a scalar … … 230 230 int sendValue = (computeGlobalIndexOnProc) ? 1 : 0; 231 231 int recvValue = 0; 232 MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm);232 ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 233 233 computeGlobalIndexOnProc = (0 < recvValue); 234 234 -
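generic_algorithm_transformation.cpp shows the other recurring collective in this changeset: each rank contributes a local flag and an Allreduce promotes it to a communicator-wide decision, so every process agrees on whether the source element is distributed (MPI_LOR here, MPI_SUM plus a comparison for computeGlobalIndexOnProc). A small sketch of that agreement step, written against plain MPI on the assumption that ep_lib::MPI_Allreduce mirrors the signature:

  #include <mpi.h>

  // Sketch: promote a per-rank flag to a global decision with a logical OR.
  bool anyRankDistributed(bool locallyDistributed, MPI_Comm comm)
  {
    int local = locallyDistributed ? 1 : 0;
    int global = 0;
    MPI_Allreduce(&local, &global, 1, MPI_INT, MPI_LOR, comm);   // true if any rank set it
    return global != 0;
  }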
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.cpp
r1542 r1601 500 500 sendRankSizeMap[itIndex->first] = sendSize; 501 501 } 502 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);502 ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 503 503 504 504 displ[0]=0 ; … … 507 507 int* recvRankBuff=new int[recvSize]; 508 508 int* recvSizeBuff=new int[recvSize]; 509 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);510 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);509 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 510 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 511 511 for (int i = 0; i < nbClient; ++i) 512 512 { … … 520 520 521 521 // Sending global index of grid source to corresponding process as well as the corresponding mask 522 std::vector< MPI_Request> requests;523 std::vector< MPI_Status> status;522 std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2); 523 std::vector<ep_lib::MPI_Status> status; 524 524 std::unordered_map<int, unsigned char* > recvMaskDst; 525 525 std::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 526 int requests_position = 0; 526 527 for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv) 527 528 { … … 531 532 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 532 533 533 requests.push_back(MPI_Request()); 534 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 535 requests.push_back(MPI_Request()); 536 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 534 ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 535 ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 537 536 } 538 537 … … 569 568 570 569 // Send global index source and mask 571 requests.push_back(MPI_Request()); 572 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 573 requests.push_back(MPI_Request()); 574 MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 570 ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 571 ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 575 572 } 576 573 577 574 status.resize(requests.size()); 578 MPI_Waitall(requests.size(), &requests[0], &status[0]);575 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 579 576 580 577 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 581 std::vector<MPI_Request>().swap(requests); 582 std::vector<MPI_Status>().swap(status); 578 requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size()); 579 requests_position = 0; 580 std::vector<ep_lib::MPI_Status>().swap(status); 583 581 // Okie, on destination side, we will wait for 
information of masked index of source 584 582 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) … … 587 585 int recvSize = itSend->second; 588 586 589 requests.push_back(MPI_Request()); 590 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 587 ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 591 588 } 592 589 … … 624 621 625 622 // Okie, now inform the destination which source index are masked 626 requests.push_back(MPI_Request()); 627 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 623 ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 628 624 } 629 625 status.resize(requests.size()); 630 MPI_Waitall(requests.size(), &requests[0], &status[0]);626 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 631 627 632 628 // Cool, now we can fill in local index of grid destination (counted for masked index) -
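The change above replaces push_back on the request vector with a vector sized up front and filled through requests_position, so no reallocation can move an MPI_Request that is still pending. A small sketch of that pattern with plain MPI and made-up buffer names; the tag and per-peer message size are placeholders.

    // Pre-size the request array, post all Irecv/Isend into fixed slots,
    // then complete them with a single Waitall.
    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    void exchangeOneIntPerPeer(const std::vector<int>& peers,
                               std::vector<int>& recvBuf,
                               std::vector<int>& sendBuf, MPI_Comm comm)
    {
      std::vector<MPI_Request> requests(2 * peers.size());
      int pos = 0;
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Irecv(&recvBuf[i], 1, MPI_INT, peers[i], 46, comm, &requests[pos++]);
      for (std::size_t i = 0; i < peers.size(); ++i)
        MPI_Isend(&sendBuf[i], 1, MPI_INT, peers[i], 46, comm, &requests[pos++]);

      std::vector<MPI_Status> status(requests.size());
      if (!requests.empty())
        MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
    }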
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation.hpp
r978 r1601 12 12 #include <map> 13 13 #include <vector> 14 #include "mpi_std.hpp" 14 15 #include "generic_algorithm_transformation.hpp" 15 16 #include "transformation_enum.hpp" -
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_factory_impl.hpp
r933 r1601 57 57 typedef std::map<ETranformationType, CreateTransformationCallBack> CallBackMap; 58 58 static CallBackMap* transformationCreationCallBacks_; 59 #pragma omp threadprivate(transformationCreationCallBacks_) 59 60 static bool registerTransformation(ETranformationType transType, CreateTransformationCallBack createFn); 60 61 static bool unregisterTransformation(ETranformationType transType); 61 62 static bool initializeTransformation_; 63 #pragma omp threadprivate(initializeTransformation_) 62 64 }; 63 65 -
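The #pragma omp threadprivate directives added above give each OpenMP thread its own copy of the static registry, which is what lets several per-thread XIOS clients build their transformation tables without racing. A minimal sketch of that pattern under the same assumption; the class and member names are invented for illustration.

    // Each thread sees its own callbacks_ pointer and initialises it lazily.
    #include <map>
    #include <string>

    class CFactorySketch
    {
      static std::map<std::string, int>* callbacks_;   // one copy per thread
      #pragma omp threadprivate(callbacks_)
    public:
      static void registerCallback(const std::string& id, int callbackId)
      {
        if (callbacks_ == 0) callbacks_ = new std::map<std::string, int>();
        (*callbacks_)[id] = callbackId;
      }
    };

    std::map<std::string, int>* CFactorySketch::callbacks_ = 0;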
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_selector.cpp
r1558 r1601 10 10 #include "grid.hpp" 11 11 #include "algo_types.hpp" 12 using namespace ep_lib; 12 13 13 14 namespace xios { -
XIOS/dev/dev_trunk_omp/src/transformation/grid_transformation_selector.hpp
r1275 r1601 12 12 #include <map> 13 13 #include <vector> 14 #include "mpi_std.hpp" 14 15 #include "generic_algorithm_transformation.hpp" 15 16 #include "transformation_enum.hpp" -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_extract_axis.cpp
r1260 r1601 14 14 #include "grid_transformation_factory_impl.hpp" 15 15 16 #include "reduction.hpp" 16 17 17 18 18 namespace xios { … … 49 49 StdString op = "extract"; 50 50 pos_ = algo->position; 51 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 51 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 52 { 53 CReductionAlgorithm::initReductionOperation(); 54 } 55 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 52 56 } 53 57 -
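This diff and several below repeat the same move: the statically constructed ReductionOperations map becomes a pointer created on first use via initReductionOperation(), which avoids depending on static initialisation order once the statics are threadprivate. A sketch of that lazy-initialisation idiom with illustrative names (the real table maps operation strings to XIOS reduction enums):

    // Lazily built lookup table: created the first time any caller needs it.
    #include <map>
    #include <string>

    enum EReductionSketch { REDUCE_SUM, REDUCE_MAX };

    struct CReductionTableSketch
    {
      static std::map<std::string, EReductionSketch>* ops_;

      static void init()
      {
        ops_ = new std::map<std::string, EReductionSketch>();
        (*ops_)["sum"] = REDUCE_SUM;
        (*ops_)["max"] = REDUCE_MAX;
      }

      static EReductionSketch lookup(const std::string& op)
      {
        if (ops_ == 0) init();      // build the table on first use
        return ops_->at(op);        // throws std::out_of_range for unknown ops
      }
    };

    std::map<std::string, EReductionSketch>* CReductionTableSketch::ops_ = 0;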
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_extract_axis.hpp
r1260 r1601 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp" 14 15 15 16 namespace xios { … … 24 25 Extract a scalar from an axis 25 26 */ 26 class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation 27 class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation, public CReductionAlgorithm 27 28 { 28 29 public: -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_axis.cpp
r1297 r1601 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp"16 15 17 #include "reduction.hpp" 16 18 17 19 18 namespace xios { … … 75 74 } 76 75 77 if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 76 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 77 { 78 CReductionAlgorithm::initReductionOperation(); 79 } 80 81 if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op)) 78 82 ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)", 79 83 << "Operation '" << op << "' not found. Please make sure to use a supported one" … … 81 85 << "Scalar destination " << scalarDestination->getId()); 82 86 83 reduction_ = CReductionAlgorithm::createOperation( CReductionAlgorithm::ReductionOperations[op]);87 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 84 88 } 85 89 -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_axis.hpp
r1260 r1601 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp" 14 15 15 16 namespace xios { … … 24 25 Reducing an axis to a scalar 25 26 */ 26 class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation 27 class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation, public CReductionAlgorithm 27 28 { 28 29 public: -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_domain.cpp
r1396 r1601 14 14 #include "grid_transformation_factory_impl.hpp" 15 15 16 #include "reduction.hpp"17 16 18 17 namespace xios { … … 71 70 } 72 71 73 if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 72 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 73 { 74 CReductionAlgorithm::initReductionOperation(); 75 } 76 if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 74 77 ERROR("CScalarAlgorithmReduceDomain::CScalarAlgorithmReduceDomain(CDomain* domainDestination, CDomain* domainSource, CReduceDomainToScalar* algo)", 75 78 << "Operation '" << op << "' not found. Please make sure to use a supported one" … … 77 80 << "Scalar destination " << scalarDestination->getId()); 78 81 79 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations [op]);82 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 80 83 local = algo->local ; 81 84 } -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_domain.hpp
r1313 r1601 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp" 14 15 15 16 namespace xios { … … 24 25 Reducing an DOMAIN to a scalar 25 26 */ 26 class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation 27 class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation, public CReductionAlgorithm 27 28 { 28 29 public: -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_scalar.cpp
r1314 r1601 9 9 #include "grid.hpp" 10 10 #include "grid_transformation_factory_impl.hpp" 11 #include "reduction.hpp" 11 12 12 13 13 … … 70 70 71 71 } 72 73 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 74 { 75 CReductionAlgorithm::initReductionOperation(); 76 } 72 77 73 if (CReductionAlgorithm::ReductionOperations .end() == CReductionAlgorithm::ReductionOperations.find(op))78 if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 74 79 ERROR("CScalarAlgorithmReduceScalar::CScalarAlgorithmReduceScalar(CScalar* scalarDestination, CScalar* scalarSource, CReduceScalarToScalar* algo)", 75 80 << "Operation '" << op << "' not found. Please make sure to use a supported one" … … 77 82 << "Scalar destination " << scalarDestination->getId()); 78 83 79 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations [op]);84 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 80 85 } 81 86 -
XIOS/dev/dev_trunk_omp/src/transformation/scalar_algorithm_reduce_scalar.hpp
r1314 r1601 8 8 #include "scalar_algorithm_transformation.hpp" 9 9 #include "transformation.hpp" 10 #include "reduction.hpp" 10 11 11 12 namespace xios { … … 19 20 Reducing an scalar to a scalar 20 21 */ 21 class CScalarAlgorithmReduceScalar : public CScalarAlgorithmTransformation 22 class CScalarAlgorithmReduceScalar : public CScalarAlgorithmTransformation, public CReductionAlgorithm 22 23 { 23 24 public: -
XIOS/dev/dev_trunk_omp/src/type/type.hpp
r1478 r1601 95 95 const CType_ref& operator = (CType<T>& val) const ; 96 96 const CType_ref& operator = (const CType_ref& val) const; 97 operator T&() const; 97 operator T&() const; 98 98 99 99 inline virtual CBaseType* clone(void) const { return _clone(); } -
XIOS/dev/dev_trunk_omp/src/type/type_impl.hpp
r1478 r1601 88 88 { 89 89 this->checkEmpty(); 90 return *ptrValue ;90 return *ptrValue ; 91 91 } 92 92 … … 129 129 CType<T>::operator const T&() const 130 130 { 131 this->checkEmpty();132 return *ptrValue ;131 this->checkEmpty(); 132 return *ptrValue ; 133 133 } 134 134 -
XIOS/dev/dev_trunk_omp/src/xios.hpp
r591 r1601 5 5 6 6 /// XIOS headers /// 7 #include "nc4_data_output.hpp" 7 #include "data_output.hpp" 8 8 9 9 10 10 11 using namespace xios; 11 using namespace xios::xml; 12 using namespace xios::func; 12 13 13 14 14 15 #endif //__XIOS__ -
XIOS/dev/dev_trunk_omp/src/xios_server.f90
r1158 r1601 3 3 IMPLICIT NONE 4 4 INCLUDE "mpif.h" 5 INTEGER :: ierr 6 5 INTEGER :: ierr, provided 6 7 CALL MPI_Init_thread(3, provided, ierr) 7 8 CALL xios_init_server 9 CALL MPI_Finalize(ierr) 10 8 11 9 12 END PROGRAM server_main
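The new server main asks MPI for a thread support level (the Fortran literal 3 corresponds to MPI_THREAD_MULTIPLE on common MPI implementations) and now finalises MPI itself. A hedged C++ equivalent of that initialisation, checking what the library actually granted; the warning message is illustrative.

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      int provided = 0;
      MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
      if (provided < MPI_THREAD_MULTIPLE)
        std::fprintf(stderr, "warning: MPI granted thread level %d only\n", provided);

      // ... launch the server here ...

      MPI_Finalize();
      return 0;
    }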