Changeset 1328 for XIOS/dev/branch_openmp/src
- Timestamp: 11/15/17 12:14:34 (7 years ago)
- Location: XIOS/dev/branch_openmp/src
- Files: 149 edited
XIOS/dev/branch_openmp/src/array_new.hpp (r1134 → r1328)
  Whitespace-only hunk around lines 554-558; the surrounding context is unchanged:
    TinyVector<int,N_rank> vect;
    size_t ne;

    ret = buffer.get(numDim);
    ret &= buffer.get(vect.data(), N_rank);
XIOS/dev/branch_openmp/src/attribute_enum.hpp (r1134 → r1328)
  Whitespace-only change: the whole CAttributeEnum<T> class declaration is re-indented (constructors, the accessors getValue/getStringValue, the mutators setValue/set/reset, the inherited-value helpers setInheritedValue/getInheritedValue/getInheritedStringValue/hasInheritedValue, the isEqual overloads, the destructor, operator=, toString/fromString, toBuffer/fromBuffer, the generate*Interface* methods, and the private _toString/_fromString/_toBuffer/_fromBuffer members plus CEnum<T> inheritedValue). No declaration is added, removed, or reordered.
XIOS/dev/branch_openmp/src/attribute_enum_impl.hpp (r1134 → r1328)
  Whitespace normalization of the same template's definitions (constructors, getValue/getStringValue, set/setInheritedValue, getInheritedValue/getInheritedStringValue/hasInheritedValue, isEqual, operator=, _toString/_fromString, _toBuffer/_fromBuffer, and the generate*Interface* bodies). The one textual fix is the mis-encoded header comment: "D éfinitions" becomes "Définitions". No functional change.
XIOS/dev/branch_openmp/src/attribute_map.hpp (r1134 → r1328)
    /// Propriété statique ///
    static CAttributeMap * Current;
  - #pragma omp threadprivate (Current)
XIOS/dev/branch_openmp/src/buffer_client.cpp (r1205 → r1328)
    #include "mpi.hpp"
    #include "tracer.hpp"
  + using namespace ep_lib;
  ...
    retBuffer = new CBufferOut(buffer[current], bufferSize);
  - #pragma omp critical (_output)
    info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl;
XIOS/dev/branch_openmp/src/buffer_client.hpp (r1205 → r1328)
  - #ifdef _usingEP
  - #include "ep_declaration.hpp"
  - #endif
  ...
    static size_t maxRequestSize;
  - #pragma omp threadprivate(maxRequestSize)
  - CClientBuffer(MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
  + CClientBuffer(ep_lib::MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
  ...
  - MPI_Request request;
  + ep_lib::MPI_Request request;
  ...
  - const MPI_Comm interComm;
  + const ep_lib::MPI_Comm interComm;
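Most of this changeset applies one mechanical pattern: MPI handle types in signatures and data members are qualified with ep_lib:: so the endpoint library, which multiplexes several OpenMP threads onto a single MPI process, can substitute its own handle types. A minimal sketch of the pattern, assuming (hypothetically) that ep_lib merely aliases the standard handles; the real library defines endpoint-aware types:

#include <mpi.h>

// Hypothetical stand-in for XIOS's ep_lib: plain aliases here, endpoint-aware
// handle types in the real library.
namespace ep_lib
{
  typedef ::MPI_Comm    MPI_Comm;
  typedef ::MPI_Request MPI_Request;
}

// Same shape as the changed CClientBuffer members and constructor: once the
// class stores ep_lib:: types, the same code compiles against either backend.
class ClientBufferSketch
{
  public:
    ClientBufferSketch(ep_lib::MPI_Comm intercomm, int serverRank)
      : interComm(intercomm), serverRank(serverRank) {}
  private:
    const ep_lib::MPI_Comm interComm;  // communicator to the server
    int serverRank;
    ep_lib::MPI_Request request;       // pending asynchronous send, as in the diff
};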
XIOS/dev/branch_openmp/src/buffer_server.hpp (r1134 → r1328)
  - #include "mpi_std.hpp"
  + #include "mpi.hpp"
XIOS/dev/branch_openmp/src/calendar.cpp (r1134 → r1328)
  In CCalendar::update(int step):
  - #pragma omp critical (_output)
  - info(80) << "update step : " << step << " timestep " << this->timestep << std::endl;
  + info(20) << "update step : " << step << " timestep " << this->timestep << std::endl;
    return (this->currentDate = this->getInitDate() + step * this->timestep);
XIOS/dev/branch_openmp/src/client.cpp (r1205 → r1328)
  - Replaces #include "log.hpp" with using namespace ep_lib; drops the thread-local debug state (extern int test_omp_rank and its threadprivate pragma) and the per-thread log-stream array StdOFStream array_infoStream[16].
  - initialize(): MPI_Init_thread(NULL, NULL, 3, &return_level) with its assert is reverted to plain MPI_Init(NULL, NULL); the "#pragma omp critical(_output)" logging block is replaced by a plain info(50) "intercommCreate::client" message using the MPI rank; the OASIS initialization branch is commented out with /* ... */.
  - registerContext(): drops the temporary MPI_Comm_rank(contextComm, &tmp_rank) lookup; "Register new Context" is logged at info(10) without a critical section.
  - finalize(): oasis_finalize() is commented out in favour of an unconditional MPI_Finalize(); the finalize message and the performance/memory report(0)/report(100) block, previously commented out inside a critical section, are re-enabled.
  - openInfoStream(): goes back to a single std::filebuf taken from m_infoStream instead of info_FB[omp_get_thread_num()]; openStream()'s ERROR message is re-indented.
XIOS/dev/branch_openmp/src/client.hpp (r1164 → r1328)
  - initialize(), intraComm, interComm, and contextInterComms_ptr now use ep_lib::MPI_Comm.
  - All "#pragma omp threadprivate(...)" pragmas (intraComm, interComm, contextInterComms_ptr, serverLeader, is_MPI_Initialized, rank, m_infoStream, m_errorStream) are removed, as is the static StdOFStream array_infoStream[16] member.
XIOS/dev/branch_openmp/src/client_client_dht_template.hpp (r1134 → r1328)
  - #include "mpi_std.hpp" becomes #include "mpi.hpp".
  - Adds single-request overloads of the four point-to-point helpers, alongside the existing std::vector<ep_lib::MPI_Request>& versions:
      void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize, const ep_lib::MPI_Comm& clientIntraComm, ep_lib::MPI_Request* requestSendInfo);
      void recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, const ep_lib::MPI_Comm& clientIntraComm, ep_lib::MPI_Request* requestRecvInfo);
      void sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize, const ep_lib::MPI_Comm& clientIntraComm, ep_lib::MPI_Request* requestSendIndexGlobal);
      void recvIndexFromClients(int clientSrcRank, size_t* indices, size_t indiceSize, const ep_lib::MPI_Comm& clientIntraComm, ep_lib::MPI_Request* requestRecvIndex);
XIOS/dev/branch_openmp/src/client_client_dht_template_impl.hpp (r1209 → r1328)
  - Removes the "_usingEP" conditional includes (ep_declaration.hpp, ep_lib.hpp); MPI calls are now explicitly qualified with ep_lib:: (MPI_Comm_size, MPI_Comm_rank, MPI_Isend, MPI_Irecv, MPI_Waitall), and a commented-out MPI_Barrier is dropped.
  - computeIndexInfoMappingLevel() and computeDistributedIndex(): the inlined MPI_Isend/MPI_Irecv loops, with their manual position bookkeeping and commented-out temporary send buffers, are replaced by calls to the new single-request helpers; requests are counted up front, the request vector is sized once, and each helper writes into &request[request_position++]. The return-path exchange gets its own requestOnReturn_size/requestOnReturn_position counters instead of reusing request_size/position.
  - The existing vector-based overloads of sendIndexToClients, recvIndexFromClients, sendInfoToClients, and recvInfoFromClients lose their "should not call this function" debug printf; matching ep_lib::MPI_Request* overloads are added, each documented and forwarding a single ep_lib::MPI_Isend/MPI_Irecv.
  - sendRecvRank()/sendRecvOnReturn(): the receives are posted before the sends again, &recvBuff[2*idx] is written as &recvBuff[0]+2*idx, and the my_rank debug lookup plus the commented-out MPI_Barrier/MPI_Waitall variants are removed.
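The single-request overloads exist so the exchange loops above can count the requests first, size one std::vector<MPI_Request> once, and hand each helper its own slot instead of push_back-ing into a shared vector. A hedged sketch of that calling pattern in plain MPI (the tag constant and function bodies are assumptions, not the verbatim XIOS code):

#include <mpi.h>
#include <vector>
#include <cstddef>

static const int TAG_DHT_INDEX = 15;  // hypothetical stand-in for MPI_DHT_INDEX

// Single-request overload: the caller owns the request storage.
void sendIndexToClients(int dest, size_t* indices, size_t n,
                        MPI_Comm comm, MPI_Request* req)
{
  MPI_Isend(indices, static_cast<int>(n), MPI_UNSIGNED_LONG,
            dest, TAG_DHT_INDEX, comm, req);
}

void exchangeIndices(const std::vector<int>& destRanks,
                     size_t* buf, size_t n, MPI_Comm comm)
{
  // Count first, size once, then give each helper a distinct slot.
  std::vector<MPI_Request> request(destRanks.size());
  int position = 0;
  for (std::size_t i = 0; i < destRanks.size(); ++i)
    sendIndexToClients(destRanks[i], buf, n, comm, &request[position++]);

  std::vector<MPI_Status> status(request.size());
  MPI_Waitall(static_cast<int>(request.size()),
              request.data(), status.data());
}

Compared with the previous version, this removes the hidden reallocation of the request vector inside the helpers and makes the total request count explicit at the call site.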
XIOS/dev/branch_openmp/src/client_server_mapping.cpp (r1287 → r1328)
  + using namespace ep_lib; (after the include)
  - Removes the commented-out debug printf loops that followed the MPI_Allgather and MPI_Allgatherv calls.
XIOS/dev/branch_openmp/src/client_server_mapping.hpp (r1134 → r1328)
  - #ifdef _usingEP
  - #include "ep_declaration.hpp"
  - #endif
XIOS/dev/branch_openmp/src/client_server_mapping_distributed.cpp (r907 → r1328)
  + using namespace ep_lib; (after the includes)
XIOS/dev/branch_openmp/src/context_client.cpp (r1205 → r1328)
  - Adds using namespace ep_lib; the CContextClient constructor signature drops its explicit ep_lib:: qualifications accordingly.
  - The "#ifdef _usingMPI / #elif _usingEP" split around the buffer-ratio reduction collapses to the portable two-buffer call MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm), keeping the MPI_IN_PLACE variant as a comment.
  - The per-connection memory report(10)/report(0) lines in the finalize path, previously commented out, are re-enabled.
XIOS/dev/branch_openmp/src/context_server.cpp (r1179 → r1328)
  - #include "mpi_std.hpp" becomes #include "mpi.hpp"; adds using namespace ep_lib; and drops the explicit ep_lib:: qualifications in the constructor signature and MPI calls.
  - listen() is restructured: instead of probing every client rank in turn, it first probes MPI_ANY_SOURCE (-2) with tag 20; the receive logic moves into the new method listenPendingRequest(MPI_Status&), which performs the buffer-size handshake (MPI_Recv of the size, CServerBuffer allocation) or posts the MPI_Irecv, returning false when the server buffer has no room. Only in that case does listen() fall back to scanning all ranks. The source rank is taken from status.MPI_SOURCE under _usingMPI or status.ep_src under _usingEP.
  - checkPendingRequest() keeps its logic but loses the ep_lib:: prefixes.
  - The finalize path drops the "#pragma omp critical (_output)" around the "context finalized" log and re-enables the per-client memory report(10)/report(0) lines.
XIOS/dev/branch_openmp/src/context_server.hpp (r1134 → r1328)
    bool eventLoop(bool enableEventsProcessing = true);
    void listen(void) ;
  + bool listenPendingRequest(ep_lib::MPI_Status& status);
    void checkPendingRequest(void) ;
    void processRequest(int rank, char* buff,int count) ;
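The listen() rewrite replaces an unconditional scan of every client rank by a first probe on MPI_ANY_SOURCE, falling back to the per-rank scan only when the probed message cannot be accepted yet because its server buffer is full. A simplified reconstruction of that control flow, with tryReceive() standing in for listenPendingRequest() (its body here just drains the message; the real method allocates a CServerBuffer or posts an MPI_Irecv):

#include <mpi.h>
#include <vector>

// Hypothetical analogue of CContextServer::listenPendingRequest(): returns
// true when the probed message was consumed, false when the corresponding
// server buffer has no room yet.
bool tryReceive(int rank, MPI_Status& status, MPI_Comm interComm)
{
  int count;
  MPI_Get_count(&status, MPI_CHAR, &count);
  std::vector<char> tmp(count);
  MPI_Recv(tmp.data(), count, MPI_CHAR, rank, 20, interComm, MPI_STATUS_IGNORE);
  return true;  // placeholder: always succeeds in this sketch
}

void listenSketch(MPI_Comm interComm, int commSize)
{
  int flag;
  MPI_Status status;

  // First, probe any source with the fixed tag 20 used by the XIOS protocol.
  MPI_Iprobe(MPI_ANY_SOURCE, 20, interComm, &flag, &status);
  if (!flag) return;

  if (tryReceive(status.MPI_SOURCE, status, interComm)) return;

  // The probed message could not be buffered: poll the remaining ranks so a
  // single saturated client cannot starve the others.
  for (int rank = 0; rank < commSize; ++rank)
  {
    MPI_Iprobe(rank, 20, interComm, &flag, &status);
    if (flag) tryReceive(rank, status, interComm);
  }
}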
XIOS/dev/branch_openmp/src/cxios.cpp (r1205 → r1328)
  - Adds using namespace ep_lib; removes extern int test_omp_rank and its threadprivate pragma; rootFile, xiosCodeId, clientFile, and serverFile lose their const qualifier.
  - initialize(): parseFile(rootFile) is no longer wrapped in "#pragma omp critical" plus "#pragma omp barrier".
  - parseXiosConfig(): keeps the endpoint setup (num_ep from omp_get_num_threads(), the MPI_Info/ep_comm creation on the master thread, and the per-thread passage[] assignment of globalComm) but drops the tmp_rank/test_omp_rank bookkeeping.
  - initClientSide(): removes the critical sections around openInfoStream(clientFile) and around the "Write data base Registry" log.
  - initServer(): the manual MPI_Initialized/MPI_Init bootstrap is removed; the method is reduced to set_new_handler plus the parse-list setup, MPI initialization being handled again by CServer::initialize().
XIOS/dev/branch_openmp/src/cxios.hpp
r1134 r1328 5 5 #include "mpi.hpp" 6 6 #include "registry.hpp" 7 #include "log.hpp"8 7 9 8 namespace xios … … 15 14 { 16 15 public: 17 18 19 20 21 16 static void initialize(void) ; 17 static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ; 18 static void initServerSide(void) ; 19 static void clientFinalize(void) ; 20 static void parseFile(const string& filename) ; 22 21 23 24 22 template <typename T> 23 static T getin(const string& id,const T& defaultValue) ; 25 24 26 27 25 template <typename T> 26 static T getin(const string& id) ; 28 27 29 28 public: 30 static const string rootFile; //!< Configuration filename31 static conststring xiosCodeId ; //!< Identity for XIOS32 static conststring clientFile; //!< Filename template for client33 static conststring serverFile; //!< Filename template for server29 static string rootFile ; //!< Configuration filename 30 static string xiosCodeId ; //!< Identity for XIOS 31 static string clientFile; //!< Filename template for client 32 static string serverFile; //!< Filename template for server 34 33 35 static bool isClient ; //!< Check if xios is client 36 static bool isServer ; //!< Check if xios is server 37 #pragma omp threadprivate(isClient, isServer) 34 static bool isClient ; //!< Check if xios is client 35 static bool isServer ; //!< Check if xios is server 38 36 39 static MPI_Comm globalComm ; //!< Global communicator 40 #pragma omp threadprivate(globalComm) 37 static ep_lib::MPI_Comm globalComm ; //!< Global communicator 41 38 42 static bool printLogs2Files; //!< Printing out logs into files 43 static bool usingOasis ; //!< Using Oasis 44 static bool usingServer ; //!< Using server (server mode) 45 static double bufferSizeFactor; //!< Factor used to tune the buffer size 46 static const double defaultBufferSizeFactor; //!< Default factor value 47 static StdSize minBufferSize; //!< Minimum buffer size 48 static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible) 49 #pragma omp threadprivate(printLogs2Files, usingOasis, usingServer, bufferSizeFactor, minBufferSize, isOptPerformance) 50 51 static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers 52 static double recvFieldTimeout; 53 #pragma omp threadprivate(recvFieldTimeout) 54 55 39 static bool printLogs2Files; //!< Printing out logs into files 40 static bool usingOasis ; //!< Using Oasis 41 static bool usingServer ; //!< Using server (server mode) 42 static double bufferSizeFactor; //!< Factor used to tune the buffer size 43 static const double defaultBufferSizeFactor; //!< Default factor value 44 static StdSize minBufferSize; //!< Minimum buffer size 45 static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible) 46 static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers 47 static double recvFieldTimeout; //!< Time to wait for data before issuing an error when receiving a field 48 56 49 public: 57 50 //! Setting xios to use server mode -
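Note: this hunk removes every "#pragma omp threadprivate(...)" from the CXios statics, so each static is once again a single process-wide object rather than one copy per OpenMP thread. For contrast, a tiny example of what threadprivate provides (the variable is made up):

#include <omp.h>
#include <cstdio>

static int slot = 0;              // with the pragma below, each thread owns a private copy
#pragma omp threadprivate(slot)

int main()
{
  #pragma omp parallel
  {
    slot = omp_get_thread_num();  // no data race: writes land in per-thread storage
    std::printf("thread %d: slot=%d\n", omp_get_thread_num(), slot);
  }
  return 0;                       // without the pragma these writes would race on one int
}

-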
XIOS/dev/branch_openmp/src/data_output.cpp
r1205 r1328 4 4 #include "group_template.hpp" 5 5 #include "context.hpp" 6 //mpi.hpp 6 7 7 namespace xios 8 8 { -
XIOS/dev/branch_openmp/src/data_output.hpp
r1287 r1328 59 59 virtual void writeTimeDimension_(void) = 0; 60 60 virtual void writeTimeAxis_ (CField* field, 61 const boost::shared_ptr<CCalendar> cal) = 0;61 const shared_ptr<CCalendar> cal) = 0; 62 62 63 63 /// Propriétés protégées /// -
XIOS/dev/branch_openmp/src/dht_auto_indexing.cpp
r1134 r1328 8 8 */ 9 9 #include "dht_auto_indexing.hpp" 10 using namespace ep_lib; 10 11 11 12 namespace xios … … 22 23 23 24 CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 24 const ep_lib::MPI_Comm& clientIntraComm)25 const MPI_Comm& clientIntraComm) 25 26 : CClientClientDHTTemplate<size_t>(clientIntraComm) 26 27 { … … 58 59 */ 59 60 CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 60 const ep_lib::MPI_Comm& clientIntraComm)61 const MPI_Comm& clientIntraComm) 61 62 : CClientClientDHTTemplate<size_t>(clientIntraComm) 62 63 { -
XIOS/dev/branch_openmp/src/dht_auto_indexing.hpp
r1134 r1328 12 12 13 13 #include "client_client_dht_template.hpp" 14 #ifdef _usingEP15 #include "ep_declaration.hpp"16 #endif17 14 18 15 namespace xios -
XIOS/dev/branch_openmp/src/event_scheduler.cpp
r1134 r1328 2 2 #include "xios_spl.hpp" 3 3 #include "mpi.hpp" 4 using namespace ep_lib; 4 5 5 6 namespace xios … … 132 133 while(received) 133 134 { 134 #ifdef _usingEP 135 MPI_Iprobe(-1,1,communicator,&received, &status) ; 136 #else 137 MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ; 138 #endif 135 MPI_Iprobe(-2,1,communicator,&received, &status) ; 139 136 if (received) 140 137 { 141 138 recvRequest=new SPendingRequest ; 142 #ifdef _usingEP 143 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -1, 1, communicator, &(recvRequest->request)) ; 144 #else 145 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ; 146 #endif 139 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -2, 1, communicator, &(recvRequest->request)) ; 147 140 pendingRecvParentRequest.push(recvRequest) ; 148 141 } … … 182 175 while(received) 183 176 { 184 #ifdef _usingEP 185 MPI_Iprobe(-1,0,communicator,&received, &status) ; 186 #else 187 MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ; 188 #endif 177 MPI_Iprobe(-2,0,communicator,&received, &status) ; 189 178 if (received) 190 179 { 191 180 recvRequest=new SPendingRequest ; 192 #ifdef _usingEP 193 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -1, 0, communicator, &recvRequest->request) ; 194 #else 195 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ; 196 #endif 181 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -2, 0, communicator, &recvRequest->request) ; 197 182 pendingRecvChildRequest.push_back(recvRequest) ; 198 183 } -
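Note: the scheduler now probes with a hard-coded source of -2 instead of choosing between -1 and MPI_ANY_SOURCE per build flavour; -2 is presumably the endpoint library's any-source encoding, which is an assumption here. With standard MPI the same drain loop looks like this:

#include <mpi.h>

// Sketch: receive every already-arrived message with tag 1 on comm,
// posting one nonblocking receive per probed message.
void drainTag1(MPI_Comm comm, unsigned long (*buf)[3], MPI_Request* req, int maxMsg)
{
  int received = 1, n = 0;
  while (received && n < maxMsg)
  {
    MPI_Status status;
    MPI_Iprobe(MPI_ANY_SOURCE, 1, comm, &received, &status);
    if (received)
    {
      MPI_Irecv(buf[n], 3, MPI_UNSIGNED_LONG,
                status.MPI_SOURCE, 1, comm, &req[n]); // receive from the sender the probe saw
      ++n;
    }
  }
}

-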
XIOS/dev/branch_openmp/src/event_scheduler.hpp
r1134 r1328 4 4 #include "xios_spl.hpp" 5 5 #include "mpi.hpp" 6 #ifdef _usingEP7 #include "ep_declaration.hpp"8 #endif9 10 6 11 7 namespace xios … … 30 26 * @param[in] comm : MPI communicator du duplicate for internal use 31 27 */ 32 CEventScheduler(const MPI_Comm& comm) ;28 CEventScheduler(const ep_lib::MPI_Comm& comm) ; 33 29 34 30 … … 155 151 { 156 152 size_t buffer[3] ; /*!< communication buffer : timeLine, hashId, level */ 157 MPI_Request request ; /*!< pending MPI request */153 ep_lib::MPI_Request request ; /*!< pending MPI request */ 158 154 } ; 159 155 160 MPI_Comm communicator ; /*!< Internal MPI communicator */156 ep_lib::MPI_Comm communicator ; /*!< Internal MPI communicator */ 161 157 int mpiRank ; /*!< Rank in the communicator */ 162 158 int mpiSize ; /*!< Size of the communicator */ -
XIOS/dev/branch_openmp/src/filter/file_writer_filter.cpp
r1205 r1328 17 17 void CFileWriterFilter::onInputReady(std::vector<CDataPacketPtr> data) 18 18 { 19 CDataPacketPtr packet = data[0];20 21 19 const bool detectMissingValue = (!field->detect_missing_value.isEmpty() 22 20 && !field->default_value.isEmpty() 23 21 && field->detect_missing_value == true); 22 23 CArray<double, 1> dataArray = (detectMissingValue) ? data[0]->data.copy() : data[0]->data; 24 24 25 if (detectMissingValue) 25 26 { 26 27 const double missingValue = field->default_value; 27 const size_t nbData = packet->data.numElements();28 const size_t nbData = dataArray.numElements(); 28 29 for (size_t idx = 0; idx < nbData; ++idx) 29 30 { 30 if (NumTraits<double>::isnan( packet->data(idx)))31 packet->data(idx) = missingValue;31 if (NumTraits<double>::isnan(dataArray(idx))) 32 dataArray(idx) = missingValue; 32 33 } 33 34 } 34 35 35 field->sendUpdateData( packet->data);36 field->sendUpdateData(dataArray); 36 37 } 37 38 -
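Note: onInputReady now patches NaNs on a copy of the packet's array (dataArray) when missing-value detection is enabled, leaving the shared input packet untouched. The same detect-and-substitute step reduced to a standalone function over std::vector (names are illustrative):

#include <cmath>
#include <vector>

// Replace NaNs by the field's default value, working on a private copy only.
std::vector<double> substituteMissing(const std::vector<double>& in,
                                      bool detectMissing, double missingValue)
{
  if (!detectMissing) return in;   // nothing to patch: pass the data through
  std::vector<double> out(in);     // copy first, since the input may be shared
  for (std::size_t i = 0; i < out.size(); ++i)
    if (std::isnan(out[i])) out[i] = missingValue;
  return out;
}

-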
XIOS/dev/branch_openmp/src/filter/source_filter.cpp
r1205 r1328 99 99 void CSourceFilter::signalEndOfStream(CDate date) 100 100 { 101 date = date + offset; // this is a temporary solution, it should be part of a proper temporal filter 102 101 103 CDataPacketPtr packet(new CDataPacket); 102 104 packet->date = date; -
XIOS/dev/branch_openmp/src/filter/spatial_transform_filter.cpp
r1287 r1328 1 #include "mpi.hpp" 1 2 #include "spatial_transform_filter.hpp" 2 3 #include "grid_transformation.hpp" 3 4 #include "context.hpp" 4 5 #include "context_client.hpp" 6 using namespace ep_lib; 5 7 6 8 namespace xios … … 65 67 } 66 68 69 //std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > CSpatialTransformFilterEngine::engines; 67 70 std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > *CSpatialTransformFilterEngine::engines_ptr = 0; 68 71 … … 72 75 ERROR("CSpatialTransformFilterEngine& CSpatialTransformFilterEngine::get(CGridTransformation* gridTransformation)", 73 76 "Impossible to get the requested engine, the grid transformation is invalid."); 74 77 75 78 if(engines_ptr == NULL) engines_ptr = new std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >; 76 79 80 //std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines.find(gridTransformation); 77 81 std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines_ptr->find(gridTransformation); 82 //if (it == engines.end()) 78 83 if (it == engines_ptr->end()) 79 84 { 80 85 boost::shared_ptr<CSpatialTransformFilterEngine> engine(new CSpatialTransformFilterEngine(gridTransformation)); 86 //it = engines.insert(std::make_pair(gridTransformation, engine)).first; 81 87 it = engines_ptr->insert(std::make_pair(gridTransformation, engine)).first; 82 88 } … … 122 128 double defaultValue = std::numeric_limits<double>::quiet_NaN(); 123 129 if (0 != dataDest.numElements()) ignoreMissingValue = NumTraits<double>::isnan(dataDest(0)); 124 125 const std::list<CGridTransformation::SendingIndexGridSourceMap> *listLocalIndexSend_ptr = & (gridTransformation->getLocalIndexToSendFromGridSource()); 126 130 127 131 const std::list<CGridTransformation::SendingIndexGridSourceMap>& listLocalIndexSend = gridTransformation->getLocalIndexToSendFromGridSource(); 128 132 const std::list<CGridTransformation::RecvIndexGridDestinationMap>& listLocalIndexToReceive = gridTransformation->getLocalIndexToReceiveOnGridDest(); … … 133 137 CArray<double,1> dataCurrentDest(dataSrc.copy()); 134 138 135 std::list<CGridTransformation::SendingIndexGridSourceMap>::const_iterator itListSend = listLocalIndexSend _ptr->begin(),136 iteListSend = listLocalIndexSend _ptr->end();139 std::list<CGridTransformation::SendingIndexGridSourceMap>::const_iterator itListSend = listLocalIndexSend.begin(), 140 iteListSend = listLocalIndexSend.end(); 137 141 std::list<CGridTransformation::RecvIndexGridDestinationMap>::const_iterator itListRecv = listLocalIndexToReceive.begin(); 138 142 std::list<size_t>::const_iterator itNbListRecv = listNbLocalIndexToReceive.begin(); … … 155 159 sendBuff[idxSendBuff] = new double[itSend->second.numElements()]; 156 160 } 157 158 const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv;159 CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv,160 iteRecv = localIndexToReceive.end();161 161 162 162 idxSendBuff = 0; 163 std::vector< ep_lib::MPI_Request> sendRecvRequest(localIndexToSend.size()+localIndexToReceive.size());163 std::vector<MPI_Request> sendRecvRequest(localIndexToSend.size() + itListRecv->size()); 164 164 int position = 0; 165 165 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) … … 172 172 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 173 173 } 174 
MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position]); 175 position++; 174 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position++]); 176 175 } 177 176 178 177 // Receiving data on destination fields 179 178 const CGridTransformation::RecvIndexGridDestinationMap& localIndexToReceive = *itListRecv; 179 CGridTransformation::RecvIndexGridDestinationMap::const_iterator itbRecv = localIndexToReceive.begin(), itRecv, 180 iteRecv = localIndexToReceive.end(); 180 181 int recvBuffSize = 0; 181 182 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) recvBuffSize += itRecv->second.size(); //(recvBuffSize < itRecv->second.size()) … … 188 189 int srcRank = itRecv->first; 189 190 int countSize = itRecv->second.size(); 190 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position]); 191 position++; 191 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position++]); 192 192 currentBuff += countSize; 193 193 } 194 std::vector< ep_lib::MPI_Status> status(sendRecvRequest.size());194 std::vector<MPI_Status> status(sendRecvRequest.size()); 195 195 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 196 196 … … 203 203 std::vector<bool> localInitFlag(dataCurrentDest.numElements(), true); 204 204 currentBuff = 0; 205 bool firstPass=true; 205 206 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) 206 207 { … … 211 212 dataCurrentDest, 212 213 localInitFlag, 213 ignoreMissingValue );214 ignoreMissingValue,firstPass); 214 215 215 216 currentBuff += countSize; 217 firstPass=false ; 216 218 } 217 219 -
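Note: the engine posts all its MPI_Isend and MPI_Irecv calls into one request vector and completes the whole exchange with a single MPI_Waitall, as the hunks above show. A reduced sketch of that exchange (buffer bookkeeping simplified; tag 12 copied from the changeset):

#include <mpi.h>
#include <vector>

void exchange(MPI_Comm comm,
              const std::vector<int>& dstRanks, std::vector<std::vector<double> >& sendBuf,
              const std::vector<int>& srcRanks, std::vector<std::vector<double> >& recvBuf)
{
  std::vector<MPI_Request> reqs(dstRanks.size() + srcRanks.size());
  int pos = 0;
  for (std::size_t i = 0; i < dstRanks.size(); ++i)  // one nonblocking send per destination
    MPI_Isend(&sendBuf[i][0], int(sendBuf[i].size()), MPI_DOUBLE,
              dstRanks[i], 12, comm, &reqs[pos++]);
  for (std::size_t i = 0; i < srcRanks.size(); ++i)  // one nonblocking receive per source
    MPI_Irecv(&recvBuf[i][0], int(recvBuf[i].size()), MPI_DOUBLE,
              srcRanks[i], 12, comm, &reqs[pos++]);
  std::vector<MPI_Status> stats(reqs.size());
  MPI_Waitall(int(reqs.size()), &reqs[0], &stats[0]); // the whole exchange completes here
}

-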
XIOS/dev/branch_openmp/src/filter/spatial_transform_filter.hpp
r1134 r1328 3 3 4 4 #include "filter.hpp" 5 6 5 namespace xios 7 6 { … … 106 105 //static std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > engines; 107 106 static std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > *engines_ptr; 108 #pragma omp threadprivate(engines_ptr)109 110 107 }; // class CSpatialTransformFilterEngine 111 108 } // namespace xios -
XIOS/dev/branch_openmp/src/filter/store_filter.cpp
r1205 r1328 78 78 void CStoreFilter::onInputReady(std::vector<CDataPacketPtr> data) 79 79 { 80 CDataPacketPtr packet = data[0]; 80 81 CDataPacketPtr packet; 82 if (detectMissingValues) 83 { 84 const size_t nbData = data[0]->data.numElements(); 85 86 packet = CDataPacketPtr(new CDataPacket); 87 packet->date = data[0]->date; 88 packet->timestamp = data[0]->timestamp; 89 packet->status = data[0]->status; 90 packet->data.resize(nbData); 91 packet->data = data[0]->data; 92 93 for (size_t idx = 0; idx < nbData; ++idx) 94 { 95 if (NumTraits<double>::isnan(packet->data(idx))) 96 packet->data(idx) = missingValue; 97 } 98 99 } 100 101 else 102 { 103 packet = data[0]; 104 } 81 105 82 106 packets.insert(std::make_pair(packet->timestamp, packet)); … … 85 109 gc.registerObject(this, packet->timestamp); 86 110 87 if (detectMissingValues)88 {89 const size_t nbData = packet->data.numElements();90 for (size_t idx = 0; idx < nbData; ++idx)91 {92 if (NumTraits<double>::isnan(packet->data(idx)))93 packet->data(idx) = missingValue;94 }95 }96 111 } 97 112 -
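Note: CStoreFilter now clones the incoming packet before overwriting NaNs, because data[0] may still be referenced by other filters fed from the same source. The aliasing hazard in miniature, with a stripped-down packet type standing in for CDataPacket:

#include <cmath>
#include <memory>
#include <vector>

struct Packet { std::vector<double> data; };   // stand-in for CDataPacket
typedef std::shared_ptr<Packet> PacketPtr;

// Patch missing values on a private clone; the shared original stays intact.
PacketPtr sanitize(const PacketPtr& in, bool detect, double missing)
{
  if (!detect) return in;                      // share the original untouched
  PacketPtr out(new Packet(*in));              // deep copy of the payload
  for (std::size_t i = 0; i < out->data.size(); ++i)
    if (std::isnan(out->data[i])) out->data[i] = missing;
  return out;
}

-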
XIOS/dev/branch_openmp/src/filter/temporal_filter.cpp
r1124 r1328 14 14 , isOnceOperation(functor->timeType() == func::CFunctor::once) 15 15 , isInstantOperation(functor->timeType() == func::CFunctor::instant) 16 // If we can optimize the sampling when dealing with an instant functor we do it 17 , samplingFreq((isInstantOperation && samplingFreq == TimeStep && samplingOffset == NoneDu) ? opFreq : samplingFreq) 18 , samplingOffset((isInstantOperation && samplingFreq == TimeStep && samplingOffset == NoneDu) ? opFreq - initDate.getRelCalendar().getTimeStep() : samplingOffset) 16 , samplingFreq(samplingFreq) 17 , samplingOffset(samplingOffset) 19 18 , opFreq(opFreq) 20 19 , nextSamplingDate(initDate + this->samplingOffset + initDate.getRelCalendar().getTimeStep()) -
XIOS/dev/branch_openmp/src/group_factory.cpp
r1134 r1328 4 4 { 5 5 /// ////////////////////// Définitions ////////////////////// /// 6 StdString *CGroupFactory::CurrContext_ptr = new StdString;6 StdString CGroupFactory::CurrContext(""); 7 7 8 8 void CGroupFactory::SetCurrentContextId(const StdString & context) 9 { 10 if(CGroupFactory::CurrContext_ptr == NULL ) CGroupFactory::CurrContext_ptr = new StdString; 11 CGroupFactory::CurrContext_ptr->assign(context); 9 { 10 CGroupFactory::CurrContext = context; 12 11 } 13 12 14 13 StdString & CGroupFactory::GetCurrentContextId(void) 15 14 { 16 return ( *CGroupFactory::CurrContext_ptr);15 return (CGroupFactory::CurrContext); 17 16 } 18 17 -
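Note: the two revisions above alternate between a plain static member (CurrContext) and a lazily heap-allocated one (CurrContext_ptr guarded by a NULL check). If lazy construction is the actual goal, a function-local static is a leak-free third option; this is a hypothetical alternative, not what either revision does:

#include <string>

// Meyers-singleton style accessor: constructed on first use, no explicit new,
// and (in C++11) initialized thread-safely by the runtime.
static std::string& currentContextId()
{
  static std::string id;
  return id;
}

void setCurrentContextId(const std::string& context) { currentContextId() = context; }

-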
XIOS/dev/branch_openmp/src/group_factory.hpp
r1134 r1328 69 69 70 70 /// Propriétés statiques /// 71 static StdString *CurrContext_ptr; 72 #pragma omp threadprivate(CurrContext_ptr) 71 static StdString CurrContext; 73 72 74 73 }; // class CGroupFactory -
XIOS/dev/branch_openmp/src/group_factory_decl.cpp
r1287 r1328 5 5 { 6 6 # define macro(U) \ 7 template void CGroupFactory::AddGroup<U>( boost::shared_ptr<U> pgroup,boost::shared_ptr<U> cgroup); \8 template void CGroupFactory::AddChild<U>( boost::shared_ptr<U> group, boost::shared_ptr<U::RelChild> child); \9 template boost::shared_ptr<U> CGroupFactory::GetGroup<U>(boost::shared_ptr<U> group, const StdString & id); \10 template boost::shared_ptr<U::RelChild> CGroupFactory::GetChild<U>(boost::shared_ptr<U> group, const StdString & id); \11 template int CGroupFactory::GetGroupNum<U>( boost::shared_ptr<U> group); \12 template int CGroupFactory::GetGroupIdNum<U>( boost::shared_ptr<U> group); \13 template int CGroupFactory::GetChildNum<U>( boost::shared_ptr<U> group); \7 template void CGroupFactory::AddGroup<U>(shared_ptr<U> pgroup,shared_ptr<U> cgroup); \ 8 template void CGroupFactory::AddChild<U>(shared_ptr<U> group, shared_ptr<U::RelChild> child); \ 9 template shared_ptr<U> CGroupFactory::GetGroup<U>(shared_ptr<U> group, const StdString & id); \ 10 template shared_ptr<U::RelChild> CGroupFactory::GetChild<U>(shared_ptr<U> group, const StdString & id); \ 11 template int CGroupFactory::GetGroupNum<U>(shared_ptr<U> group); \ 12 template int CGroupFactory::GetGroupIdNum<U>(shared_ptr<U> group); \ 13 template int CGroupFactory::GetChildNum<U>(shared_ptr<U> group); \ 14 14 template int CGroupFactory::GetChildIdNum<U>(boost::shared_ptr<U> group); \ 15 template bool CGroupFactory::HasGroup<U>( boost::shared_ptr<U> group, const StdString & id); \15 template bool CGroupFactory::HasGroup<U>(shared_ptr<U> group, const StdString & id); \ 16 16 template bool CGroupFactory::HasChild<U>(boost::shared_ptr<U> group, const StdString & id); \ 17 template boost::shared_ptr<U> CGroupFactory::CreateGroup<U>(boost::shared_ptr<U> group, const StdString & id ); \18 template boost::shared_ptr<U::RelChild> CGroupFactory::CreateChild<U>(boost::shared_ptr<U> group, const StdString & id);17 template shared_ptr<U> CGroupFactory::CreateGroup<U>(shared_ptr<U> group, const StdString & id ); \ 18 template shared_ptr<U::RelChild> CGroupFactory::CreateChild<U>(shared_ptr<U> group, const StdString & id); 19 19 20 20 macro(CFieldGroup) -
XIOS/dev/branch_openmp/src/indent.hpp
r1134 r1328 10 10 public: 11 11 static int defaultIncSize; 12 #pragma omp threadprivate(defaultIncSize)13 14 12 static int index ; 15 #pragma omp threadprivate(index)16 17 13 int incSize ; 18 14 int offset ; -
XIOS/dev/branch_openmp/src/indent_xml.cpp
r1134 r1328 15 15 { 16 16 static unsigned int LineNB = 1; 17 #pragma omp threadprivate(LineNB)18 19 17 if (CIndent::WithLine) out << LineNB++ << ". "; 20 18 for(unsigned int i = 0; i < CIndent::Indent; out << CIndent::Increm , i++){} -
XIOS/dev/branch_openmp/src/indent_xml.hpp
r1134 r1328 22 22 /// Propriétés statiques /// 23 23 static unsigned int Indent; 24 #pragma omp threadprivate(Indent)25 26 24 static StdString Increm; 27 #pragma omp threadprivate(Increm)28 29 25 static bool WithLine; 30 #pragma omp threadprivate(WithLine)31 26 32 27 }; // class CIndent -
XIOS/dev/branch_openmp/src/interface/c/icdata.cpp
r1205 r1328 1 1 /* ************************************************************************** * 2 * Copyright ©IPSL/LSCE, xios, Avril 2010 - Octobre 2011 *2 * Copyright IPSL/LSCE, xios, Avril 2010 - Octobre 2011 * 3 3 * ************************************************************************** */ 4 4 … … 9 9 #include <iostream> 10 10 11 11 #include "mpi_std.hpp" 12 12 #include "xios.hpp" 13 13 //#include "oasis_cinterface.hpp" … … 23 23 #include "context.hpp" 24 24 #include "context_client.hpp" 25 #include "mpi_std.hpp" 25 26 26 #include "timer.hpp" 27 #include "array_new.hpp"28 29 27 30 28 extern "C" 31 29 { 32 // /////////////////////////////// D éfinitions ////////////////////////////// //33 34 // ----------------------- Red éfinition de types ----------------------------30 // /////////////////////////////// Dfinitions ////////////////////////////// // 31 32 // ----------------------- Redfinition de types ---------------------------- 35 33 36 34 typedef enum { NETCDF4 = 0 } XFileType; … … 38 36 typedef xios::CContext* XContextPtr; 39 37 40 // -------------------- Traitement des donn ées ------------------------------38 // -------------------- Traitement des donnes ------------------------------ 41 39 42 40 // This function is not exported to the public Fortran interface, … … 64 62 int initialized; 65 63 MPI_Initialized(&initialized); 66 67 #ifdef _usingEP 68 if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm)); 69 else local_comm = MPI_COMM_NULL; 70 #else 64 #ifdef _usingMPI 71 65 if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 72 else local_comm = MPI_COMM_NULL; 66 else local_comm=MPI_COMM_NULL; 67 #elif _usingEP 68 ep_lib::fc_comm_map.clear(); 69 if (initialized) local_comm=ep_lib::EP_Comm_f2c(static_cast<int>(*f_local_comm)); 70 else local_comm=MPI_COMM_NULL; 73 71 #endif 74 72 75 73 76 74 77 75 CXios::initClientSide(str, local_comm, return_comm); 78 79 #ifdef _usingEP 80 *f_return_comm = ep_lib::EP_Comm_c2f(return_comm); 81 #else 82 *f_return_comm = MPI_Comm_c2f(return_comm); 76 #ifdef _usingMPI 77 *f_return_comm=MPI_Comm_c2f(return_comm); 78 #elif _usingEP 79 *f_return_comm=ep_lib::EP_Comm_c2f(return_comm); 83 80 #endif 84 85 81 CTimer::get("XIOS init").suspend(); 86 82 CTimer::get("XIOS").suspend(); … … 95 91 CTimer::get("XIOS").resume(); 96 92 CTimer::get("XIOS init context").resume(); 93 #ifdef _usingMPI 94 comm=MPI_Comm_f2c(*f_comm); 95 #elif _usingEP 97 96 comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm)); 98 99 CClient::registerContext(str,comm); 100 97 #endif 98 CClient::registerContext(str, comm); 101 99 CTimer::get("XIOS init context").suspend(); 102 100 CTimer::get("XIOS").suspend(); … … 382 380 383 381 384 // ---------------------- Ecriture des donn ées ------------------------------382 // ---------------------- Ecriture des donnes ------------------------------ 385 383 386 384 void cxios_write_data_k80(const char* fieldid, int fieldid_size, double* data_k8, int data_Xsize) … … 448 446 CContext* context = CContext::getCurrent(); 449 447 if (!context->hasServer && !context->client->isAttachedModeEnabled()) 450 { 451 context->checkBuffersAndListen(); 452 } 448 context->checkBuffersAndListen(); 453 449 454 450 CArray<double, 3>data(data_k8, shape(data_Xsize, data_Ysize, data_Zsize), neverDeleteData); … … 717 713 } 718 714 719 // ---------------------- Lecture des donn ées ------------------------------715 // ---------------------- Lecture des donnes ------------------------------ 720 716 721 717 void cxios_read_data_k80(const 
char* fieldid, int fieldid_size, double* data_k8, int data_Xsize) -
XIOS/dev/branch_openmp/src/interface/c/oasis_cinterface.cpp
r1134 r1328 1 1 #include "oasis_cinterface.hpp" 2 2 #include <string> 3 //#include "mpi_std.hpp" 3 using namespace ep_lib; 4 4 5 5 namespace xios … … 26 26 27 27 fxios_oasis_get_localcomm(&f_comm) ; 28 #ifdef _usingEP29 28 comm=EP_Comm_f2c(f_comm.mpi_fint) ; 30 #else31 comm=MPI_Comm_f2c(f_comm) ;32 #endif33 29 } 34 30 … … 38 34 39 35 fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 40 #ifdef _usingEP41 36 comm_client_server=EP_Comm_f2c(f_comm.mpi_fint) ; 42 #else43 comm_client_server=MPI_Comm_f2c(f_comm) ;44 #endif45 37 } 46 38 … … 50 42 51 43 fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 52 #ifdef _usingEP53 44 comm_client_server=EP_Comm_f2c(f_comm.mpi_fint) ; 54 #else55 comm_client_server=MPI_Comm_f2c(f_comm) ;56 #endif57 45 } 58 46 } -
XIOS/dev/branch_openmp/src/interface/c/oasis_cinterface.hpp
r501 r1328 10 10 void fxios_oasis_enddef(void) ; 11 11 void fxios_oasis_finalize(void) ; 12 void fxios_oasis_get_localcomm( MPI_Fint* f_comm) ;13 void fxios_oasis_get_intracomm( MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;14 void fxios_oasis_get_intercomm( MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;12 void fxios_oasis_get_localcomm(ep_lib::MPI_Fint* f_comm) ; 13 void fxios_oasis_get_intracomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 14 void fxios_oasis_get_intercomm(ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 15 15 } 16 16 … … 20 20 void oasis_enddef(void) ; 21 21 void oasis_finalize(void) ; 22 void oasis_get_localcomm( MPI_Comm& comm) ;23 void oasis_get_intracomm( MPI_Comm& comm_client_server,const std::string& server_id) ;24 void oasis_get_intercomm( MPI_Comm& comm_client_server,const std::string& server_id) ;22 void oasis_get_localcomm(ep_lib::MPI_Comm& comm) ; 23 void oasis_get_intracomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 24 void oasis_get_intercomm(ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ; 25 25 } 26 26 #endif -
XIOS/dev/branch_openmp/src/io/inetcdf4.cpp
r1287 r1328 4 4 5 5 #include <boost/algorithm/string.hpp> 6 // mpi_std.hpp7 #ifdef _usingEP8 #include "ep_declaration.hpp"9 #endif10 6 11 7 namespace xios 12 8 { 13 CINetCDF4::CINetCDF4(const StdString& filename, const MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, const StdString& timeCounterName /*= "time_counter"*/)9 CINetCDF4::CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, const StdString& timeCounterName /*= "time_counter"*/) 14 10 { 15 11 // Don't use parallel mode if there is only one process … … 17 13 { 18 14 int commSize = 0; 19 MPI_Comm_size(*comm, &commSize);15 ep_lib::MPI_Comm_size(*comm, &commSize); 20 16 if (commSize <= 1) 21 17 comm = NULL; 22 18 } 23 24 19 mpi = comm && !multifile; 25 MPI_Info m_info = MPI_INFO_NULL.mpi_info;20 ep_lib::MPI_Info info_null; 26 21 27 22 // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO 28 23 // even if Parallel NetCDF ends up being used. 29 24 if (mpi) 30 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp); 25 //CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, info_null, this->ncidp); 26 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, static_cast<MPI_Comm>(comm->mpi_comm), info_null.mpi_info, this->ncidp); 31 27 else 32 28 CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); -
XIOS/dev/branch_openmp/src/io/inetcdf4.hpp
r1138 r1328 14 14 #endif // UNLIMITED_DIM 15 15 16 17 16 namespace xios 18 17 { … … 23 22 public: 24 23 /// Constructors /// 25 CINetCDF4(const StdString& filename, const MPI_Comm* comm = NULL, bool multifile = true,24 CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 26 25 const StdString& timeCounterName = "time_counter"); 27 26 CINetCDF4(const CINetCDF4& inetcdf4); // Not implemented. -
XIOS/dev/branch_openmp/src/io/inetcdf4_decl.cpp
r1138 r1328 1 1 #include "inetcdf4_impl.hpp" 2 // mpi_std.hpp3 2 4 3 namespace xios -
XIOS/dev/branch_openmp/src/io/inetcdf4_impl.hpp
r1138 r1328 4 4 #include "inetcdf4.hpp" 5 5 #include "netCdfInterface.hpp" 6 // mpi_std.hpp7 6 8 7 namespace xios -
XIOS/dev/branch_openmp/src/io/nc4_data_input.cpp
r1176 r1328 8 8 #include "scalar.hpp" 9 9 10 // mpi.hpp 11 12 10 namespace xios 13 11 { 14 CNc4DataInput::CNc4DataInput(const StdString& filename, ::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, const StdString& timeCounterName /*= "time_counter"*/)12 CNc4DataInput::CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, const StdString& timeCounterName /*= "time_counter"*/) 15 13 : SuperClass() 16 14 , SuperClassWriter(filename, &comm_file, multifile, timeCounterName) … 55 53 CArray<double,1> fieldData(grid->getWrittenDataSize()); 56 54 if (!field->default_value.isEmpty()) fieldData = field->default_value; 57 #ifdef _usingEP 58 SuperClass::type = ONE_FILE; 59 printf("SuperClass::type = %d\n", SuperClass::type); 60 #endif 55 61 56 switch (SuperClass::type) 62 57 { … 326 321 std::vector<StdSize> nBeginBndsLatLon(3), nSizeBndsLatLon(3); 327 322 nBeginBndsLatLon[0] = 0; nSizeBndsLatLon[0] = domain->nj_glo.getValue(); 328 nBeginBndsLatLon[1] = 0; nSizeBndsLatLon[1] = domain->nj_glo.getValue();323 nBeginBndsLatLon[1] = 0; nSizeBndsLatLon[1] = domain->ni_glo.getValue(); 329 324 nBeginBndsLatLon[2] = 0; nSizeBndsLatLon[2] = nbVertex; 330 325 -
XIOS/dev/branch_openmp/src/io/nc4_data_input.hpp
r1138 r1328 3 3 4 4 /// XIOS headers /// 5 #include "mpi_std.hpp" 5 6 #include "xios_spl.hpp" 6 7 #include "data_input.hpp" 7 8 #include "inetcdf4.hpp" 8 // mpi_std.hpp9 9 10 10 namespace xios … … 24 24 25 25 /// Constructors /// 26 CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective = true,26 CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 27 27 const StdString& timeCounterName = "time_counter"); 28 28 CNc4DataInput(const CNc4DataInput& dataInput); // Not implemented. … … 71 71 private: 72 72 /// Private attributes /// 73 MPI_Comm comm_file;73 ep_lib::MPI_Comm comm_file; 74 74 const StdString filename; 75 75 bool isCollective; -
XIOS/dev/branch_openmp/src/io/nc4_data_output.cpp
r1205 r1328 13 13 #include "timer.hpp" 14 14 #include "uuid.hpp" 15 // mpi.hpp 16 17 15 namespace xios 18 16 { … 30 28 CNc4DataOutput::CNc4DataOutput 31 29 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 32 ::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 33 31 : SuperClass() 34 32 , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) … 465 463 StdString domainName = domain->name; 466 464 domain->assignMesh(domainName, domain->nvertex); 467 domain->mesh->createMeshEpsilon(static_cast< ::MPI_Comm >(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);465 domain->mesh->createMeshEpsilon(server->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv); 468 466 469 467 StdString node_x = domainName + "_node_x"; -
XIOS/dev/branch_openmp/src/io/nc4_data_output.hpp
r1138 r1328 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "onetcdf4.hpp" 7 8 #include "data_output.hpp" 8 // mpi_std.hpp9 9 10 10 namespace xios … … 27 27 CNc4DataOutput 28 28 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, 29 bool useCFConvention, MPI_Comm comm_file, bool multifile, 30 bool isCollective = true, const StdString& timeCounterName = "time_counter"); 29 bool useCFConvention, 30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true, 31 const StdString& timeCounterName = "time_counter"); 31 32 32 33 CNc4DataOutput(const CNc4DataOutput & dataoutput); // Not implemented. … … 116 117 117 118 /// Propriétés privées /// 118 MPI_Comm comm_file;119 ep_lib::MPI_Comm comm_file; 119 120 const StdString filename; 120 121 std::map<Time, StdSize> timeToRecordCache; -
XIOS/dev/branch_openmp/src/io/netCdfInterface.cpp
r1153 r1328 10 10 #include "netCdfInterface.hpp" 11 11 #include "netCdfException.hpp" 12 // mpi_std.hpp13 12 14 13 namespace xios … … 50 49 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, MPI_Comm comm, MPI_Info info, int& ncId) 51 50 { 52 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId);51 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, MPI_INFO_NULL.mpi_info, &ncId); 53 52 if (NC_NOERR != status) 54 53 { … … 75 74 int CNetCdfInterface::open(const StdString& fileName, int oMode, int& ncId) 76 75 { 77 int status = NC_NOERR; 78 #pragma omp critical (_netcdf) 79 status = nc_open(fileName.c_str(), oMode, &ncId); 80 76 int status = nc_open(fileName.c_str(), oMode, &ncId); 81 77 if (NC_NOERR != status) 82 78 { … … 106 102 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, MPI_Comm comm, MPI_Info info, int& ncId) 107 103 { 108 int status; 109 #pragma omp critical (_netcdf) 110 status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); // nc_open 104 //int status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); 105 int status = xios::nc_open_par(fileName.c_str(), oMode, comm, MPI_INFO_NULL.mpi_info, &ncId); 111 106 if (NC_NOERR != status) 112 107 { … … 131 126 int CNetCdfInterface::close(int ncId) 132 127 { 133 int status = NC_NOERR; 134 #pragma omp critical (_netcdf) 135 //#pragma omp master 136 { 137 status = nc_close(ncId); 128 int status = nc_close(ncId); 138 129 if (NC_NOERR != status) 139 130 { … … 146 137 throw CNetCdfException(e); 147 138 } 148 } 139 149 140 return status; 150 141 } … … 356 347 int CNetCdfInterface::inqDimLen(int ncid, int dimId, StdSize& dimLen) 357 348 { 358 int status; 359 #pragma omp critical (_netcdf) 360 status = nc_inq_dimlen(ncid, dimId, &dimLen); 349 int status = nc_inq_dimlen(ncid, dimId, &dimLen); 361 350 if (NC_NOERR != status) 362 351 { -
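Note: openPar/createPar now hand the raw communicator plus the info member of the EP MPI_INFO_NULL wrapper straight to the parallel NetCDF entry points. For reference, a bare standalone use of nc_open_par (the filename is made up, error handling is reduced to a message, and a NetCDF build with parallel I/O is required):

#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int ncid = -1;
  // Collective parallel open; NC_MPIIO is accepted even when PnetCDF handles the file.
  int status = nc_open_par("input.nc", NC_NOWRITE | NC_MPIIO,
                           MPI_COMM_WORLD, MPI_INFO_NULL, &ncid);
  if (status != NC_NOERR) std::printf("nc_open_par: %s\n", nc_strerror(status));
  else nc_close(ncid);
  MPI_Finalize();
  return 0;
}

-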
XIOS/dev/branch_openmp/src/io/netCdfInterface.hpp
r1134 r1328 10 10 #define __NETCDF_INTERFACE_HPP_ 11 11 12 #include "mpi_std.hpp" 12 13 #include "xios_spl.hpp" 13 14 … … 16 17 #endif 17 18 18 #include "mpi_std.hpp"19 19 #include "netcdf.hpp" 20 20 -
XIOS/dev/branch_openmp/src/io/netCdfInterface_decl.cpp
r1138 r1328 9 9 10 10 #include "netCdfInterface_impl.hpp" 11 // mpi_std.hpp12 11 13 12 namespace xios -
XIOS/dev/branch_openmp/src/io/netCdfInterface_impl.hpp
r1146 r1328 13 13 #include "netCdfInterface.hpp" 14 14 #include "netCdfException.hpp" 15 // mpi_std.hpp16 15 17 16 namespace xios … … 86 85 int CNetCdfInterface::getVaraType(int ncid, int varId, const StdSize* start, const StdSize* count, T* data) 87 86 { 88 int status; 89 #pragma omp critical (_netcdf) 90 status = ncGetVaraType(ncid, varId, start, count, data); 87 int status = ncGetVaraType(ncid, varId, start, count, data); 91 88 if (NC_NOERR != status) 92 89 { -
XIOS/dev/branch_openmp/src/io/netCdf_cf_constant.hpp
r1138 r1328 4 4 #include "inetcdf4.hpp" 5 5 #include "netCdfInterface.hpp" 6 7 // mpi_std.hpp8 6 9 7 namespace xios -
XIOS/dev/branch_openmp/src/io/netcdf.hpp
r1138 r1328 18 18 extern "C" 19 19 { 20 #include <netcdf_par.h>20 # include <netcdf_par.h> 21 21 } 22 22 # endif -
XIOS/dev/branch_openmp/src/io/onetcdf4.cpp
r1287 r1328 12 12 /// ////////////////////// Définitions ////////////////////// /// 13 13 14 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 15 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 14 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 15 bool useCFConvention, 16 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 16 17 : path() 17 18 , wmpi(false) … … 31 32 32 33 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 33 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)34 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 34 35 { 35 36 this->useClassicFormat = useClassicFormat; … … 42 43 { 43 44 int commSize = 0; 44 MPI_Comm_size(*comm, &commSize);45 ep_lib::MPI_Comm_size(*comm, &commSize); 45 46 if (commSize <= 1) 46 47 comm = NULL; 47 48 } 48 49 wmpi = comm && !multifile; 50 ep_lib::MPI_Info info_null; 49 51 50 52 if (wmpi) … … 56 58 CTimer::get("Files : create").resume(); 57 59 if (wmpi) 58 CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL.mpi_info, this->ncidp);60 CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null.mpi_info, this->ncidp); 59 61 else 60 62 CNetCdfInterface::create(filename, mode, this->ncidp); … … 68 70 CTimer::get("Files : open").resume(); 69 71 if (wmpi) 70 CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL.mpi_info, this->ncidp);72 CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), info_null.mpi_info, this->ncidp); 71 73 else 72 74 CNetCdfInterface::open(filename, mode, this->ncidp); … … 539 541 const std::vector<StdSize>& scount, const int* data) 540 542 { 541 CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data); 542 } 543 543 CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data); 544 } 544 545 //--------------------------------------------------------------- 545 546 … … 549 550 const std::vector<StdSize>& scount, const float* data) 550 551 { 551 CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data);552 CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data); 552 553 } 553 554 -
XIOS/dev/branch_openmp/src/io/onetcdf4.hpp
r1205 r1328 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "exception.hpp" 7 8 #include "data_output.hpp" 8 9 #include "array_new.hpp" 9 #include "mpi_std.hpp"10 10 #include "netcdf.hpp" 11 11 … … 13 13 #define UNLIMITED_DIM (size_t)(-1) 14 14 #endif //UNLIMITED_DIM 15 16 // mpi_std.hpp17 15 18 16 namespace xios … … 30 28 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 31 29 bool useCFConvention = true, 32 const MPI_Comm* comm = NULL, bool multifile = true,30 const ep_lib::MPI_Comm* comm = NULL, bool multifile = true, 33 31 const StdString& timeCounterName = "time_counter"); 34 32 … … 39 37 /// Initialisation /// 40 38 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 41 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);39 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 42 40 void close(void); 43 41 void sync(void); -
XIOS/dev/branch_openmp/src/io/onetcdf4_decl.cpp
r1138 r1328 1 1 #include "onetcdf4_impl.hpp" 2 // mpi_std.hpp3 2 4 3 namespace xios -
XIOS/dev/branch_openmp/src/io/onetcdf4_impl.hpp
r1205 r1328 5 5 #include "netCdfInterface.hpp" 6 6 #include "timer.hpp" 7 // mpi_std.hpp8 7 9 8 namespace xios -
XIOS/dev/branch_openmp/src/log.cpp
r1164 r1328 1 1 #include "log.hpp" 2 #include <string>3 #include <iostream>4 #include <string>5 2 6 3 namespace xios 7 4 { 8 9 std::filebuf* info_FB[16];10 11 12 5 CLog info("info") ; 13 6 CLog report("report") ; 14 7 CLog error("error", cerr.rdbuf()) ; 15 16 17 CLog& CLog::operator()(int l)18 {19 if (l<=level)20 {21 omp_set_lock( &mutex );22 //rdbuf(strBuf_);23 rdbuf(strBuf_array[omp_get_thread_num()]);24 *this<<"-> "<<name<<" : " ;25 omp_unset_lock( &mutex );26 }27 else rdbuf(NULL) ;28 return *this;29 }30 31 32 33 int test_omp_rank;34 #pragma omp threadprivate(test_omp_rank)35 36 37 38 8 } -
XIOS/dev/branch_openmp/src/log.hpp
r1164 r1328 5 5 #include <iostream> 6 6 #include <string> 7 #include <stdio.h>8 #include <omp.h>9 7 10 8 namespace xios … … 16 14 public : 17 15 CLog(const string& name_, std::streambuf* sBuff = cout.rdbuf()) 18 : ostream(cout.rdbuf()), level(0), name(name_), strBuf_(sBuff) 16 : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) {} 17 CLog& operator()(int l) 19 18 { 20 omp_init_lock( &mutex ); 21 for(int i=0; i<16; i++) 22 strBuf_array[i] = sBuff; 19 if (l<=level) 20 { 21 rdbuf(strBuf_); 22 *this<<"-> "<<name<<" : " ; 23 } 24 else rdbuf(NULL) ; 25 return *this; 23 26 } 24 25 ~CLog()26 {27 omp_destroy_lock( &mutex );28 }29 30 31 CLog& operator()(int l);32 27 void setLevel(int l) {level=l; } 33 int 28 int getLevel() {return level ;} 34 29 bool isActive(void) { if (rdbuf()==NULL) return true ; else return false ;} 35 30 bool isActive(int l) {if (l<=level) return true ; else return false ; } … … 51 46 * \param [in] pointer to new streambuf 52 47 */ 53 void changeStreamBuff(std::streambuf* sBuff) 54 { 55 strBuf_ = sBuff; 56 strBuf_array[omp_get_thread_num()] = sBuff; 57 rdbuf(sBuff); 58 } 48 void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); } 59 49 60 50 int level ; 61 51 string name ; 62 52 std::streambuf* strBuf_; 63 std::streambuf* strBuf_array[16];64 omp_lock_t mutex;65 53 }; 66 54 … … 68 56 extern CLog report; 69 57 extern CLog error; 70 71 72 extern std::filebuf* info_FB[16];73 74 75 58 } 76 59 #endif -
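Note: the restored CLog::operator()(int) enables or silences a message purely by swapping the stream buffer: rdbuf(strBuf_) routes output, rdbuf(NULL) makes every insertion a discarded no-op, and the next rdbuf() call clears the stream's error state again. A compilable distillation of the trick:

#include <iostream>
#include <string>

// Minimal stand-in for xios::CLog: verbosity gating via streambuf swapping.
class MiniLog : public std::ostream
{
public:
  MiniLog(const std::string& n, std::streambuf* b)
    : std::ostream(b), level(0), name(n), buf(b) {}
  MiniLog& operator()(int l)
  {
    if (l <= level) { rdbuf(buf); *this << "-> " << name << " : "; }
    else rdbuf(0);                 // null buffer: insertions are silently dropped
    return *this;
  }
  void setLevel(int l) { level = l; }
private:
  int level;
  std::string name;
  std::streambuf* buf;
};

int main()
{
  MiniLog info("info", std::cout.rdbuf());
  info.setLevel(10);
  info(5)  << "printed" << std::endl;  // 5 <= 10: goes to stdout
  info(50) << "dropped" << std::endl;  // 50 > 10: discarded
  return 0;
}

-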
XIOS/dev/branch_openmp/src/memtrack.cpp
r1205 r1328 44 44 #include <sstream> 45 45 #include <string> 46 46 47 #include <execinfo.h> 47 48 … … 76 77 private: // static member variables 77 78 static BlockHeader *ourFirstNode; 78 #pragma omp threadprivate(ourFirstNode)79 79 80 80 private: // member variables -
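Note: memtrack.cpp now pulls in <execinfo.h>, the glibc backtrace facility, presumably so allocation sites can be recorded; that purpose is inferred rather than shown in the hunk. Its basic use (Linux/glibc only):

#include <execinfo.h>
#include <unistd.h>

// Capture the current call stack and write symbolized frames to stderr.
void dumpStack()
{
  void* frames[64];
  int n = backtrace(frames, 64);                  // fill with return addresses
  backtrace_symbols_fd(frames, n, STDERR_FILENO); // print without allocating memory
}

-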
XIOS/dev/branch_openmp/src/mpi.hpp
r1134 r1328 10 10 /* skip C++ Binding for OpenMPI */ 11 11 #define OMPI_SKIP_MPICXX 12 13 12 #ifdef _usingEP 14 13 #include <omp.h> 15 14 #include "../extern/src_ep_dev/ep_lib.hpp" 16 using namespace ep_lib; 15 #include "../extern/src_ep_dev/ep_declaration.hpp" 16 //using namespace ep_lib; 17 17 #elif _usingMPI 18 18 #include <mpi.h> -
XIOS/dev/branch_openmp/src/mpi_std.hpp
r1134 r1328 11 11 #define OMPI_SKIP_MPICXX 12 12 13 #include <mpi.h> 13 14 #include "../extern/src_ep_dev/ep_lib.hpp" 14 #include <mpi.h> 15 16 15 #include "../extern/src_ep_dev/ep_declaration.hpp" 17 16 18 17 #endif -
XIOS/dev/branch_openmp/src/node/axis.cpp
r1205 r1328 44 44 { /* Ne rien faire de plus */ } 45 45 46 std::map<StdString, ETranformationType> *CAxis::transformationMapList_ptr = 0; //new std::map<StdString, ETranformationType>(); 47 //bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_ptr); 48 46 //std::map<StdString, ETranformationType> CAxis::transformationMapList_ = std::map<StdString, ETranformationType>(); 47 std::map<StdString, ETranformationType> *CAxis::transformationMapList_ptr = 0; 48 //bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_); 49 49 50 bool CAxis::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 50 51 { … … 55 56 m["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS; 56 57 } 57 58 58 59 59 bool CAxis::initializeTransformationMap() … … 66 66 (*CAxis::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS; 67 67 } 68 69 68 70 69 ///--------------------------------------------------------------- … … 500 499 globalAxisZoom[nZoomCount] = globalZoomIndex; 501 500 ++nZoomCount; 502 } 501 } 503 502 } 504 503 … … 831 830 CContextServer* server = CContext::getCurrent()->server; 832 831 axis->numberWrittenIndexes_ = axis->indexesToWrite.size(); 833 ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);834 ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);832 MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 833 MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 835 834 axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_; 836 835 } … … 978 977 msg << ni << begin << end; 979 978 msg << global_zoom_begin.getValue() << global_zoom_n.getValue(); 980 msg << isCompressible_; 979 msg << isCompressible_; 981 980 msg << zoomIndex; 982 981 if (zoomIndex) … … 1157 1156 1158 1157 nodeElementName = node.getElementName(); 1159 1158 //std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 1160 1159 if(transformationMapList_ptr == 0) initializeTransformationMap(); 1161 //transformationMapList_ptr = new std::map<StdString, ETranformationType>(); 1162 1163 std::map<StdString, ETranformationType>::const_iterator ite = (*CAxis::transformationMapList_ptr).end(), it; 1164 it = (*CAxis::transformationMapList_ptr).find(nodeElementName); 1160 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 1161 //it = transformationMapList_.find(nodeElementName); 1162 it = transformationMapList_ptr->find(nodeElementName); 1165 1163 if (ite != it) 1166 1164 { … … 1184 1182 1185 1183 } // namespace xios 1186 -
XIOS/dev/branch_openmp/src/node/axis.hpp
r1205 r1328 172 172 private: 173 173 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 174 //static bool initializeTransformationMap(std::map<StdString, ETranformationType>* m);175 174 static bool initializeTransformationMap(); 176 177 175 //static std::map<StdString, ETranformationType> transformationMapList_; 178 179 176 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 180 #pragma omp threadprivate(transformationMapList_ptr) 181 182 //static bool dummyTransformationMapList_; 183 //#pragma omp threadprivate(dummyTransformationMapList_) 177 static bool dummyTransformationMapList_; 184 178 185 179 DECLARE_REF_FUNC(Axis,axis) … … 193 187 194 188 #endif // __XIOS_CAxis__ 195 -
XIOS/dev/branch_openmp/src/node/compute_connectivity_domain.hpp
r1134 r1328 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 64 62 }; // class CComputeConnectivityDomain 65 63 -
XIOS/dev/branch_openmp/src/node/context.cpp
r1287 r1328 1 1 2 #include "context.hpp" 2 3 #include "attribute_template.hpp" … … 17 18 #include "memtrack.hpp" 18 19 20 using namespace ep_lib; 21 19 22 20 23 namespace xios { 21 24 22 //shared_ptr<CContextGroup> CContext::root; 23 boost::shared_ptr<CContextGroup> * CContext::root_ptr = 0; 24 25 /// ////////////////////// Dfinitions ////////////////////// /// 25 shared_ptr<CContextGroup> CContext::root; 26 27 /// ////////////////////// Définitions ////////////////////// /// 26 28 27 29 CContext::CContext(void) … … 57 59 CContextGroup* CContext::getRoot(void) 58 60 { 59 //if (root.get()==NULL) root=shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 60 //return root.get(); 61 62 //static shared_ptr<CContextGroup> *root_ptr; 63 if(root_ptr == 0) //root_ptr = new shared_ptr<CContextGroup>; 64 // if (root_ptr->get()==NULL) 65 root_ptr = new boost::shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 66 return root_ptr->get(); 61 if (root.get()==NULL) root=shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName())); 62 return root.get(); 67 63 } 68 64 … … 186 182 if (!this->hasChild()) 187 183 { 188 //oss << "<!-- No definition -->" << std::endl; // fait planter l'incr mentation184 //oss << "<!-- No definition -->" << std::endl; // fait planter l'incrémentation 189 185 } 190 186 else … … 248 244 { 249 245 hasClient=true; 250 client = new CContextClient(this, intraComm, interComm, cxtServer); 251 252 int tmp_rank; 253 MPI_Comm_rank(intraComm, &tmp_rank); 254 MPI_Barrier(intraComm); 255 256 246 client = new CContextClient(this,intraComm, interComm, cxtServer); 257 247 registryIn=new CRegistry(intraComm); 258 248 registryIn->setPath(getId()) ; … … 271 261 else 272 262 { 273 MPI_Comm_dup(intraComm, &intraCommServer);263 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer); 274 264 comms.push_back(intraCommServer); 275 MPI_Comm_dup(interComm, &interCommServer);265 ep_lib::MPI_Comm_dup(interComm, &interCommServer); 276 266 comms.push_back(interCommServer); 277 267 } … … 353 343 else 354 344 { 355 MPI_Comm_dup(intraComm, &intraCommClient);345 ep_lib::MPI_Comm_dup(intraComm, &intraCommClient); 356 346 comms.push_back(intraCommClient); 357 MPI_Comm_dup(interComm, &interCommClient);347 ep_lib::MPI_Comm_dup(interComm, &interCommClient); 358 348 comms.push_back(interCommClient); 359 349 } … … 395 385 396 386 for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 397 MPI_Comm_free(&(*it));387 ep_lib::MPI_Comm_free(&(*it)); 398 388 comms.clear(); 399 389 } … … 527 517 } 528 518 529 void CContext:: checkPrefetchingOfEnabledReadModeFiles()519 void CContext::doPostTimestepOperationsForEnabledReadModeFiles() 530 520 { 531 521 int size = enabledReadModeFiles.size(); 532 522 for (int i = 0; i < size; ++i) 533 523 { 534 enabledReadModeFiles[i]-> prefetchEnabledReadModeFieldsIfNeeded();524 enabledReadModeFiles[i]->doPostTimestepOperationsForEnabledReadModeFields(); 535 525 } 536 526 } … … 563 553 } 564 554 565 void CContext::solveAllInheritance(bool apply) 566 { 567 // R solution des hritages descendants (cd des hritages de groupes)555 void CContext::solveAllInheritance(bool apply) // default : apply = true 556 { 557 // Résolution des héritages descendants (cà d des héritages de groupes) 568 558 // pour chacun des contextes. 569 559 solveDescInheritance(apply); 570 560 571 // R solution des hritages par rfrence au niveau des fichiers.561 // Résolution des héritages par référence au niveau des fichiers. 
572 562 const vector<CFile*> allFiles=CFile::getAll(); 573 563 const vector<CGrid*> allGrids= CGrid::getAll(); … 593 583 594 584 for (unsigned int i = 0; i < allFiles.size(); i++) 595 585 if (!allFiles[i]->enabled.isEmpty()) // Si l'attribut 'enabled' est défini. 596 586 { 597 587 if (allFiles[i]->enabled.getValue()) // Si l'attribut 'enabled' est fixé à vrai. 598 588 { 599 589 if ((initDate + allFiles[i]->output_freq.getValue()) < (initDate + this->getCalendar()->getTimeStep())) … 620 610 621 611 if (enabledFiles.size() == 0) 622 612 DEBUG(<<"Aucun fichier ne va être sorti dans le contexte nommé \"" 623 613 << getId() << "\" !"); 624 614 } … … 836 826 void CContext::postProcessing() 837 827 { 838 int myRank;839 MPI_Comm_rank(MPI_COMM_WORLD, &myRank);840 841 828 if (isPostProcessed) return; 842 829 … … 860 847 prepareTimeseries(); 861 848 862 849 //Initialisation du vecteur 'enabledFiles' contenant la liste des fichiers à sortir. 863 850 this->findEnabledFiles(); 864 851 this->findEnabledReadModeFiles(); … … 1218 1205 void CContext::updateCalendar(int step) 1219 1206 { 1220 #pragma omp critical (_output) 1221 {info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl;} 1207 info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl; 1222 1208 calendar->update(step); 1223 #pragma omp critical (_output) 1224 {info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl;} 1209 info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl; 1225 1210 #ifdef XIOS_MEMTRACK_LIGHT 1226 #pragma omp critical (_output) 1227 {info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ;} 1211 info(50) << " Current memory used by XIOS : "<< MemTrack::getCurrentMemorySize()*1.0/(1024*1024)<<" Mbyte, at timestep "<<step<<" of context "<<this->getId()<<endl ; 1228 1212 #endif 1229 1213 if (hasClient) 1230 1214 { 1231 checkPrefetchingOfEnabledReadModeFiles();1215 doPostTimestepOperationsForEnabledReadModeFiles(); 1232 1216 garbageCollector.invalidate(calendar->getCurrentDate()); 1233 1217 } … … 1273 1257 CContext* context = CObjectFactory::CreateObject<CContext>(id).get(); 1274 1258 getRoot(); 1275 if (!hasctxt) CGroupFactory::AddChild( *root_ptr, context->getShared());1259 if (!hasctxt) CGroupFactory::AddChild(root, context->getShared()); 1276 1260 1277 1261 #define DECLARE_NODE(Name_, name_) \ -
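Note: registerContext duplicates both the intra- and inter-communicator (ep_lib::MPI_Comm_dup in the hunks above) before wiring up the registries, giving registry traffic an isolated message space that cannot collide with tags on the parent communicators. The standard-MPI shape of that idiom:

#include <mpi.h>

// Give a subsystem private copies of its communicators; sends and receives on
// the duplicates can never pair with messages travelling on the originals.
void makePrivateComms(MPI_Comm intra, MPI_Comm inter,
                      MPI_Comm* intraPriv, MPI_Comm* interPriv)
{
  MPI_Comm_dup(intra, intraPriv);
  MPI_Comm_dup(inter, interPriv);
  // ... later: MPI_Comm_free(intraPriv); MPI_Comm_free(interPriv);
}

-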
XIOS/dev/branch_openmp/src/node/context.hpp
r1287 r1328 5 5 #include "xios_spl.hpp" 6 6 //#include "node_type.hpp" 7 #include "mpi_std.hpp" 7 8 #include "calendar_wrapper.hpp" 8 9 … … 13 14 #include "garbage_collector.hpp" 14 15 #include "registry.hpp" 15 #include "mpi.hpp"16 16 17 17 … … 115 115 void buildFilterGraphOfEnabledFields(); 116 116 void startPrefetchingOfEnabledReadModeFiles(); 117 void checkPrefetchingOfEnabledReadModeFiles();117 void doPostTimestepOperationsForEnabledReadModeFiles(); 118 118 void findFieldsWithReadAccess(void); 119 119 void solveAllRefOfFieldsWithReadAccess(); … … 206 206 207 207 // Context root 208 //static shared_ptr<CContextGroup> root; 209 210 static boost::shared_ptr<CContextGroup> *root_ptr; 211 #pragma omp threadprivate(root_ptr) 208 static shared_ptr<CContextGroup> root; 212 209 213 210 // Determine context on client or not … … 222 219 // Concrete contex client 223 220 CContextClient* client; 224 225 226 221 CRegistry* registryIn ; //!< input registry which is read from file 227 222 CRegistry* registryOut ; //!< output registry which will be wrote on file at the finalize 228 229 223 230 224 private: -
XIOS/dev/branch_openmp/src/node/domain.cpp
r1138 r1328 26 26 namespace xios { 27 27 28 /// ////////////////////// Définitions ////////////////////// ///28 /// ////////////////////// Définitions ////////////////////// /// 29 29 30 30 CDomain::CDomain(void) … 66 66 67 67 //std::map<StdString, ETranformationType> CDomain::transformationMapList_ = std::map<StdString, ETranformationType>(); 68 std::map<StdString, ETranformationType> *CDomain::transformationMapList_ptr = 0; 68 69 //bool CDomain::_dummyTransformationMapList = CDomain::initializeTransformationMap(CDomain::transformationMapList_); 69 70 72 71 bool CDomain::initializeTransformationMap(std::map<StdString, ETranformationType>& m) … 88 87 (*CDomain::transformationMapList_ptr)["expand_domain"] = TRANS_EXPAND_DOMAIN; 89 88 } 90 91 89 92 90 const std::set<StdString> & CDomain::getRelFiles(void) const … 445 443 break; 446 444 } 445 completeLonLatClient() ; 447 446 448 447 } … 636 635 { 637 636 CContext* context = CContext::getCurrent(); 638 637 CContextClient* client = context->client; 639 638 lon_g.resize(ni_glo) ; 640 639 lat_g.resize(nj_glo) ; … 1495 1494 1496 1495 if (context->hasClient) 1497 { // Côté client uniquement1496 { // Côté client uniquement 1498 1497 this->checkMask(); 1499 1498 this->checkDomainData(); … 1502 1501 } 1503 else 1504 { // Côté serveur uniquement1503 { // Côté serveur uniquement 1505 1504 } 1506 1505 … 1535 1534 1536 1535 if (context->hasClient) 1537 { // Côté client uniquement1536 { // Côté client uniquement 1538 1537 this->checkMask(); 1539 1538 this->checkDomainData(); … 1543 1542 } 1544 else 1545 { // Côté serveur uniquement1544 { // Côté serveur uniquement 1546 1545 } 1547 1546 … 1726 1725 client->intraComm); 1727 1726 clientServerMap->computeServerIndexMapping(globalIndexDomain); 1728 1729 1727 const CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer(); 1730 1728 … 2365 2363 nodeElementName = node.getElementName(); 2366 2364 if(transformationMapList_ptr == 0) initializeTransformationMap(); 2367 std::map<StdString, ETranformationType>::const_iterator ite = (*transformationMapList_ptr).end(), it; 2368 it = (*transformationMapList_ptr).find(nodeElementName); 2365 //std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 2366 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 2367 //it = transformationMapList_.find(nodeElementName); 2368 it = transformationMapList_ptr->find(nodeElementName); 2369 2369 if (ite != it) 2370 2370 { -
XIOS/dev/branch_openmp/src/node/domain.hpp
r1134 r1328 218 218 static bool initializeTransformationMap(); 219 219 //static std::map<StdString, ETranformationType> transformationMapList_; 220 221 220 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 222 #pragma omp threadprivate(transformationMapList_ptr) 223 224 //static bool _dummyTransformationMapList; 225 //#pragma omp threadprivate(_dummyTransformationMapList) 221 static bool _dummyTransformationMapList; 226 222 227 223 DECLARE_REF_FUNC(Domain,domain) -
XIOS/dev/branch_openmp/src/node/expand_domain.hpp
r1134 r1328 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 64 62 }; // class CExpandDomain 65 63 -
XIOS/dev/branch_openmp/src/node/extract_axis_to_scalar.hpp
r1134 r1328 60 60 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CExtractAxisToScalar 64 63 -
XIOS/dev/branch_openmp/src/node/extract_domain_to_axis.hpp
r1134 r1328 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CExtractDomainToAxis 64 63 -
XIOS/dev/branch_openmp/src/node/field.cpp
r1209 r1328 24 24 #include "spatial_transform_filter.hpp" 25 25 26 #include <stdio.h> 27 26 28 namespace xios{ 27 29 28 30 /// ////////////////////// Définitions ////////////////////// /// 31 32 CField* CField::my_getDirectFieldReference(void) const 33 { 34 // if (this->field_ref.isEmpty()) 35 // ERROR("C" #type "* C" #type "::getDirect" #type "Reference(void)", 36 // << "The " #name_ " with id = '" << getId() << "'" 37 // << " has no " #name_ "_ref."); 38 39 // if (!C##type::has(this->name_##_ref)) 40 // ERROR("C" #type "* C" #type "::getDirect" #type "Reference(void)", 41 // << this->name_##_ref 42 // << " refers to an unknown " #name_ " id."); 43 44 return CField::get(this->field_ref); 45 } 46 29 47 30 48 CField::CField(void) … 38 56 , hasTimeInstant(false) 39 57 , hasTimeCentered(false) 58 , wasDataRequestedFromServer(false) 40 59 , wasDataAlreadyReceivedFromServer(false) 41 60 , isEOF(false) … 52 71 , hasTimeInstant(false) 53 72 , hasTimeCentered(false) 73 , wasDataRequestedFromServer(false) 54 74 , wasDataAlreadyReceivedFromServer(false) 55 75 , isEOF(false) … 261 281 lastDataRequestedFromServer = tsDataRequested; 262 282 263 if (!isEOF) // No need to send the request if we already know we are at EOF 283 // No need to send the request if we are sure that we are already at EOF 284 if (!isEOF || context->getCalendar()->getCurrentDate() <= dateEOF) 264 285 { 265 286 CEventClient event(getType(), EVENT_ID_READ_DATA); … 277 298 else 278 299 serverSourceFilter->signalEndOfStream(tsDataRequested); 300 301 wasDataRequestedFromServer = true; 279 302 280 303 return !isEOF; … 293 316 while (currentDate >= lastDataRequestedFromServer) 294 317 { 295 #pragma omp critical (_output) 296 { 297 info(20) << "currentDate : " << currentDate << endl ; 298 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 299 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 300 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 301 } 318 info(20) << "currentDate : " << currentDate << endl ; 319 info(20) << "lastDataRequestedFromServer : " << lastDataRequestedFromServer << endl ; 320 info(20) << "file->output_freq.getValue() : " << file->output_freq.getValue() << endl ; 321 info(20) << "lastDataRequestedFromServer + file->output_freq.getValue() : " << lastDataRequestedFromServer + file->output_freq << endl ; 322 302 323 dataRequested |= sendReadDataRequest(lastDataRequestedFromServer + file->output_freq); 303 324 } … 435 456 { 436 457 CContext* context = CContext::getCurrent(); 437 int record;438 458 std::map<int, CArray<double,1> > data; 459 const bool wasEOF = isEOF; 439 460 440 461 for (int i = 0; i < ranks.size(); i++) 441 462 { 442 463 int rank = ranks[i]; 464 int record; 443 465 *buffers[i] >> record; 444 466 isEOF = (record == int(-1)); … 459 481 460 482 if (isEOF) 483 { 484 if (!wasEOF) 485 dateEOF = lastDataReceivedFromServer; 486 461 487 serverSourceFilter->signalEndOfStream(lastDataReceivedFromServer); 488 } 462 489 else 463 490 serverSourceFilter->streamDataFromServer(lastDataReceivedFromServer, data); 491 } 492 493 void CField::checkForLateDataFromServer(void) 494 { 495 CContext* context = CContext::getCurrent(); 496 const CDate& currentDate = context->getCalendar()->getCurrentDate(); 497 498 // Check if data previously requested has been received as expected 499 if (wasDataRequestedFromServer && (!isEOF || currentDate <= dateEOF)) 500 {
501 CTimer timer("CField::checkForLateDataFromServer"); 502 503 bool isDataLate; 504 do 505 { 506 const CDate nextDataDue = wasDataAlreadyReceivedFromServer ? (lastDataReceivedFromServer + file->output_freq) : context->getCalendar()->getInitDate(); 507 isDataLate = nextDataDue < currentDate; 508 509 if (isDataLate) 510 { 511 timer.resume(); 512 513 context->checkBuffersAndListen(); 514 515 timer.suspend(); 516 } 517 } 518 while (isDataLate && timer.getCumulatedTime() < CXios::recvFieldTimeout); 519 520 if (isDataLate) 521 ERROR("void CField::checkForLateDataFromServer(void)", 522 << "Late data at timestep = " << currentDate); 523 } 464 524 } 465 525 … … 694 754 CContext* context = CContext::getCurrent(); 695 755 solveOnlyReferenceEnabledField(doSending2Server); 696 int myRank; 697 MPI_Comm_rank(context->client->intraComm, &myRank);756 757 //std::cout<<"Field "<<this->getId()<<" areAllReferenceSolved = "<<areAllReferenceSolved<<std::endl; 698 758 699 759 if (!areAllReferenceSolved) 700 760 { 701 761 areAllReferenceSolved = true; 702 762 //std::cout<<"Field "<<this->getId()<<" all reference solved"<<std::endl; 703 763 if (context->hasClient) 704 764 { … … 825 885 // Check if the data is to be read from a file 826 886 else if (file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read) 827 instantDataFilter = serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid,828 freq_offset.isEmpty() ? NoneDu : freq_offset,829 887 { 888 checkAttributes(); 889 instantDataFilter = serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, freq_offset, true, 830 890 detectMissingValues, defaultValue)); 891 } 831 892 else // The data might be passed from the model 832 893 { … … 910 971 { 911 972 if (!serverSourceFilter) 912 serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid,913 freq_offset.isEmpty() ? NoneDu : freq_offset,914 973 { 974 checkAttributes(); 975 serverSourceFilter = boost::shared_ptr<CSourceFilter>(new CSourceFilter(gc, grid, freq_offset, true, 915 976 detectMissingValues, defaultValue)); 977 } 916 978 917 979 selfReferenceFilter = serverSourceFilter; … … 958 1020 << "An operation must be defined for field \"" << getId() << "\"."); 959 1021 960 if (freq_op.isEmpty()) 961 freq_op.setValue(TimeStep); 962 if (freq_offset.isEmpty()) 963 freq_offset.setValue(NoneDu); 1022 checkAttributes(); 964 1023 965 1024 const bool detectMissingValues = (!detect_missing_value.isEmpty() && !default_value.isEmpty() && detect_missing_value == true); 966 967 1025 boost::shared_ptr<CTemporalFilter> temporalFilter(new CTemporalFilter(gc, operation, 968 1026 CContext::getCurrent()->getCalendar()->getInitDate(), … … 1000 1058 << "An operation must be defined for field \"" << getId() << "\"."); 1001 1059 1002 if (freq_op.isEmpty()) freq_op.setValue(TimeStep); 1003 if (freq_offset.isEmpty()) freq_offset.setValue(NoneDu); 1060 checkAttributes(); 1004 1061 1005 1062 const bool detectMissingValues = (!detect_missing_value.isEmpty() && !default_value.isEmpty() && detect_missing_value == true); 1006 1007 1063 boost::shared_ptr<CTemporalFilter> temporalFilter(new CTemporalFilter(gc, operation, 1008 1064 CContext::getCurrent()->getCalendar()->getInitDate(), … … 1450 1506 1451 1507 /*! 1508 * Check on freq_off and freq_op attributes. 
1509 */ 1510 void CField::checkAttributes(void) 1511 { 1512 bool isFieldRead = file && !file->mode.isEmpty() && file->mode == CFile::mode_attr::read; 1513 if (isFieldRead && operation.getValue() != "instant") 1514 ERROR("void CField::checkAttributes(void)", 1515 << "Unsupported operation for field '" << getFieldOutputName() << "'." << std::endl 1516 << "Currently only \"instant\" is supported for fields read from file.") 1517 1518 if (freq_op.isEmpty()) 1519 { 1520 if (operation.getValue() == "instant") 1521 freq_op.setValue(file->output_freq.getValue()); 1522 else 1523 freq_op.setValue(TimeStep); 1524 } 1525 if (freq_offset.isEmpty()) 1526 freq_offset.setValue(isFieldRead ? NoneDu : (freq_op.getValue() - TimeStep)); 1527 } 1528 1529 /*! 1452 1530 * Returns string arithmetic expression associated to the field. 1453 1531 * \return if content is defined return content string, otherwise, if "expr" attribute is defined, return expr string. -
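The new checkForLateDataFromServer() above waits for a record that was requested but has not yet arrived: it keeps servicing communication buffers while accumulating the time actively spent, and raises an error once CXios::recvFieldTimeout is exceeded. A hedged sketch of that polling shape; the two callbacks and the timeout argument are stand-ins, not the XIOS API:

    #include <chrono>
    #include <functional>
    #include <stdexcept>

    void waitForServerData(std::function<bool()> dataIsReady,
                           std::function<void()> serviceBuffers,
                           double timeoutSeconds)
    {
      using clock = std::chrono::steady_clock;
      double cumulated = 0.0;                    // time spent actively waiting so far
      while (!dataIsReady() && cumulated < timeoutSeconds)
      {
        clock::time_point t0 = clock::now();
        serviceBuffers();                        // checkBuffersAndListen() in XIOS
        cumulated += std::chrono::duration<double>(clock::now() - t0).count();
      }
      if (!dataIsReady())
        throw std::runtime_error("late data from server: timeout reached");
    }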
XIOS/dev/branch_openmp/src/node/field.hpp
r1119 r1328 150 150 static void recvReadDataReady(CEventServer& event); 151 151 void recvReadDataReady(vector<int> ranks, vector<CBufferIn*> buffers); 152 void checkForLateDataFromServer(void); 152 153 void outputField(CArray<double,3>& fieldOut); 153 154 void outputField(CArray<double,2>& fieldOut); … … 176 177 void sendAddAllVariables(); 177 178 179 /// Vérifications /// 180 void checkAttributes(void); 178 181 179 182 const std::vector<StdString>& getRefDomainAxisIds(); … … 181 184 const string& getExpression(void); 182 185 bool hasExpression(void) const; 186 187 CField* my_getDirectFieldReference(void) const; 188 189 183 190 184 191 public: … … 195 202 bool isEOF; 196 203 CDate lastlast_Write_srv, last_Write_srv, last_operation_srv; 197 CDate lastDataRequestedFromServer, lastDataReceivedFromServer ;198 bool wasData AlreadyReceivedFromServer;204 CDate lastDataRequestedFromServer, lastDataReceivedFromServer, dateEOF; 205 bool wasDataRequestedFromServer, wasDataAlreadyReceivedFromServer; 199 206 200 207 map<int,boost::shared_ptr<func::CFunctor> > foperation_srv; -
XIOS/dev/branch_openmp/src/node/file.cpp
r1287 r1328 18 18 #include "timer.hpp" 19 19 20 20 21 namespace xios { 21 22 … … 24 25 CFile::CFile(void) 25 26 : CObjectTemplate<CFile>(), CFileAttributes() 26 , vFieldGroup(), data_out(), enabledFields() , fileComm(MPI_COMM_NULL)27 , vFieldGroup(), data_out(), enabledFields() 27 28 , allDomainEmpty(false), isOpen(false) 28 29 { … … 33 34 CFile::CFile(const StdString & id) 34 35 : CObjectTemplate<CFile>(id), CFileAttributes() 35 , vFieldGroup(), data_out(), enabledFields() , fileComm(MPI_COMM_NULL)36 , vFieldGroup(), data_out(), enabledFields() 36 37 , allDomainEmpty(false), isOpen(false) 37 38 { … … 263 264 // create sub communicator for file 264 265 int color = allDomainEmpty ? 0 : 1; 265 MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);266 if (allDomainEmpty) MPI_Comm_free(&fileComm);266 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 267 if (allDomainEmpty) ep_lib::MPI_Comm_free(&fileComm); 267 268 268 269 // if (time_counter.isEmpty()) time_counter.setValue(time_counter_attr::centered); … … 461 462 { 462 463 int commSize, commRank; 463 MPI_Comm_size(fileComm, &commSize);464 MPI_Comm_rank(fileComm, &commRank);464 ep_lib::MPI_Comm_size(fileComm, &commSize); 465 ep_lib::MPI_Comm_rank(fileComm, &commRank); 465 466 466 467 if (server->intraCommSize > 1) … … 482 483 if (isOpen) data_out->closeFile(); 483 484 484 data_out = boost::shared_ptr<CDataOutput>(new CNc4DataOutput(this, oss.str(), append, useClassicFormat, useCFConvention,485 static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective, time_counter_name));485 data_out = shared_ptr<CDataOutput>(new CNc4DataOutput(this, oss.str(), append, useClassicFormat, useCFConvention, 486 fileComm, multifile, isCollective, time_counter_name)); 486 487 isOpen = true; 487 488 … … 577 578 { 578 579 int commSize, commRank; 579 MPI_Comm_size(fileComm, &commSize);580 MPI_Comm_rank(fileComm, &commRank);580 ep_lib::MPI_Comm_size(fileComm, &commSize); 581 ep_lib::MPI_Comm_rank(fileComm, &commRank); 581 582 582 583 if (server->intraCommSize > 1) … … 596 597 bool isCollective = par_access.isEmpty() || par_access == par_access_attr::collective; 597 598 598 #ifdef _usingEP599 //printf("multifile was %d\n", multifile);600 multifile = true;601 599 if (isOpen) data_out->closeFile(); 602 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective));603 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective, time_counter_name));600 if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective)); 601 else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name)); 604 602 isOpen = true; 605 #else606 if (isOpen) data_out->closeFile();607 if (time_counter_name.isEmpty()) data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective));608 else data_in = boost::shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast< ::MPI_Comm >(fileComm.mpi_comm), multifile, isCollective, time_counter_name));609 isOpen = true;610 #endif611 612 613 603 } 614 604 } … … 625 615 this->data_in->closeFile(); 626 616 } 627 if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 617 // if (fileComm != MPI_COMM_NULL) 
MPI_Comm_free(&fileComm); 618 //if (fileComm.mpi_comm != ::MPI_COMM_NULL) MPI_Comm_free(&fileComm); 628 619 } 629 620 //---------------------------------------------------------------- … … 638 629 639 630 // It would probably be better to call initFile() somehow 640 641 MPI_Comm_dup(client->intraComm, &fileComm); 631 ep_lib::MPI_Comm_dup(client->intraComm, &fileComm); 642 632 if (time_counter_name.isEmpty()) time_counter_name = "time_counter"; 643 633 644 //#pragma omp critical (_readAttributesOfEnabledFieldsInReadMode_) 645 //{ 646 checkFile(); // calls nc_open 634 checkFile(); 647 635 648 636 for (int idx = 0; idx < enabledFields.size(); ++idx) … … 658 646 659 647 // Read necessary value from file 660 #pragma omp critical (_func) 661 { 662 //checkFile(); 663 this->data_in->readFieldAttributesValues(enabledFields[idx]); 664 //close(); 665 } 666 648 this->data_in->readFieldAttributesValues(enabledFields[idx]); 649 667 650 // Fill attributes for base reference 668 651 enabledFields[idx]->solveGridDomainAxisBaseRef(); … … 671 654 // Now everything is ok, close it 672 655 close(); 673 //}674 675 //if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);676 677 656 } 678 657 … … 806 785 807 786 /*! 808 Prefetching the data for enabled fields read from file whose data is out-of-date. 809 */ 810 void CFile::prefetchEnabledReadModeFieldsIfNeeded(void) 787 Do all post timestep operations for enabled fields in read mode: 788 - Prefetch the data read from file when needed 789 - Check that the data excepted from server has been received 790 */ 791 void CFile::doPostTimestepOperationsForEnabledReadModeFields(void) 811 792 { 812 793 if (mode.isEmpty() || mode.getValue() != mode_attr::read) … … 815 796 int size = this->enabledFields.size(); 816 797 for (int i = 0; i < size; ++i) 798 { 799 this->enabledFields[i]->checkForLateDataFromServer(); 817 800 this->enabledFields[i]->sendReadDataRequestIfNeeded(); 801 } 818 802 } 819 803 … … 1121 1105 CField* field = this->enabledFields[i]; 1122 1106 this->sendAddField(field->getId()); 1107 field->checkAttributes(); 1123 1108 field->sendAllAttributesToServer(); 1124 1109 field->sendAddAllVariables(); -
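file.cpp now routes its communicator calls through the ep_lib wrappers, but the underlying pattern is unchanged: split the server's intra-communicator by a colour separating ranks that hold data from empty ones, and free the communicator on ranks that will never touch the file. Sketched with plain MPI below (a minimal sketch assuming an already-initialised MPI environment):

    #include <mpi.h>

    MPI_Comm makeFileComm(MPI_Comm intraComm, bool domainEmpty)
    {
      int rank;
      MPI_Comm_rank(intraComm, &rank);
      int color = domainEmpty ? 0 : 1;    // writers in one group, idle ranks in the other
      MPI_Comm fileComm;
      MPI_Comm_split(intraComm, color, rank, &fileComm);
      if (domainEmpty)                    // idle ranks drop their communicator at once
      {
        MPI_Comm_free(&fileComm);
        fileComm = MPI_COMM_NULL;
      }
      return fileComm;
    }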
XIOS/dev/branch_openmp/src/node/file.hpp
r1134 r1328 4 4 /// XIOS headers /// 5 5 #include "xios_spl.hpp" 6 #include "mpi_std.hpp" 6 7 #include "field.hpp" 7 8 #include "data_output.hpp" … … 11 12 #include "attribute_enum.hpp" 12 13 #include "attribute_enum_impl.hpp" 13 #include "mpi.hpp"14 #ifdef _usingEP15 #include "ep_declaration.hpp"16 #endif17 14 18 15 namespace xios { … … 110 107 void buildFilterGraphOfEnabledFields(CGarbageCollector& gc); 111 108 void prefetchEnabledReadModeFields(); 112 void prefetchEnabledReadModeFieldsIfNeeded();109 void doPostTimestepOperationsForEnabledReadModeFields(); 113 110 114 111 // Add component into file -
XIOS/dev/branch_openmp/src/node/generate_rectilinear_domain.hpp
r1134 r1328 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CGenerateRectilinearDomain 64 63 -
XIOS/dev/branch_openmp/src/node/grid.cpp
r1205 r1328 1114 1114 outLocalIndexToServer(idx) = itIndex->second; 1115 1115 } 1116 1116 1117 1117 const std::list<int>& ranks = client->getRanksServerLeader(); 1118 1118 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) … … 1750 1750 pDom->solveRefInheritance(apply); 1751 1751 pDom->solveInheritanceTransformation(); 1752 //cout<<"pDom check"<<endl; 1752 1753 } 1753 1754 } -
XIOS/dev/branch_openmp/src/node/interpolate_axis.hpp
r1134 r1328 62 62 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 63 63 static bool _dummyRegistered; 64 #pragma omp threadprivate(_dummyRegistered)65 66 64 }; // class CInterpolateAxis 67 65 -
XIOS/dev/branch_openmp/src/node/interpolate_domain.hpp
r1134 r1328 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CInterpolateDomain 64 63 -
XIOS/dev/branch_openmp/src/node/inverse_axis.hpp
r1134 r1328 59 59 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered)62 61 63 62 }; // class CInverseAxis -
XIOS/dev/branch_openmp/src/node/mesh.cpp
r1134 r1328 6 6 7 7 #include "mesh.hpp" 8 using namespace ep_lib; 8 9 9 10 namespace xios { … … 31 32 } 32 33 33 std::map <StdString, CMesh> CMesh::meshList = std::map <StdString, CMesh>();34 std::map <StdString, vector<int> > CMesh::domainList = std::map <StdString, vector<int> >();34 //std::map <StdString, CMesh> CMesh::meshList = std::map <StdString, CMesh>(); 35 //std::map <StdString, vector<int> > CMesh::domainList = std::map <StdString, vector<int> >(); 35 36 36 37 std::map <StdString, CMesh> *CMesh::meshList_ptr = 0; 37 38 std::map <StdString, vector<int> > *CMesh::domainList_ptr = 0; 38 39 39 40 40 ///--------------------------------------------------------------- … … 45 45 * \param [in] nvertex Number of verteces (1 for nodes, 2 for edges, 3 and up for faces). 46 46 */ 47 48 /* bkp49 CMesh* CMesh::getMesh (StdString meshName, int nvertex)50 {51 CMesh::domainList[meshName].push_back(nvertex);52 53 if ( CMesh::meshList.begin() != CMesh::meshList.end() )54 {55 for (std::map<StdString, CMesh>::iterator it=CMesh::meshList.begin(); it!=CMesh::meshList.end(); ++it)56 {57 if (it->first == meshName)58 return &meshList[meshName];59 else60 {61 CMesh newMesh;62 CMesh::meshList.insert( make_pair(meshName, newMesh) );63 return &meshList[meshName];64 }65 }66 }67 else68 {69 CMesh newMesh;70 CMesh::meshList.insert( make_pair(meshName, newMesh) );71 return &meshList[meshName];72 }73 }74 */75 76 47 CMesh* CMesh::getMesh (StdString meshName, int nvertex) 77 48 { … … 79 50 if(CMesh::meshList_ptr == NULL) CMesh::meshList_ptr = new std::map <StdString, CMesh>(); 80 51 81 (*CMesh::domainList_ptr)[meshName].push_back(nvertex); 82 83 if ( (*CMesh::meshList_ptr).begin() != (*CMesh::meshList_ptr).end() ) 84 { 85 for (std::map<StdString, CMesh>::iterator it=(*CMesh::meshList_ptr).begin(); it!=(*CMesh::meshList_ptr).end(); ++it) 52 //CMesh::domainList[meshName].push_back(nvertex); 53 CMesh::domainList_ptr->at(meshName).push_back(nvertex); 54 55 //if ( CMesh::meshList.begin() != CMesh::meshList.end() ) 56 if ( CMesh::meshList_ptr->begin() != CMesh::meshList_ptr->end() ) 57 { 58 //for (std::map<StdString, CMesh>::iterator it=CMesh::meshList.begin(); it!=CMesh::meshList.end(); ++it) 59 for (std::map<StdString, CMesh>::iterator it=CMesh::meshList_ptr->begin(); it!=CMesh::meshList_ptr->end(); ++it) 86 60 { 87 61 if (it->first == meshName) 88 return &((*CMesh::meshList_ptr)[meshName]); 62 //return &meshList[meshName]; 63 return &meshList_ptr->at(meshName); 89 64 else 90 65 { 91 66 CMesh newMesh; 92 (*CMesh::meshList_ptr).insert( make_pair(meshName, newMesh) ); 93 return &((*CMesh::meshList_ptr)[meshName]); 67 //CMesh::meshList.insert( make_pair(meshName, newMesh) ); 68 CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 69 //return &meshList[meshName]; 70 return &meshList_ptr->at(meshName); 94 71 } 95 72 } … … 98 75 { 99 76 CMesh newMesh; 100 (*CMesh::meshList_ptr).insert( make_pair(meshName, newMesh) ); 101 return &((*CMesh::meshList_ptr)[meshName]); 77 //CMesh::meshList.insert( make_pair(meshName, newMesh) ); 78 CMesh::meshList_ptr->insert( make_pair(meshName, newMesh) ); 79 //return &meshList[meshName]; 80 return &meshList_ptr->at(meshName); 102 81 } 103 82 } … … 524 503 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 
525 504 */ 526 void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm,505 void CMesh::createMeshEpsilon(const MPI_Comm& comm, 527 506 const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 528 507 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) … … 1724 1703 */ 1725 1704 1726 void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,1705 void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 1727 1706 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1728 1707 CArray<int, 2>& nghbFaces) … … 1880 1859 */ 1881 1860 1882 void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,1861 void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 1883 1862 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1884 1863 CArray<int, 2>& nghbFaces) … … 2061 2040 */ 2062 2041 2063 void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm,2042 void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm, 2064 2043 const CArray<int, 1>& face_idx, 2065 2044 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, -
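One behavioural detail worth flagging in the getMesh() rewrite above: std::map::at() throws std::out_of_range for a key that was never inserted, whereas the operator[] form it replaces silently default-constructs the entry. A small self-contained demo of the difference:

    #include <iostream>
    #include <map>
    #include <stdexcept>
    #include <string>
    #include <vector>

    int main()
    {
      std::map<std::string, std::vector<int> > domains;

      domains["mesh_a"].push_back(3);      // operator[]: inserts an empty vector first

      try {
        domains.at("mesh_b").push_back(4); // at(): throws, key was never inserted
      } catch (const std::out_of_range&) {
        std::cout << "mesh_b not registered yet\n";
      }
      return 0;
    }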
XIOS/dev/branch_openmp/src/node/mesh.hpp
r1134 r1328 80 80 int nbFaces_; 81 81 82 static std::map <StdString, CMesh> meshList; 83 static std::map <StdString, vector<int> > domainList; 84 82 //static std::map <StdString, CMesh> meshList; 85 83 static std::map <StdString, CMesh> *meshList_ptr; 84 //static std::map <StdString, vector<int> > domainList; 86 85 static std::map <StdString, vector<int> > *domainList_ptr; 87 #pragma omp threadprivate(meshList_ptr, domainList_ptr)88 89 86 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo> 90 87 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo> -
XIOS/dev/branch_openmp/src/node/reduce_axis_to_scalar.hpp
r1134 r1328 59 59 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 60 60 static bool _dummyRegistered; 61 #pragma omp threadprivate(_dummyRegistered)62 61 }; // class CReduceAxisToScalar 63 62 -
XIOS/dev/branch_openmp/src/node/reduce_domain_to_axis.hpp
r1134 r1328 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CReduceDomainToAxis 64 63 -
XIOS/dev/branch_openmp/src/node/reduce_domain_to_scalar.hpp
r1134 r1328 60 60 static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CReduceDomainToScalar 64 63 -
XIOS/dev/branch_openmp/src/node/scalar.cpp
r1134 r1328 28 28 29 29 //std::map<StdString, ETranformationType> CScalar::transformationMapList_ = std::map<StdString, ETranformationType>(); 30 std::map<StdString, ETranformationType> *CScalar::transformationMapList_ptr = 0; 30 31 //bool CScalar::dummyTransformationMapList_ = CScalar::initializeTransformationMap(CScalar::transformationMapList_); 31 32 std::map<StdString, ETranformationType> *CScalar::transformationMapList_ptr = 0;33 34 32 bool CScalar::initializeTransformationMap(std::map<StdString, ETranformationType>& m) 35 33 { … … 175 173 176 174 nodeElementName = node.getElementName(); 175 //std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it; 177 176 if(CScalar::transformationMapList_ptr == 0) initializeTransformationMap(); 178 std::map<StdString, ETranformationType>::const_iterator ite = (*CScalar::transformationMapList_ptr).end(), it; 179 it = (*CScalar::transformationMapList_ptr).find(nodeElementName); 177 std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_ptr->end(), it; 178 //it = transformationMapList_.find(nodeElementName); 179 it = transformationMapList_ptr->find(nodeElementName); 180 180 if (ite != it) 181 181 { -
XIOS/dev/branch_openmp/src/node/scalar.hpp
r1134 r1328 88 88 static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m); 89 89 static bool initializeTransformationMap(); 90 91 //static bool dummyTransformationMapList_; 92 90 //static std::map<StdString, ETranformationType> transformationMapList_; 93 91 static std::map<StdString, ETranformationType> *transformationMapList_ptr; 94 #pragma omp threadprivate(transformationMapList_ptr)92 static bool dummyTransformationMapList_; 95 93 96 94 -
XIOS/dev/branch_openmp/src/node/transformation.hpp
r1138 r1328 35 35 typedef std::map<ETranformationType, CreateTransformationCallBack> CallBackMap; 36 36 static CallBackMap* transformationCreationCallBacks_; 37 //#pragma omp threadprivate(transformationCreationCallBacks_)38 37 39 38 static bool registerTransformation(ETranformationType transType, CreateTransformationCallBack createFn); … … 66 65 if (0 == transformationCreationCallBacks_) 67 66 transformationCreationCallBacks_ = new CallBackMap(); 67 68 68 return (*transformationCreationCallBacks_).insert(make_pair(transType, createFn)).second; 69 69 } -
XIOS/dev/branch_openmp/src/node/zoom_axis.hpp
r1134 r1328 60 60 static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 62 }; // class CZoomAxis 64 63 -
XIOS/dev/branch_openmp/src/node/zoom_domain.hpp
r1134 r1328 60 60 static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node); 61 61 static bool _dummyRegistered; 62 #pragma omp threadprivate(_dummyRegistered)63 64 62 }; // class CZoomDomain 65 63 -
XIOS/dev/branch_openmp/src/object_factory.cpp
r1134 r1328 5 5 /// ////////////////////// Définitions ////////////////////// /// 6 6 7 StdString *CObjectFactory::CurrContext_ptr = new StdString;7 StdString CObjectFactory::CurrContext(""); 8 8 9 9 void CObjectFactory::SetCurrentContextId(const StdString & context) 10 { 11 if(CObjectFactory::CurrContext_ptr == NULL ) CObjectFactory::CurrContext_ptr = new StdString; 12 CObjectFactory::CurrContext_ptr->assign(context); 13 } 10 { CObjectFactory::CurrContext = context; } 14 11 15 12 StdString & CObjectFactory::GetCurrentContextId(void) 16 { 17 return (*CObjectFactory::CurrContext_ptr); 18 } 13 { return (CObjectFactory::CurrContext); } 19 14 20 15 } // namespace xios -
XIOS/dev/branch_openmp/src/object_factory.hpp
r1134 r1328 59 59 60 60 /// Propriétés statiques /// 61 static StdString *CurrContext_ptr; 62 #pragma omp threadprivate(CurrContext_ptr) 61 static StdString CurrContext; 63 62 64 63 }; // class CObjectFactory -
XIOS/dev/branch_openmp/src/object_factory_decl.cpp
r1287 r1328 5 5 { 6 6 #define macro(U) \ 7 template boost::shared_ptr<U> CObjectFactory::GetObject<U>(const StdString& id); \8 template boost::shared_ptr<U> CObjectFactory::GetObject<U>(const StdString& context,const StdString& id); \9 template boost::shared_ptr<U> CObjectFactory::GetObject<U>(const U* const object); \7 template shared_ptr<U> CObjectFactory::GetObject<U>(const StdString& id); \ 8 template shared_ptr<U> CObjectFactory::GetObject<U>(const StdString& context,const StdString& id); \ 9 template shared_ptr<U> CObjectFactory::GetObject<U>(const U* const object); \ 10 10 template int CObjectFactory::GetObjectNum<U>(void); \ 11 11 template int CObjectFactory::GetObjectIdNum<U>(void); \ 12 template const std::vector< boost::shared_ptr<U> >& CObjectFactory::GetObjectVector<U>(const StdString& context ); \12 template const std::vector<shared_ptr<U> >& CObjectFactory::GetObjectVector<U>(const StdString& context ); \ 13 13 template bool CObjectFactory::HasObject<U>(const StdString& id); \ 14 14 template bool CObjectFactory::HasObject<U>(const StdString& context,const StdString& id); \ -
XIOS/dev/branch_openmp/src/object_factory_impl.hpp
r1134 r1328 6 6 namespace xios 7 7 { 8 /// ////////////////////// Définitions ////////////////////// ///8 /// ////////////////////// Définitions ////////////////////// /// 9 9 template <typename U> 10 10 int CObjectFactory::GetObjectNum(void) 11 11 { 12 if (CurrContext _ptr->size() == 0)12 if (CurrContext.size() == 0) 13 13 ERROR("CObjectFactory::GetObjectNum(void)", 14 14 << "please define current context id !"); 15 16 if(U::AllVectObj == NULL) return 0; 17 18 19 return (*U::AllVectObj)[*CObjectFactory::CurrContext_ptr].size(); 15 //return (U::AllVectObj[CObjectFactory::CurrContext].size()); 16 if(U::AllVectObj_ptr == NULL) return 0; 17 return (*U::AllVectObj_ptr)[CObjectFactory::CurrContext].size(); 20 18 } 21 19 … 23 21 int CObjectFactory::GetObjectIdNum(void) 24 22 { 25 if (CurrContext _ptr->size() == 0)23 if (CurrContext.size() == 0) 26 24 ERROR("CObjectFactory::GetObjectIdNum(void)", 27 25 << "please define current context id !"); 28 if(U::AllMapObj == NULL) return 0; 29 30 31 32 return (* U::AllMapObj) [*CObjectFactory::CurrContext_ptr].size(); 26 //return (U::AllMapObj[CObjectFactory::CurrContext].size()); 27 if(U::AllMapObj_ptr == NULL) return 0; 28 return (*U::AllMapObj_ptr)[CObjectFactory::CurrContext].size(); 33 29 } 34 30 … 36 32 bool CObjectFactory::HasObject(const StdString & id) 37 33 { 38 if (CurrContext _ptr->size() == 0)34 if (CurrContext.size() == 0) 39 35 ERROR("CObjectFactory::HasObject(const StdString & id)", 40 36 << "[ id = " << id << " ] please define current context id !"); 41 42 if(U::AllMapObj == NULL) return false; 37 //return (U::AllMapObj[CObjectFactory::CurrContext].find(id) != 38 // U::AllMapObj[CObjectFactory::CurrContext].end()); 39 if(U::AllMapObj_ptr == NULL) return false; 40 return ((*U::AllMapObj_ptr)[CObjectFactory::CurrContext].find(id) != 41 (*U::AllMapObj_ptr)[CObjectFactory::CurrContext].end()); 43 42 44 45 46 return ((*U::AllMapObj)[*CObjectFactory::CurrContext_ptr].find(id) !=47 (*U::AllMapObj)[*CObjectFactory::CurrContext_ptr].end());48 43 } 49 44 … 51 46 bool CObjectFactory::HasObject(const StdString & context, const StdString & id) 52 47 { 53 if(U::AllMapObj == NULL) return false;54 48 55 if (U::AllMapObj->find(context) == U::AllMapObj->end()) return false ; 49 // if (U::AllMapObj.find(context) == U::AllMapObj.end()) return false ; 50 // else return (U::AllMapObj[context].find(id) != U::AllMapObj[context].end()); 51 if(U::AllMapObj_ptr == NULL) return false; 56 52 57 else 58 { 59 return ((*U::AllMapObj)[context].find(id) != (*U::AllMapObj)[context].end()); 60 } 61 53 if (U::AllMapObj_ptr->find(context) == U::AllMapObj_ptr->end()) return false ; 54 else return ((*U::AllMapObj_ptr)[context].find(id) != (*U::AllMapObj_ptr)[context].end()); 62 55 } 63 56 … 65 58 boost::shared_ptr<U> CObjectFactory::GetObject(const U * const object) 66 59 { 67 if(U::AllVectObj == NULL) return (boost::shared_ptr<U>()); 68 69 if (CurrContext_ptr->size() == 0) 60 if(U::AllVectObj_ptr == NULL) return (boost::shared_ptr<U>()); 61 if (CurrContext.size() == 0) 70 62 ERROR("CObjectFactory::GetObject(const U * const object)", 71 63 << "please define current context id !"); 72 std::vector<boost::shared_ptr<U> > & vect =73 (*U::AllVectObj)[*CObjectFactory::CurrContext_ptr];64 //std::vector<boost::shared_ptr<U> > & vect = U::AllVectObj[CObjectFactory::CurrContext]; 65 std::vector<boost::shared_ptr<U> > & vect = (*U::AllVectObj_ptr)[CObjectFactory::CurrContext]; 74 66 75 67 typename std::vector<boost::shared_ptr<U> >::const_iterator … 92 84 boost::shared_ptr<U>
CObjectFactory::GetObject(const StdString & id) 93 85 { 94 if(U::AllMapObj == NULL) return (boost::shared_ptr<U>()); 95 96 if (CurrContext_ptr->size() == 0) 86 if(U::AllMapObj_ptr == NULL) return (boost::shared_ptr<U>()); 87 if (CurrContext.size() == 0) 97 88 ERROR("CObjectFactory::GetObject(const StdString & id)", 98 89 << "[ id = " << id << " ] please define current context id !"); … … 101 92 << "[ id = " << id << ", U = " << U::GetName() << " ] " 102 93 << "object was not found."); 103 return (*U::AllMapObj)[*CObjectFactory::CurrContext_ptr][id]; 94 95 //cout<<"CObjectFactory::GetObject(const StdString & id)[ id = " << id << ", U = " << U::GetName() << " ] "<<endl; 96 if(id == "src_domain_regular_read") 97 { 98 //cout<<"match"<<endl; 99 } 100 101 //return (U::AllMapObj[CObjectFactory::CurrContext][id]); 102 return (*U::AllMapObj_ptr)[CObjectFactory::CurrContext][id]; 104 103 } 105 104 … … 107 106 boost::shared_ptr<U> CObjectFactory::GetObject(const StdString & context, const StdString & id) 108 107 { 109 if(U::AllMapObj == NULL) return (boost::shared_ptr<U>());108 if(U::AllMapObj_ptr == NULL) return (boost::shared_ptr<U>()); 110 109 111 110 if (!CObjectFactory::HasObject<U>(context,id)) … … 114 113 << "object was not found."); 115 114 116 return (*U::AllMapObj)[context][id]; 115 //cout<<"CObjectFactory::GetObject(const StdString & context, const StdString & id)[ id = " << id << ", U = " << U::GetName() << " ] "<<endl; 116 if(id == "src_domain_regular_read") 117 { 118 //cout<<"match"<<endl; 119 boost::shared_ptr<U> value; 120 } 121 //return (U::AllMapObj[context][id]); 122 return (*U::AllMapObj_ptr)[context][id]; 117 123 } 118 124 … … 120 126 boost::shared_ptr<U> CObjectFactory::CreateObject(const StdString& id) 121 127 { 122 if(U::AllVectObj == NULL) U::AllVectObj= new xios_map<StdString, std::vector<boost::shared_ptr<U> > >;123 if(U::AllMapObj == NULL) U::AllMapObj= new xios_map<StdString, xios_map<StdString, boost::shared_ptr<U> > >;128 if(U::AllVectObj_ptr == NULL) U::AllVectObj_ptr = new xios_map<StdString, std::vector<boost::shared_ptr<U> > >; 129 if(U::AllMapObj_ptr == NULL) U::AllMapObj_ptr = new xios_map<StdString, xios_map<StdString, boost::shared_ptr<U> > >; 124 130 125 126 if (CurrContext_ptr->empty()) 131 if (CurrContext.empty()) 127 132 ERROR("CObjectFactory::CreateObject(const StdString& id)", 128 133 << "[ id = " << id << " ] please define current context id !"); … … 136 141 boost::shared_ptr<U> value(new U(id.empty() ? 
CObjectFactory::GenUId<U>() : id)); 137 142 138 (* U::AllVectObj)[*CObjectFactory::CurrContext_ptr].insert((*U::AllVectObj)[*CObjectFactory::CurrContext_ptr].end(), value); 139 (* U::AllMapObj) [*CObjectFactory::CurrContext_ptr].insert(std::make_pair(value->getId(), value)); 143 //U::AllVectObj[CObjectFactory::CurrContext].insert(U::AllVectObj[CObjectFactory::CurrContext].end(), value); 144 //U::AllMapObj[CObjectFactory::CurrContext].insert(std::make_pair(value->getId(), value)); 145 146 (*U::AllVectObj_ptr)[CObjectFactory::CurrContext].insert((*U::AllVectObj_ptr)[CObjectFactory::CurrContext].end(), value); 147 (*U::AllMapObj_ptr) [CObjectFactory::CurrContext].insert(std::make_pair(value->getId(), value)); 148 //cout<<"CObjectFactory::CreateObject(const StdString& id) [ id = " << id << " ]" <<endl; 140 149 141 150 return value; … … 147 156 CObjectFactory::GetObjectVector(const StdString & context) 148 157 { 149 if(U::AllVectObj != NULL) 150 151 return (*U::AllVectObj)[context]; 158 //return (U::AllVectObj[context]); 159 return (*U::AllVectObj_ptr)[context]; 152 160 } 153 161 … … 163 171 { 164 172 StdOStringStream oss; 165 if(U::GenId == NULL) U::GenId= new xios_map< StdString, long int >;166 oss << GetUIdBase<U>() << (*U::GenId )[*CObjectFactory::CurrContext_ptr]++;173 if(U::GenId_ptr == NULL) U::GenId_ptr = new xios_map< StdString, long int >; 174 oss << GetUIdBase<U>() << (*U::GenId_ptr)[CObjectFactory::CurrContext]++; 167 175 return oss.str(); 168 176 } -
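The object_factory_impl.hpp hunks above keep every object in lazily-allocated registries keyed first by the active context id and then by object id. A condensed sketch of the create-or-reuse path, with a single hypothetical Obj type in place of the template parameter U and the empty-context error handling elided:

    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct Obj { std::string id; explicit Obj(std::string i) : id(std::move(i)) {} };

    static std::string currContext;                            // active context id
    static std::map<std::string,
            std::map<std::string, std::shared_ptr<Obj> > >* allMap = nullptr;

    std::shared_ptr<Obj> createObject(const std::string& id)
    {
      if (allMap == nullptr)                                   // registry allocated on first use
        allMap = new std::map<std::string,
                              std::map<std::string, std::shared_ptr<Obj> > >();
      std::map<std::string, std::shared_ptr<Obj> >& ctxMap = (*allMap)[currContext];
      std::map<std::string, std::shared_ptr<Obj> >::iterator it = ctxMap.find(id);
      if (it != ctxMap.end()) return it->second;               // already registered
      std::shared_ptr<Obj> value = std::make_shared<Obj>(id);
      ctxMap.insert(std::make_pair(id, value));                // register under current context
      return value;
    }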
XIOS/dev/branch_openmp/src/object_template.hpp
r1287 r1328 77 77 static T* get(const string& contextId, const string& id) ; 78 78 T* get(void) ; 79 boost::shared_ptr<T> getShared(void) ;80 static boost::shared_ptr<T> getShared(const T* ptr) ;79 shared_ptr<T> getShared(void) ; 80 static shared_ptr<T> getShared(const T* ptr) ; 81 81 82 82 static T* create(const string& id=string("")) ; … … 100 100 101 101 /// Propriétés statiques /// 102 // bkp 103 // static xios_map<StdString, 104 // xios_map<StdString, 105 // boost::shared_ptr<DerivedType> > > AllMapObj; 106 // static xios_map<StdString, 107 // std::vector<boost::shared_ptr<DerivedType> > > AllVectObj; 102 static xios_map<StdString, 103 xios_map<StdString, 104 boost::shared_ptr<DerivedType> > > *AllMapObj_ptr; 105 static xios_map<StdString, 106 std::vector<boost::shared_ptr<DerivedType> > > *AllVectObj_ptr; 108 107 109 // static xios_map< StdString, long int > GenId ; 110 111 112 static xios_map<StdString, xios_map<StdString, boost::shared_ptr<DerivedType> > > *AllMapObj; 113 static xios_map<StdString, std::vector<boost::shared_ptr<DerivedType> > > *AllVectObj; 114 static xios_map< StdString, long int > *GenId; 115 #pragma omp threadprivate(AllMapObj, AllVectObj, GenId) 108 static xios_map< StdString, long int > *GenId_ptr ; 116 109 117 110 }; // class CObjectTemplate -
XIOS/dev/branch_openmp/src/object_template_impl.hpp
r1287 r1328 24 24 xios_map<StdString, 25 25 xios_map<StdString, 26 boost::shared_ptr<T> > > *CObjectTemplate<T>::AllMapObj = 0; 26 //boost::shared_ptr<T> > > CObjectTemplate<T>::AllMapObj; 27 boost::shared_ptr<T> > > *CObjectTemplate<T>::AllMapObj_ptr = 0; 27 28 28 29 template <class T> 29 30 xios_map<StdString, 30 std::vector<boost::shared_ptr<T> > > *CObjectTemplate<T>::AllVectObj = 0; 31 32 template <class T> 33 xios_map<StdString,long int> *CObjectTemplate<T>::GenId = 0; 31 //std::vector<boost::shared_ptr<T> > > CObjectTemplate<T>::AllVectObj; 32 std::vector<boost::shared_ptr<T> > > *CObjectTemplate<T>::AllVectObj_ptr = 0; 33 34 template <class T> 35 //xios_map<StdString,long int> CObjectTemplate<T>::GenId; 36 xios_map<StdString,long int> *CObjectTemplate<T>::GenId_ptr = 0; 34 37 35 38 template <class T> … … 66 69 CObjectTemplate<T>::GetAllVectobject(const StdString & contextId) 67 70 { 68 return (CObjectTemplate<T>::AllVectObj->at(contextId)); 71 //return (CObjectTemplate<T>::AllVectObj[contextId]); 72 return (CObjectTemplate<T>::AllVectObj_ptr->at(contextId)); 69 73 } 70 74 … … 321 325 322 326 template <typename T> 323 boost::shared_ptr<T> CObjectTemplate<T>::getShared(const T* ptr)327 shared_ptr<T> CObjectTemplate<T>::getShared(const T* ptr) 324 328 { 325 329 return CObjectFactory::GetObject<T>(ptr); … … 327 331 328 332 template <typename T> 329 boost::shared_ptr<T> CObjectTemplate<T>::getShared(void)333 shared_ptr<T> CObjectTemplate<T>::getShared(void) 330 334 { 331 335 return CObjectFactory::GetObject<T>((T*)this); … … 335 339 const vector<T*> CObjectTemplate<T>::getAll() 336 340 { 337 const vector< boost::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>();341 const vector< shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(); 338 342 vector<T*> vect; 339 343 340 typename vector< boost::shared_ptr<T> >::const_iterator it;344 typename vector<shared_ptr<T> >::const_iterator it; 341 345 for(it=shared_vect.begin();it!=shared_vect.end();++it) vect.push_back(it->get()); 342 346 return vect; … … 346 350 const vector<T*> CObjectTemplate<T>::getAll(const string & id) 347 351 { 348 const vector< boost::shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(id);352 const vector< shared_ptr<T> >& shared_vect= CObjectFactory::GetObjectVector<T>(id); 349 353 vector<T*> vect; 350 354 351 typename vector< boost::shared_ptr<T> >::const_iterator it;355 typename vector<shared_ptr<T> >::const_iterator it; 352 356 for(it=shared_vect.begin();it!=shared_vect.end();++it) vect.push_back(it->get()); 353 357 return vect; -
XIOS/dev/branch_openmp/src/parse_expr/lex_parser.cpp
r1134 r1328 347 347 extern char *yytext; 348 348 #define yytext_ptr yytext 349 350 349 static yyconst flex_int16_t yy_nxt[][128] = 351 350 { -
XIOS/dev/branch_openmp/src/parse_expr/yacc_parser.cpp
r1134 r1328 80 80 } 81 81 82 static IFilterExprNode* parsed; 83 static std::string globalInputText; 84 static std::string *globalInputText_ptr = 0; 85 static size_t globalReadOffset = 0; 86 #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 87 82 IFilterExprNode* parsed; 83 std::string globalInputText; 84 size_t globalReadOffset = 0; 85 88 86 int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 89 87 { 90 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;91 88 size_t numBytesToRead = maxBytesToRead; 92 size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset;89 size_t bytesRemaining = globalInputText.length()-globalReadOffset; 93 90 size_t i; 94 91 if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 95 for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i];92 for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i]; 96 93 *numBytesRead = numBytesToRead; 97 94 globalReadOffset += numBytesToRead; … … 2005 2002 IFilterExprNode* parseExpr(const string& strExpr) 2006 2003 { 2007 #pragma omp critical (_parser) 2008 { 2009 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 2010 (*globalInputText_ptr).assign (strExpr); 2011 globalReadOffset = 0; 2012 yyparse(); 2013 } 2004 globalInputText = strExpr; 2005 globalReadOffset = 0; 2006 yyparse(); 2014 2007 return parsed; 2015 2008 } … … 2017 2010 2018 2011 2019 -
XIOS/dev/branch_openmp/src/parse_expr/yacc_parser.yacc
r1134 r1328 15 15 } 16 16 17 static IFilterExprNode* parsed; 18 static std::string globalInputText; 19 static std::string *globalInputText_ptr = 0; 20 static size_t globalReadOffset = 0; 21 #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset) 22 17 IFilterExprNode* parsed; 18 std::string globalInputText; 19 size_t globalReadOffset = 0; 20 23 21 int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead) 24 22 { 25 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;26 23 size_t numBytesToRead = maxBytesToRead; 27 size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset;24 size_t bytesRemaining = globalInputText.length()-globalReadOffset; 28 25 size_t i; 29 26 if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining; 30 for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i];27 for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i]; 31 28 *numBytesRead = numBytesToRead; 32 29 globalReadOffset += numBytesToRead; … … 148 145 IFilterExprNode* parseExpr(const string& strExpr) 149 146 { 150 #pragma omp critical (_parser) 151 { 152 if(globalInputText_ptr == 0) globalInputText_ptr = new std::string; 153 (*globalInputText_ptr).assign (strExpr); 154 globalReadOffset = 0; 155 yyparse(); 156 } 147 globalInputText = strExpr; 148 globalReadOffset = 0; 149 yyparse(); 157 150 return parsed; 158 151 } -
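Both parser files above feed flex/bison from an in-memory string: parseExpr() stores the expression and resets an offset, and the lexer pulls successive chunks through a read callback. Note that with the omp critical guard dropped here, that state is held in plain shared statics again. A sketch of the callback, with illustrative names; flex guarantees the buffer can hold maxBytes:

    #include <cstddef>
    #include <string>

    static std::string inputText;      // expression being parsed
    static std::size_t readOffset = 0; // how far the lexer has consumed

    int readInput(char* buffer, std::size_t* numBytesRead, std::size_t maxBytes)
    {
      std::size_t remaining = inputText.length() - readOffset;
      std::size_t n = (maxBytes < remaining) ? maxBytes : remaining;
      for (std::size_t i = 0; i < n; ++i)
        buffer[i] = inputText[readOffset + i];   // copy the next chunk
      *numBytesRead = n;                         // 0 signals end-of-input to flex
      readOffset += n;
      return 0;
    }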
XIOS/dev/branch_openmp/src/policy.cpp
r855 r1328 10 10 #include "policy.hpp" 11 11 #include <cmath> 12 using namespace ep_lib; 12 13 13 14 namespace xios -
XIOS/dev/branch_openmp/src/registry.cpp
r1134 r1328 1 1 #include "registry.hpp" 2 2 #include "type.hpp" 3 #include <mpi.hpp> 3 4 #include <fstream> 4 5 #include <sstream> 6 using namespace ep_lib; 5 7 6 8 namespace xios … … 257 259 void CRegistry::hierarchicalGatherRegistry(void) 258 260 { 259 // hierarchicalGatherRegistry(communicator) ; 260 gatherRegistry(communicator) ; 261 hierarchicalGatherRegistry(communicator) ; 261 262 } 262 263 -
XIOS/dev/branch_openmp/src/registry.hpp
r1134 r1328 6 6 #include "mpi.hpp" 7 7 #include "message.hpp" 8 #ifdef _usingEP9 #include "ep_declaration.hpp"10 #endif11 12 8 13 9 // Those two headers can be replaced by the C++11 equivalent in the future … … 110 106 111 107 /** use internally for recursivity */ 112 void gatherRegistry(const MPI_Comm& comm) ;108 void gatherRegistry(const ep_lib::MPI_Comm& comm) ; 113 109 114 110 /** use internally for recursivity */ 115 void hierarchicalGatherRegistry(const MPI_Comm& comm) ;111 void hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) ; 116 112 117 113 -
XIOS/dev/branch_openmp/src/server.cpp
r1205 r1328 9 9 #include <boost/functional/hash.hpp> 10 10 #include <boost/algorithm/string.hpp> 11 #include "mpi.hpp" 11 12 #include "tracer.hpp" 12 13 #include "timer.hpp" 13 14 #include "event_scheduler.hpp" 15 using namespace ep_lib; 14 16 15 17 namespace xios … … 25 27 bool CServer::finished=false ; 26 28 bool CServer::is_MPI_Initialized ; 27 28 29 29 CEventScheduler* CServer::eventScheduler = 0; 30 30 31 31 void CServer::initialize(void) 32 32 { 33 // int initialized ; 34 // MPI_Initialized(&initialized) ; 35 // if (initialized) is_MPI_Initialized=true ; 36 // else is_MPI_Initialized=false ; 37 33 38 // Not using OASIS 34 39 if (!CXios::usingOasis) 35 40 { 36 41 42 // if (!is_MPI_Initialized) 43 // { 44 // MPI_Init(NULL, NULL); 45 // } 37 46 CTimer::get("XIOS").resume() ; 38 47 … … 42 51 unsigned long* hashAll ; 43 52 44 53 // int rank ; 45 54 int size ; 46 55 int myColor ; … … 49 58 50 59 MPI_Comm_size(CXios::globalComm,&size) ; 51 52 //size = CXios::globalComm.ep_comm_ptr->size_rank_info[0].second;53 printf("global size = %d, size= %d\n", CXios::globalComm.ep_comm_ptr->size_rank_info[0].second, size);54 55 60 MPI_Comm_rank(CXios::globalComm,&rank); 56 61 hashAll=new unsigned long[size] ; … … 73 78 74 79 myColor=colors[hashServer] ; 75 76 77 80 MPI_Comm_split(CXios::globalComm,myColor,rank,&intraComm) ; 78 81 79 80 82 int serverLeader=leaders[hashServer] ; 81 83 int clientLeader; 82 84 83 85 serverLeader=leaders[hashServer] ; 84 for(it=leaders.begin();it!=leaders.end(); ++it)86 for(it=leaders.begin();it!=leaders.end();it++) 85 87 { 86 88 if (it->first!=hashServer) … … 93 95 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 94 96 95 // test_sendrecv(CXios::globalComm);96 97 MPI_Intercomm_create(intraComm,0,CXios::globalComm,clientLeader,0,&newComm) ; 97 98 interComm.push_back(newComm) ; … … 102 103 } 103 104 // using OASIS 104 else105 /* else 105 106 { 107 int rank ,size; 106 108 int size; 107 109 if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId); … … 133 135 oasis_enddef() ; 134 136 } 135 137 */ 138 // int rank; 136 139 MPI_Comm_rank(intraComm,&rank) ; 137 140 if (rank==0) isRoot=true; … … 147 150 delete eventScheduler ; 148 151 149 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); ++it)152 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 150 153 MPI_Comm_free(&(*it)); 151 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); ++it)154 for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++) 152 155 MPI_Comm_free(&(*it)); 153 154 156 MPI_Comm_free(&intraComm); 155 157 … … 157 159 { 158 160 if (CXios::usingOasis) oasis_finalize(); 159 //else {MPI_Finalize() ;}161 //else MPI_Finalize() ; 160 162 } 161 162 163 163 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ; 164 164 report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ; … … 182 182 { 183 183 listenRootContext(); 184 if (!finished) 185 { 186 listenRootFinalize() ; 187 } 184 if (!finished) listenRootFinalize() ; 188 185 } 189 186 190 187 contextEventLoop() ; 191 188 if (finished && contextList.empty()) stop=true ; 192 193 189 eventScheduler->checkEvent() ; 194 190 } 195 196 197 191 CTimer::get("XIOS server").suspend() ; 198 192 } … … 204 198 int flag ; 205 199 206 for(it=interComm.begin();it!=interComm.end(); 
++it)200 for(it=interComm.begin();it!=interComm.end();it++) 207 201 { 208 202 MPI_Status status ; … … 214 208 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 215 209 info(20)<<" CServer : Receive client finalize"<<endl ; 216 217 210 MPI_Comm_free(&(*it)); 218 211 interComm.erase(it) ; … … 268 261 { 269 262 traceOff() ; 270 #ifdef _usingEP 271 MPI_Iprobe(-1,1,CXios::globalComm, &flag, &status) ; 272 #else 273 MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 274 #endif 263 MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ; 275 264 traceOn() ; 276 277 265 if (flag==true) 278 266 { … … 280 268 rank=status.MPI_SOURCE ; 281 269 #elif _usingEP 282 rank= status.ep_src;270 rank=status.ep_src; 283 271 #endif 284 272 MPI_Get_count(&status,MPI_CHAR,&count) ; … … 298 286 rank=status.MPI_SOURCE ; 299 287 #elif _usingEP 300 rank= status.ep_src;288 rank=status.ep_src; 301 289 #endif 302 290 MPI_Get_count(&status,MPI_CHAR,&count) ; … … 342 330 MPI_Isend(buff,count,MPI_CHAR,i,2,intraComm,&requests[i-1]) ; 343 331 } 344 345 332 MPI_Waitall(size-1,requests,status) ; 346 333 registerContext(buff,count,it->second.leaderRank) ; … … 422 409 bool finished ; 423 410 map<string,CContext*>::iterator it ; 424 for(it=contextList.begin();it!=contextList.end(); ++it)411 for(it=contextList.begin();it!=contextList.end();it++) 425 412 { 426 413 finished=it->second->checkBuffersAndListen(); -
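CServer::initialize() above keeps the usual XIOS registration scheme: every process allgathers a hash of its code identifier, processes sharing a hash get one colour (the lowest such rank becoming that group's leader), and the global communicator is split accordingly before inter-communicators are created towards the other groups' leaders. An abbreviated sketch in plain MPI, with simplified names:

    #include <mpi.h>
    #include <map>

    void splitByHash(MPI_Comm globalComm, unsigned long myHash,
                     MPI_Comm* intraComm, int* myLeader)
    {
      int rank, size;
      MPI_Comm_rank(globalComm, &rank);
      MPI_Comm_size(globalComm, &size);

      unsigned long* hashAll = new unsigned long[size];
      MPI_Allgather(&myHash, 1, MPI_UNSIGNED_LONG,
                    hashAll, 1, MPI_UNSIGNED_LONG, globalComm);

      std::map<unsigned long, int> colors, leaders;  // hash -> colour / leader rank
      int c = 0;
      for (int i = 0; i < size; ++i)
        if (colors.find(hashAll[i]) == colors.end())
        {
          colors[hashAll[i]] = c++;
          leaders[hashAll[i]] = i;                   // first rank seen is the leader
        }

      MPI_Comm_split(globalComm, colors[myHash], rank, intraComm);
      *myLeader = leaders[myHash];
      delete[] hashAll;
    }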
XIOS/dev/branch_openmp/src/server.hpp
r1134 r1328 7 7 #include "mpi.hpp" 8 8 #include "event_scheduler.hpp" 9 10 #ifdef _usingEP11 #include "ep_declaration.hpp"12 #endif13 9 14 10 namespace xios … … 28 24 static void registerContext(void* buff,int count, int leaderRank=0); 29 25 30 static MPI_Comm intraComm;31 static list< MPI_Comm> interComm;32 static std::list< MPI_Comm> contextInterComms;26 static ep_lib::MPI_Comm intraComm; 27 static list<ep_lib::MPI_Comm> interComm; 28 static std::list<ep_lib::MPI_Comm> contextInterComms; 33 29 static CEventScheduler* eventScheduler; 34 30 -
XIOS/dev/branch_openmp/src/test/test_client.f90
r1134 r1328 35 35 36 36 CALL MPI_INIT(ierr) 37 37 38 CALL init_wait 38 39 CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)40 if(rank < 2) then41 39 42 40 !!! XIOS Initialization (get the local communicator) … … 74 72 75 73 CALL xios_context_initialize("test",comm) 76 77 74 CALL xios_get_handle("test",ctx_hdl) 78 75 CALL xios_set_current_context(ctx_hdl) … … 128 125 CALL xios_is_defined_field_attr("field_A",enabled=ok) 129 126 PRINT *,"field_A : attribute enabled is defined ? ",ok 130 131 127 CALL xios_close_context_definition() 132 128 133 129 PRINT*,"field field_A is active ? ",xios_field_is_active("field_A") 134 135 call MPI_Barrier(comm, ierr)136 137 130 DO ts=1,24*10 138 131 CALL xios_update_calendar(ts) 139 132 CALL xios_send_field("field_A",field_A) 140 CALL wait_us(5000) 133 CALL wait_us(5000) ; 141 134 ENDDO 142 135 … … 148 141 149 142 CALL xios_finalize() 150 print *, "Client : xios_finalize "151 152 else153 154 CALL xios_init_server155 print *, "Server : xios_finalize "156 157 endif158 159 143 160 144 CALL MPI_FINALIZE(ierr) -
XIOS/dev/branch_openmp/src/test/test_complete.f90
r1134 r1328 5 5 IMPLICIT NONE 6 6 INCLUDE "mpif.h" 7 INTEGER :: rank , size7 INTEGER :: rank 8 8 INTEGER :: size_loc 9 9 INTEGER :: ierr … … 28 28 INTEGER, ALLOCATABLE :: kindex(:) 29 29 INTEGER :: ni,ibegin,iend,nj,jbegin,jend 30 INTEGER :: i,j,l,ts,n, nb_pt , provided30 INTEGER :: i,j,l,ts,n, nb_pt 31 31 32 32 !!! MPI Initialization 33 33 34 CALL MPI_INIT_THREAD(3, provided, ierr) 35 if(provided .NE. 3) then 36 print*, "provided thread level = ", provided 37 call MPI_Abort() 38 endif 39 40 34 CALL MPI_INIT(ierr) 41 35 42 36 CALL init_wait 43 44 CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)45 CALL MPI_COMM_SIZE(MPI_COMM_WORLD,size,ierr)46 if(rank < size-1) then47 37 48 38 !!! XIOS Initialization (get the local communicator) … … 230 220 !#################################################################################### 231 221 232 DO ts=1,24*2 233 !DO ts=1,24 222 DO ts=1,24*10 234 223 235 224 CALL xios_get_handle("atmosphere",ctx_hdl) … … 266 255 !!! Fin des contextes 267 256 268 269 CALL xios_get_handle("surface",ctx_hdl) 270 257 CALL xios_context_finalize() 258 CALL xios_get_handle("atmosphere",ctx_hdl) 271 259 CALL xios_set_current_context(ctx_hdl) 272 260 CALL xios_context_finalize() 273 261 274 print *, "xios_context_finalize(surface)"275 276 CALL xios_get_handle("atmosphere",ctx_hdl)277 278 CALL xios_set_current_context(ctx_hdl)279 280 CALL xios_context_finalize()281 282 print *, "xios_context_finalize(atmosphere)"283 284 285 286 !!! Fin de XIOS287 288 289 290 CALL xios_finalize()291 292 262 DEALLOCATE(lon, lat, field_A_atm, lonvalue) 293 263 DEALLOCATE(kindex, field_A_srf) 294 264 295 print *, "Client : xios_finalize " 265 !!! Fin de XIOS 296 266 297 267 CALL MPI_COMM_FREE(comm, ierr) 298 268 299 else 300 301 CALL xios_init_server 302 print *, "Server : xios_finalize " 303 304 endif 305 269 CALL xios_finalize() 306 270 307 271 CALL MPI_FINALIZE(ierr) -
XIOS/dev/branch_openmp/src/test/test_remap.f90
r1141 r1328 42 42 CALL MPI_INIT(ierr) 43 43 CALL init_wait 44 45 CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)46 CALL MPI_COMM_SIZE(MPI_COMM_WORLD,size,ierr)47 if(rank < size-2) then48 44 49 45 !!! XIOS Initialization (get the local communicator) … … 232 228 233 229 CALL xios_finalize() 234 235 print *, "Client : xios_finalize "236 237 else238 239 CALL xios_init_server240 print *, "Server : xios_finalize "241 242 endif243 230 244 231 CALL MPI_FINALIZE(ierr) -
XIOS/dev/branch_openmp/src/timer.cpp
r1205 r1328 6 6 #include <sstream> 7 7 #include "tracer.hpp" 8 using namespace ep_lib; 8 9 9 10 namespace xios 10 11 { 11 std::map<std::string,CTimer> CTimer::allTimer;12 //std::map<std::string,CTimer> CTimer::allTimer; 12 13 std::map<std::string,CTimer> *CTimer::allTimer_ptr = 0; 13 14 … … 55 56 CTimer& CTimer::get(const std::string name) 56 57 { 57 // bkp 58 // std::map<std::string,CTimer>::iterator it = allTimer.find(name); 59 // if (it == allTimer.end()) 60 // it = allTimer.insert(std::make_pair(name, CTimer(name))).first; 61 // return it->second; 58 if(allTimer_ptr == NULL) allTimer_ptr = new std::map<std::string,CTimer>; 62 59 63 if(allTimer_ptr == 0) allTimer_ptr = new std::map<std::string,CTimer>; 60 //std::map<std::string,CTimer>::iterator it = allTimer.find(name); 61 std::map<std::string,CTimer>::iterator it = allTimer_ptr->find(name); 62 //if (it == allTimer.end()) 63 if (it == allTimer_ptr->end()) 64 it = allTimer_ptr->insert(std::make_pair(name, CTimer(name))).first; 64 65 65 std::map<std::string,CTimer>::iterator it = (*allTimer_ptr).find(name); 66 if (it == (*allTimer_ptr).end()) 67 it = (*allTimer_ptr).insert(std::make_pair(name, CTimer(name))).first; 66 //it = allTimer.insert(std::make_pair(name, CTimer(name))).first; 68 67 return it->second; 69 68 } … … 72 71 { 73 72 std::ostringstream strOut ; 74 for(std::map<std::string,CTimer>::iterator it=allTimer.begin();it!=allTimer.end();++it) 73 if(allTimer_ptr == 0) allTimer_ptr = new std::map<std::string,CTimer>; 74 //for(std::map<std::string,CTimer>::iterator it=allTimer.begin();it!=allTimer.end();++it) 75 for(std::map<std::string,CTimer>::iterator it=allTimer_ptr->begin();it!=allTimer_ptr->end();++it) 75 76 strOut<<"Timer : "<<it->first<<" --> cumulated time : "<<it->second.getCumulatedTime()<<std::endl ; 76 77 return strOut.str() ; -
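timer.cpp above turns the static allTimer map into a pointer allocated on the first call to get(), with the same find-or-insert lookup; suspend()/resume() then accumulate into the returned timer. A self-contained sketch, using std::chrono in place of the MPI/ep_lib clock XIOS relies on:

    #include <chrono>
    #include <map>
    #include <string>
    #include <utility>

    static double now()
    {
      return std::chrono::duration<double>(
          std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    struct Timer
    {
      double cumulated = 0.0;
      double t0 = 0.0;
      void resume()  { t0 = now(); }               // start (or restart) the stopwatch
      void suspend() { cumulated += now() - t0; }  // bank the elapsed interval
    };

    static std::map<std::string, Timer>* allTimers = nullptr;

    Timer& getTimer(const std::string& name)
    {
      if (allTimers == nullptr)                    // first call allocates the registry
        allTimers = new std::map<std::string, Timer>();
      std::map<std::string, Timer>::iterator it = allTimers->find(name);
      if (it == allTimers->end())
        it = allTimers->insert(std::make_pair(name, Timer())).first;
      return it->second;
    }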
XIOS/dev/branch_openmp/src/timer.hpp
r1205 r1328 20 20 void reset(void); 21 21 double getCumulatedTime(void); 22 static std::map<std::string,CTimer> allTimer; 23 22 //static std::map<std::string,CTimer> allTimer; 24 23 static std::map<std::string,CTimer> *allTimer_ptr; 25 #pragma omp threadprivate(allTimer_ptr)26 27 24 static double getTime(void); 28 25 static CTimer& get(std::string name); -
XIOS/dev/branch_openmp/src/transformation/Functions/average_reduction.cpp
r1205 r1328 31 31 CArray<double,1>& dataOut, 32 32 std::vector<bool>& flagInitial, 33 bool ignoreMissingValue )33 bool ignoreMissingValue, bool firstPass) 34 34 { 35 35 if (resetWeight_) { weights_.resize(flagInitial.size()); weights_ = 1.0; resetWeight_ = false; } … … 39 39 int nbLocalIndex = localIndex.size(); 40 40 int currentlocalIndex = 0; 41 double currentWeight = 0.0; 41 double currentWeight = 0.0; 42 43 if (firstPass) dataOut=std::numeric_limits<double>::quiet_NaN(); 42 44 43 45 for (int idx = 0; idx < nbLocalIndex; ++idx) … … 57 59 weights_(currentlocalIndex) += 1.0; 58 60 } 59 }60 else61 {62 if (flagInitial[currentlocalIndex])63 dataOut(currentlocalIndex) = std::numeric_limits<double>::quiet_NaN();64 61 } 65 62 } -
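The reduction functors (average here, min/max in the following hunks) gain a firstPass flag: instead of writing NaN element-by-element whenever a slot is still uninitialised, the whole output is seeded with NaN once on the first pass and only defined inputs overwrite it afterwards. A simplified sketch of that pattern for a max reduction, using flat arrays in place of the localIndex mapping:

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <limits>
    #include <vector>

    // flagInitial[i] stays true until out[i] has received its first defined value.
    void reduceMax(const std::vector<double>& in,
                   std::vector<double>& out,
                   std::vector<bool>& flagInitial,
                   bool firstPass)
    {
      if (firstPass)   // seed the whole output with NaN once, not per element
        out.assign(out.size(), std::numeric_limits<double>::quiet_NaN());

      for (std::size_t i = 0; i < in.size(); ++i)
      {
        if (std::isnan(in[i])) continue;     // missing input: leave out[i] alone
        if (flagInitial[i])
        {
          out[i] = in[i];                    // first defined value initialises the slot
          flagInitial[i] = false;
        }
        else
          out[i] = std::max(in[i], out[i]);
      }
    }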
XIOS/dev/branch_openmp/src/transformation/Functions/average_reduction.hpp
r1076 r1328 27 27 CArray<double,1>& dataOut, 28 28 std::vector<bool>& flagInitial, 29 bool ignoreMissingValue );29 bool ignoreMissingValue, bool firstPass); 30 30 31 31 virtual void updateData(CArray<double,1>& dataOut); -
XIOS/dev/branch_openmp/src/transformation/Functions/extract.cpp
r1076 r1328 30 30 CArray<double,1>& dataOut, 31 31 std::vector<bool>& flagInitial, 32 bool ignoreMissingValue )32 bool ignoreMissingValue, bool firstPass) 33 33 { 34 34 int nbLocalIndex = localIndex.size(); -
XIOS/dev/branch_openmp/src/transformation/Functions/extract.hpp
r1076 r1328 27 27 CArray<double,1>& dataOut, 28 28 std::vector<bool>& flagInitial, 29 bool ignoreMissingValue );29 bool ignoreMissingValue, bool firstPass); 30 30 31 31 virtual ~CExtractReductionAlgorithm() {} -
XIOS/dev/branch_openmp/src/transformation/Functions/max_reduction.cpp
r1205 r1328 31 31 CArray<double,1>& dataOut, 32 32 std::vector<bool>& flagInitial, 33 bool ignoreMissingValue )33 bool ignoreMissingValue, bool firstPass) 34 34 { 35 35 if (ignoreMissingValue) 36 36 { 37 37 int nbLocalIndex = localIndex.size(); 38 int currentlocalIndex = 0; 38 int currentlocalIndex = 0; 39 if (firstPass) dataOut=std::numeric_limits<double>::quiet_NaN(); 39 40 for (int idx = 0; idx < nbLocalIndex; ++idx) 40 41 { … … 51 52 dataOut(currentlocalIndex) = std::max(*(dataInput + idx), dataOut(currentlocalIndex)); 52 53 } 53 }54 else55 {56 if (flagInitial[currentlocalIndex])57 dataOut(currentlocalIndex) = std::numeric_limits<double>::quiet_NaN();58 54 } 59 55 } -
XIOS/dev/branch_openmp/src/transformation/Functions/max_reduction.hpp
r1076 r1328 27 27 CArray<double,1>& dataOut, 28 28 std::vector<bool>& flagInitial, 29 bool ignoreMissingValue );29 bool ignoreMissingValue, bool firstPass); 30 30 31 31 virtual ~CMaxReductionAlgorithm() {} -
XIOS/dev/branch_openmp/src/transformation/Functions/min_reduction.cpp
r1205 r1328 31 31 CArray<double,1>& dataOut, 32 32 std::vector<bool>& flagInitial, 33 bool ignoreMissingValue )33 bool ignoreMissingValue, bool firstPass) 34 34 { 35 35 if (ignoreMissingValue) 36 36 { 37 37 int nbLocalIndex = localIndex.size(); 38 int currentlocalIndex = 0; 38 int currentlocalIndex = 0; 39 if (firstPass) dataOut=std::numeric_limits<double>::quiet_NaN(); 39 40 for (int idx = 0; idx < nbLocalIndex; ++idx) 40 41 { … … 51 52 dataOut(currentlocalIndex) = std::min(*(dataInput + idx), dataOut(currentlocalIndex)); 52 53 } 53 }54 else55 {56 if (flagInitial[currentlocalIndex])57 dataOut(currentlocalIndex) = std::numeric_limits<double>::quiet_NaN();58 54 } 59 55 } -
XIOS/dev/branch_openmp/src/transformation/Functions/min_reduction.hpp
r1076 r1328 27 27 CArray<double,1>& dataOut, 28 28 std::vector<bool>& flagInitial, 29 bool ignoreMissingValue );29 bool ignoreMissingValue, bool firstPass); 30 30 31 31 virtual ~CMinReductionAlgorithm() {} -
XIOS/dev/branch_openmp/src/transformation/Functions/reduction.cpp
r1155 r1328 10 10 CReductionAlgorithm::CallBackMap* CReductionAlgorithm::reductionCreationCallBacks_ = 0; 11 11 //std::map<StdString,EReductionType> CReductionAlgorithm::ReductionOperations = std::map<StdString,EReductionType>(); 12 std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr = 0; 13 //#pragma omp threadprivate(CReductionAlgorithm::ReductionOperations_ptr) 12 std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr = 0; 14 13 14 bool CReductionAlgorithm::initReductionOperation(std::map<StdString,EReductionType>& m) 15 { 16 // So so stupid way to intialize operation but it works ... 17 m["sum"] = TRANS_REDUCE_SUM; 18 CSumReductionAlgorithm::registerTrans(); 19 20 m["min"] = TRANS_REDUCE_MIN; 21 CMinReductionAlgorithm::registerTrans(); 22 23 m["max"] = TRANS_REDUCE_MAX; 24 CMaxReductionAlgorithm::registerTrans(); 25 26 m["extract"] = TRANS_REDUCE_EXTRACT; 27 CExtractReductionAlgorithm::registerTrans(); 28 29 m["average"] = TRANS_REDUCE_AVERAGE; 30 CAverageReductionAlgorithm::registerTrans(); 31 } 15 32 16 33 bool CReductionAlgorithm::initReductionOperation() … … 35 52 36 53 //bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(CReductionAlgorithm::ReductionOperations); 37 //bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation();54 bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(); 38 55 39 56 CReductionAlgorithm* CReductionAlgorithm::createOperation(EReductionType reduceType) 40 57 { 41 58 int reduceTypeInt = reduceType; 42 43 59 CallBackMap::const_iterator it = (*reductionCreationCallBacks_).find(reduceType); 44 60 if ((*reductionCreationCallBacks_).end() == it) -
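The reduction.cpp change gains an initReductionOperation(std::map&) overload that fills the name-to-enum map and registers each concrete algorithm, and the _dummyInit static is re-enabled so registration runs at load time. A compact sketch of that registry-plus-factory idiom; the enum values, class names, and functions below are illustrative, not the XIOS declarations:

    #include <map>
    #include <string>

    enum EReduce { REDUCE_SUM, REDUCE_MIN, REDUCE_MAX };

    struct Reduction { virtual ~Reduction() {} };
    struct SumReduction : Reduction
    {
      static Reduction* create() { return new SumReduction; }
    };

    typedef Reduction* (*CreateFn)();

    static std::map<std::string, EReduce>* ops       = 0;
    static std::map<EReduce, CreateFn>*    factories = 0;

    // Mirrors initReductionOperation(): fill the name map and register each
    // algorithm's factory callback in one place.
    static bool initOps()
    {
      ops       = new std::map<std::string, EReduce>;
      factories = new std::map<EReduce, CreateFn>;
      (*ops)["sum"]            = REDUCE_SUM;
      (*factories)[REDUCE_SUM] = &SumReduction::create;
      // ... min, max, extract, average each add the same two lines ...
      return true;
    }

    // File-scope dummy whose initializer triggers registration at load time,
    // as the re-enabled _dummyInit does in the changeset.
    static bool dummyInit = initOps();

    Reduction* createOperation(EReduce t) { return (*factories)[t](); }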
XIOS/dev/branch_openmp/src/transformation/Functions/reduction.hpp
r1155 r1328 25 25 //static std::map<StdString,EReductionType> ReductionOperations; 26 26 static std::map<StdString,EReductionType> *ReductionOperations_ptr; 27 #pragma omp threadprivate(ReductionOperations_ptr)28 27 29 28 public: 30 CReductionAlgorithm() { 29 CReductionAlgorithm() {} 31 30 32 31 /*! … … 43 42 \param [in/out] dataOut Array contains local data 44 43 \param [in/out] flagInitial vector of boolean to mark the local index already initialized. True means there is a need for initialization 44 \param [in] firstPass indicate if it is the first time the apply funtion is called for a same transformation, in order to make a clean initialization 45 45 */ 46 46 virtual void apply(const std::vector<std::pair<int,double> >& localIndex, … … 48 48 CArray<double,1>& dataOut, 49 49 std::vector<bool>& flagInitial, 50 bool ignoreMissingValue ) = 0;50 bool ignoreMissingValue, bool firstPass) = 0; 51 51 /*! 52 52 Update local data … … 62 62 typedef std::map<EReductionType, CreateOperationCallBack> CallBackMap; 63 63 static CallBackMap* reductionCreationCallBacks_; 64 #pragma omp threadprivate(reductionCreationCallBacks_)65 64 66 65 static bool registerOperation(EReductionType reduceType, CreateOperationCallBack createFn); … … 71 70 static bool initReductionOperation(); 72 71 static bool _dummyInit; 73 #pragma omp threadprivate(_dummyInit)74 72 }; 75 73 -
XIOS/dev/branch_openmp/src/transformation/Functions/sum_reduction.cpp
r1205 r1328 31 31 CArray<double,1>& dataOut, 32 32 std::vector<bool>& flagInitial, 33 bool ignoreMissingValue )33 bool ignoreMissingValue, bool firstPass) 34 34 { 35 35 if (ignoreMissingValue) 36 36 { 37 37 int nbLocalIndex = localIndex.size(); 38 int currentlocalIndex = 0; 38 int currentlocalIndex = 0; 39 40 if (firstPass) dataOut=std::numeric_limits<double>::quiet_NaN(); 39 41 40 42 for (int idx = 0; idx < nbLocalIndex; ++idx) … … 52 54 dataOut(currentlocalIndex) += *(dataInput + idx); 53 55 } 54 }55 else56 {57 if (flagInitial[currentlocalIndex])58 dataOut(currentlocalIndex) = std::numeric_limits<double>::quiet_NaN();59 56 } 60 57 } -
XIOS/dev/branch_openmp/src/transformation/Functions/sum_reduction.hpp
r1076 r1328 27 27 CArray<double,1>& dataOut, 28 28 std::vector<bool>& flagInitial, 29 bool ignoreMissingValue );29 bool ignoreMissingValue, bool firstPass); 30 30 31 31 virtual ~CSumReductionAlgorithm() {} -
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_extract_domain.cpp
r1155 r1328 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp" 15 16 16 17 namespace xios { … … 61 62 62 63 pos_ = algo->position; 63 64 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 65 CReductionAlgorithm::initReductionOperation(); 66 67 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 64 //reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 65 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 68 66 } 69 67 … … 72 70 CArray<double,1>& dataOut, 73 71 std::vector<bool>& flagInitial, 74 bool ignoreMissingValue )72 bool ignoreMissingValue, bool firstPass) 75 73 { 76 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue );74 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue, firstPass); 77 75 } 78 76 -
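The axis_algorithm_extract_domain.cpp change switches the operation lookup from operator[] on the dereferenced pointer to ReductionOperations_ptr->at(op). A small illustration of why at() suits a read-only registry (C++11; names here are illustrative):

    #include <map>
    #include <string>

    enum EReduce { REDUCE_EXTRACT };

    // at() never inserts and throws std::out_of_range for an unknown name,
    // whereas operator[] would silently default-construct a bogus entry in
    // the registry.
    EReduce lookupOperation(const std::map<std::string, EReduce>& ops,
                            const std::string& name)
    {
      return ops.at(name);   // C++11 std::map::at
    }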
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_extract_domain.hpp
r1155 r1328 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 26 25 Extract a domain to an axis 27 26 */ 28 class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation , public CReductionAlgorithm27 class CAxisAlgorithmExtractDomain : public CAxisAlgorithmTransformation 29 28 { 30 29 public: … … 35 34 CArray<double,1>& dataOut, 36 35 std::vector<bool>& flagInitial, 37 bool ignoreMissingValue );36 bool ignoreMissingValue, bool firstPass); 38 37 39 38 virtual ~CAxisAlgorithmExtractDomain(); -
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_inverse.cpp
r1134 r1328 15 15 #include "inverse_axis.hpp" 16 16 #include "client_client_dht_template.hpp" 17 using namespace ep_lib; 17 18 18 19 namespace xios { … … 173 174 174 175 // Sending global index of grid source to corresponding process as well as the corresponding mask 175 std::vector< ep_lib::MPI_Request> requests;176 std::vector< ep_lib::MPI_Status> status;176 std::vector<MPI_Request> requests; 177 std::vector<MPI_Status> status; 177 178 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 178 179 boost::unordered_map<int, double* > sendValueToDest; … … 184 185 sendValueToDest[recvRank] = new double [recvSize]; 185 186 186 requests.push_back( ep_lib::MPI_Request());187 requests.push_back(MPI_Request()); 187 188 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 188 189 } … … 206 207 207 208 // Send global index source and mask 208 requests.push_back( ep_lib::MPI_Request());209 requests.push_back(MPI_Request()); 209 210 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 210 211 } … … 213 214 MPI_Waitall(requests.size(), &requests[0], &status[0]); 214 215 215 std::vector<ep_lib::MPI_Request>().swap(requests); 216 std::vector<ep_lib::MPI_Status>().swap(status); 216 217 std::vector<MPI_Request>().swap(requests); 218 std::vector<MPI_Status>().swap(status); 217 219 218 220 // Okie, on destination side, we will wait for information of masked index of source … … 222 224 int recvSize = itSend->second; 223 225 224 requests.push_back( ep_lib::MPI_Request());226 requests.push_back(MPI_Request()); 225 227 MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 226 228 } … … 240 242 } 241 243 // Okie, now inform the destination which source index are masked 242 requests.push_back( ep_lib::MPI_Request());244 requests.push_back(MPI_Request()); 243 245 MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 244 246 } 245 247 status.resize(requests.size()); 246 248 MPI_Waitall(requests.size(), &requests[0], &status[0]); 249 247 250 248 251 size_t nGloAxisDest = axisDest_->n_glo.getValue() - 1; -
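The axis_algorithm_inverse.cpp change drops the explicit ep_lib:: qualifiers in favour of a using-declaration and keeps the grow-as-you-go request pattern: push_back an empty MPI_Request, hand &requests.back() to the nonblocking call, and complete everything with one MPI_Waitall. A self-contained sketch of that pattern with plain MPI types (the tag and one-int payload are placeholders):

    #include <mpi.h>
    #include <map>
    #include <vector>

    // One Irecv and one Isend per peer, finished by a single Waitall.
    // MPI_Request handles are plain values, so vector reallocation during
    // push_back is safe: &requests.back() is only used inside the call.
    void exchangeOneInt(MPI_Comm comm,
                        std::map<int,int>& sendTo,    // peer rank -> value to send
                        std::map<int,int>& recvFrom)  // peer rank -> slot to fill
    {
      std::vector<MPI_Request> requests;

      for (std::map<int,int>::iterator it = recvFrom.begin(); it != recvFrom.end(); ++it)
      {
        requests.push_back(MPI_Request());
        MPI_Irecv(&it->second, 1, MPI_INT, it->first, 46, comm, &requests.back());
      }
      for (std::map<int,int>::iterator it = sendTo.begin(); it != sendTo.end(); ++it)
      {
        requests.push_back(MPI_Request());
        MPI_Isend(&it->second, 1, MPI_INT, it->first, 46, comm, &requests.back());
      }

      if (!requests.empty())
      {
        std::vector<MPI_Status> status(requests.size());
        MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
      }
    }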
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_inverse.hpp
r1134 r1328 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #ifdef _usingEP 15 #include "ep_declaration.hpp" 16 #endif 17 14 18 15 namespace xios { 19 16 -
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_reduce_domain.cpp
r1155 r1328 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp" 15 16 16 17 namespace xios { … … 69 70 70 71 dir_ = (CReduceDomainToAxis::direction_attr::iDir == algo->direction) ? iDir : jDir; 71 72 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 73 CReductionAlgorithm::initReductionOperation(); 74 75 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 72 //reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 73 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 76 74 } 77 75 … … 80 78 CArray<double,1>& dataOut, 81 79 std::vector<bool>& flagInitial, 82 bool ignoreMissingValue )80 bool ignoreMissingValue, bool firstPass) 83 81 { 84 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue );82 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue, firstPass); 85 83 } 86 84 … … 105 103 CArray<int,1>& axisDstIndex = axisDest_->index; 106 104 int ni_glo = domainSrc_->ni_glo, nj_glo = domainSrc_->nj_glo; 107 if ( jDir == dir_)105 if (iDir == dir_) 108 106 { 109 107 int nbAxisIdx = axisDstIndex.numElements(); … … 120 118 } 121 119 } 122 else if ( iDir == dir_)120 else if (jDir == dir_) 123 121 { 124 122 int nbAxisIdx = axisDstIndex.numElements(); -
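The axis_algorithm_reduce_domain.cpp change swaps the iDir/jDir branches so each reduction direction uses the matching index computation (the loop bodies are elided in the diff above). A hedged illustration of the kind of mapping involved; the index formulas below are assumptions for a row-major ni_glo x nj_glo domain with i varying fastest, not the XIOS code:

    #include <vector>

    // For iDir the destination axis runs along j and the reduction collapses
    // i; jDir is the mirror case. The changeset's fix amounts to making each
    // direction select the branch with the matching formula.
    void sourcesForAxisIndex(bool alongI, int axisIdx, int ni_glo, int nj_glo,
                             std::vector<int>& srcGlobalIndex)
    {
      srcGlobalIndex.clear();
      if (alongI)                          // iDir: axis index picks a j row
        for (int i = 0; i < ni_glo; ++i)
          srcGlobalIndex.push_back(axisIdx * ni_glo + i);
      else                                 // jDir: axis index picks an i column
        for (int j = 0; j < nj_glo; ++j)
          srcGlobalIndex.push_back(j * ni_glo + axisIdx);
    }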
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_reduce_domain.hpp
r1155 r1328 12 12 #include "axis_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 25 24 Reduce a domain to an axis 26 25 */ 27 class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation , public CReductionAlgorithm26 class CAxisAlgorithmReduceDomain : public CAxisAlgorithmTransformation 28 27 { 29 28 public: … … 34 33 CArray<double,1>& dataOut, 35 34 std::vector<bool>& flagInitial, 36 bool ignoreMissingValue );35 bool ignoreMissingValue, bool firstPass); 37 36 38 37 virtual void updateData(CArray<double,1>& dataOut); -
XIOS/dev/branch_openmp/src/transformation/axis_algorithm_zoom.cpp
r1205 r1328 44 44 zoomBegin_ = zoomAxis->begin.getValue(); 45 45 zoomSize_ = zoomAxis->n.getValue(); 46 zoomEnd_ = zoomBegin_ + zoomSize_ - 1; 46 zoomEnd_ = zoomBegin_ + zoomSize_ - 1; 47 47 48 48 if (zoomSize_ > axisSource->n_glo.getValue()) -
XIOS/dev/branch_openmp/src/transformation/domain_algorithm_interpolate.cpp
r1205 r1328 20 20 #include "interpolate_domain.hpp" 21 21 #include "grid.hpp" 22 using namespace ep_lib; 22 23 23 24 namespace xios { … … 113 114 nVertexSrc = nVertexDest = constNVertex; 114 115 116 117 115 118 // First of all, try to retrieve the boundary values of domain source and domain destination 116 119 int localDomainSrcSize = domainSrc_->i_index.numElements(); … … 281 284 } 282 285 286 283 287 for (int idx = 0; idx < nDstLocal; ++idx) 284 288 { … … 404 408 CContext* context = CContext::getCurrent(); 405 409 CContextClient* client=context->client; 406 int split_key; 407 ep_lib::MPI_Comm_rank(client->intraComm, &split_key); 408 409 ep_lib::MPI_Comm poleComme(MPI_COMM_NULL); 410 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 9 : 1, split_key, &poleComme); 411 if (MPI_COMM_NULL != poleComme) 410 411 ep_lib::MPI_Comm poleComme; 412 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 413 if (!poleComme.is_null()) 412 414 { 413 415 int nbClientPole; … … 428 430 for(int n=1;n<nbClientPole;++n) displ[n]=displ[n-1]+recvCount[n-1] ; 429 431 int recvSize=displ[nbClientPole-1]+recvCount[nbClientPole-1] ; 430 431 432 432 433 std::vector<int> sendSourceIndexBuff(nbWeight); … … 601 602 double* sendWeightBuff = new double [sendBuffSize]; 602 603 603 std::vector<ep_lib::MPI_Request> sendRequest ;604 std::vector<ep_lib::MPI_Request> sendRequest(3*globalIndexInterpSendToClient.size()); 604 605 605 606 int sendOffSet = 0, l = 0; 607 int position = 0; 606 608 for (itMap = itbMap; itMap != iteMap; ++itMap) 607 609 { … … 622 624 } 623 625 624 sendRequest.push_back(ep_lib::MPI_Request());625 626 ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet, 626 627 k, … … 629 630 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 630 631 client->intraComm, 631 &sendRequest.back()); 632 sendRequest.push_back(ep_lib::MPI_Request()); 632 &sendRequest[position++]); 633 633 ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet, 634 634 k, … … 637 637 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, 638 638 client->intraComm, 639 &sendRequest.back()); 640 sendRequest.push_back(ep_lib::MPI_Request()); 639 &sendRequest[position++]); 641 640 ep_lib::MPI_Isend(sendWeightBuff + sendOffSet, 642 641 k, … … 645 644 MPI_DOMAIN_INTERPOLATION_WEIGHT, 646 645 client->intraComm, 647 &sendRequest .back());646 &sendRequest[position++]); 648 647 sendOffSet += k; 649 648 } … … 661 660 recvBuffSize, 662 661 MPI_INT, 663 MPI_ANY_SOURCE,662 -2, 664 663 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 665 664 client->intraComm, … … 673 672 clientSrcRank = recvStatus.ep_src; 674 673 #endif 674 675 675 ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 676 676 recvBuffSize, … … 698 698 699 699 std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 700 ep_lib::MPI_Status stat_ignore; 701 ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore); 700 ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &requestStatus[0]); 702 701 703 702 delete [] sendIndexDestBuff; … … 712 711 713 712 /*! 
Redefined some functions of CONetCDF4 to make use of them */ 714 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm)713 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm) 715 714 : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {} 716 715 int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name, … … 803 802 std::vector<StdSize> start(1, startIndex - localNbWeight); 804 803 std::vector<StdSize> count(1, localNbWeight); 805 806 WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm));804 805 WriteNetCdf netCdfWriter(filename, client->intraComm); 807 806 808 807 // Define some dimensions -
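Two patterns stand out in the domain_algorithm_interpolate.cpp change above: the pole communicator is now built by splitting client->intraComm with colors 0/1 and key 0 and tested via is_null(), and the send-request vector is preallocated to three requests per peer with a position cursor instead of push_back. A sketch of the communicator split in plain MPI (with ep_lib the unused side can be null-checked; in plain MPI every rank receives a valid communicator unless MPI_UNDEFINED is passed as the color):

    #include <mpi.h>

    // Group the ranks that hold pole interpolation values into their own
    // communicator: color 1 = pole work, color 0 = none; key 0 keeps the
    // original rank order inside each group.
    MPI_Comm splitPoleComm(MPI_Comm intraComm, bool hasPoleValues)
    {
      MPI_Comm poleComm;
      MPI_Comm_split(intraComm, hasPoleValues ? 1 : 0, 0, &poleComm);

      if (hasPoleValues)
      {
        int nbClientPole;
        MPI_Comm_size(poleComm, &nbClientPole);
        // ... gather counts/displacements and combine pole weights here ...
      }
      return poleComm;
    }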
XIOS/dev/branch_openmp/src/transformation/domain_algorithm_interpolate.hpp
r1205 r1328 9 9 #ifndef __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 10 10 #define __XIOS_DOMAIN_ALGORITHM_INTERPOLATE_HPP__ 11 11 #include "mpi_std.hpp" 12 12 #include "domain_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 14 #include "nc4_data_output.hpp" 15 #ifdef _usingEP16 #include "ep_declaration.hpp"17 #endif18 15 19 16 namespace xios { … … 59 56 { 60 57 public: 61 WriteNetCdf(const StdString& filename, const MPI_Comm comm);58 WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm); 62 59 int addDimensionWrite(const StdString& name, const StdSize size = UNLIMITED_DIM); 63 60 int addVariableWrite(const StdString& name, nc_type type, -
XIOS/dev/branch_openmp/src/transformation/generic_algorithm_transformation.cpp
r1205 r1328 30 30 CArray<double,1>& dataOut, 31 31 std::vector<bool>& flagInitial, 32 bool ignoreMissingValue )32 bool ignoreMissingValue, bool firstPass ) 33 33 { 34 34 int nbLocalIndex = localIndex.size(); -
XIOS/dev/branch_openmp/src/transformation/generic_algorithm_transformation.hpp
r1076 r1328 64 64 \param [in/out] flagInitial vector of boolean to mark the local index already initialized. True means there is a need for initalization 65 65 \param [in] ignoreMissingValue don't count missing value in operation if this flag is true 66 \param [in] firstPass indicate if it is the first time the apply funtion is called for a same transformation, in order to make a clean initialization 66 67 */ 67 68 virtual void apply(const std::vector<std::pair<int,double> >& localIndex, … … 69 70 CArray<double,1>& dataOut, 70 71 std::vector<bool>& flagInitial, 71 bool ignoreMissingValue );72 bool ignoreMissingValue, bool firstPass); 72 73 73 74 /*! -
XIOS/dev/branch_openmp/src/transformation/grid_transformation.cpp
r1203 r1328 453 453 sendRankSizeMap[itIndex->first] = sendSize; 454 454 } 455 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);455 ep_lib::MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 456 456 457 457 displ[0]=0 ; … … 460 460 int* recvRankBuff=new int[recvSize]; 461 461 int* recvSizeBuff=new int[recvSize]; 462 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);463 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);462 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 463 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 464 464 for (int i = 0; i < nbClient; ++i) 465 465 { … … 473 473 474 474 // Sending global index of grid source to corresponding process as well as the corresponding mask 475 std::vector<ep_lib::MPI_Request> requests( 2*recvRankSizeMap.size()+2*globaIndexWeightFromSrcToDst.size());475 std::vector<ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2); 476 476 std::vector<ep_lib::MPI_Status> status; 477 477 boost::unordered_map<int, unsigned char* > recvMaskDst; 478 478 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 479 int position = 0;479 int requests_position = 0; 480 480 for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv) 481 481 { … … 485 485 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 486 486 487 488 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[position]); 489 position++; 490 491 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[position]); 492 position++; 487 ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 488 ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 493 489 } 494 490 … … 525 521 526 522 // Send global index source and mask 527 528 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[position]); 529 position++; 530 531 MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[position]); 532 position++; 523 ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 524 ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 533 525 } 534 526 535 527 status.resize(requests.size()); 536 MPI_Waitall(requests.size(), &requests[0], &status[0]);528 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 537 529 538 530 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 539 //std::vector<ep_lib::MPI_Request>().swap(requests); 540 //std::vector<ep_lib::MPI_Status>().swap(status); 541 requests.resize(sendRankSizeMap.size()+recvRankSizeMap.size()); 542 position = 0; 531 requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size()); 532 requests_position = 0; 533 
std::vector<ep_lib::MPI_Status>().swap(status); 543 534 // Okie, on destination side, we will wait for information of masked index of source 544 535 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) … … 547 538 int recvSize = itSend->second; 548 539 549 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]); 550 position++; 540 ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 551 541 } 552 542 … … 584 574 585 575 // Okie, now inform the destination which source index are masked 586 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[position]); 587 position++; 576 ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 588 577 } 589 578 status.resize(requests.size()); 590 MPI_Waitall(requests.size(), &requests[0], &status[0]);579 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]); 591 580 592 581 // Cool, now we can fill in local index of grid destination (counted for masked index) -
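The grid_transformation.cpp change applies the same preallocation idea throughout: the request vector is sized up front (two requests per peer), a requests_position cursor advances as MPI_Irecv/MPI_Isend are posted, and the vector is resized with the cursor reset for the second exchange phase. A condensed sketch of one such phase (tags, element types, and buffer layout are placeholders; buffers are assumed non-empty):

    #include <mpi.h>
    #include <map>
    #include <vector>

    // Preallocated-request pattern: no push_back, the cursor pos indexes
    // straight into the request array, and one Waitall completes the phase.
    void exchangePhase(MPI_Comm comm,
                       std::map<int, std::vector<unsigned long> >& recvBufs,
                       std::map<int, std::vector<unsigned long> >& sendBufs)
    {
      std::vector<MPI_Request> requests(recvBufs.size() + sendBufs.size());
      int pos = 0;

      std::map<int, std::vector<unsigned long> >::iterator it;
      for (it = recvBufs.begin(); it != recvBufs.end(); ++it)
        MPI_Irecv(&it->second[0], static_cast<int>(it->second.size()),
                  MPI_UNSIGNED_LONG, it->first, 46, comm, &requests[pos++]);
      for (it = sendBufs.begin(); it != sendBufs.end(); ++it)
        MPI_Isend(&it->second[0], static_cast<int>(it->second.size()),
                  MPI_UNSIGNED_LONG, it->first, 46, comm, &requests[pos++]);

      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(pos, &requests[0], &status[0]);
      // A second phase would requests.resize(newCount) and reset pos to 0.
    }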
XIOS/dev/branch_openmp/src/transformation/grid_transformation.hpp
r978 r1328 12 12 #include <map> 13 13 #include <vector> 14 #include "mpi_std.hpp" 14 15 #include "generic_algorithm_transformation.hpp" 15 16 #include "transformation_enum.hpp" -
XIOS/dev/branch_openmp/src/transformation/grid_transformation_factory_impl.hpp
r1134 r1328 57 57 typedef std::map<ETranformationType, CreateTransformationCallBack> CallBackMap; 58 58 static CallBackMap* transformationCreationCallBacks_; 59 #pragma omp threadprivate(transformationCreationCallBacks_)60 61 59 static bool registerTransformation(ETranformationType transType, CreateTransformationCallBack createFn); 62 60 static bool unregisterTransformation(ETranformationType transType); 63 61 static bool initializeTransformation_; 64 #pragma omp threadprivate(initializeTransformation_)65 62 }; 66 63 … … 82 79 std::map<int, int>& elementPositionInGridDst2DomainPosition) 83 80 { 84 if (0 == transformationCreationCallBacks_)85 transformationCreationCallBacks_ = new CallBackMap();86 81 typename CallBackMap::const_iterator it = (*transformationCreationCallBacks_).find(transType); 87 82 if ((*transformationCreationCallBacks_).end() == it) -
XIOS/dev/branch_openmp/src/transformation/grid_transformation_selector.cpp
r1106 r1328 10 10 #include "grid.hpp" 11 11 #include "algo_types.hpp" 12 using namespace ep_lib; 12 13 13 14 namespace xios { -
XIOS/dev/branch_openmp/src/transformation/grid_transformation_selector.hpp
r978 r1328 12 12 #include <map> 13 13 #include <vector> 14 #include "mpi_std.hpp" 14 15 #include "generic_algorithm_transformation.hpp" 15 16 #include "transformation_enum.hpp" -
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_extract_axis.cpp
r1155 r1328 14 14 #include "grid_transformation_factory_impl.hpp" 15 15 16 16 #include "reduction.hpp" 17 17 18 18 namespace xios { … … 49 49 StdString op = "extract"; 50 50 pos_ = algo->position; 51 52 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 53 CReductionAlgorithm::initReductionOperation(); 54 55 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 51 //reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 52 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 56 53 } 57 54 … … 60 57 CArray<double,1>& dataOut, 61 58 std::vector<bool>& flagInitial, 62 bool ignoreMissingValue )59 bool ignoreMissingValue, bool firstPass) 63 60 { 64 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue );61 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue, firstPass); 65 62 } 66 63 -
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_extract_axis.hpp
r1155 r1328 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 25 24 Extract a scalar from an axis 26 25 */ 27 class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation , public CReductionAlgorithm26 class CScalarAlgorithmExtractAxis : public CScalarAlgorithmTransformation 28 27 { 29 28 public: … … 34 33 CArray<double,1>& dataOut, 35 34 std::vector<bool>& flagInitial, 36 bool ignoreMissingValue );35 bool ignoreMissingValue, bool firstPass); 37 36 38 37 virtual ~CScalarAlgorithmExtractAxis(); -
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_reduce_axis.cpp
r1155 r1328 13 13 #include "grid.hpp" 14 14 #include "grid_transformation_factory_impl.hpp" 15 #include "reduction.hpp" 16 17 #include "reduction.hpp" 15 18 16 19 namespace xios { 17 18 //extern std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr;19 //#pragma omp threadprivate(CReductionAlgorithm::ReductionOperations_ptr)20 21 20 CGenericAlgorithmTransformation* CScalarAlgorithmReduceAxis::create(CGrid* gridDst, CGrid* gridSrc, 22 21 CTransformation<CScalar>* transformation, … … 76 75 } 77 76 78 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 79 CReductionAlgorithm::initReductionOperation(); 80 81 if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op)) 77 //if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 78 if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 82 79 ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)", 83 80 << "Operation '" << op << "' not found. Please make sure to use a supported one" … … 85 82 << "Scalar destination " << scalarDestination->getId()); 86 83 87 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 84 //reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 85 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 88 86 } 89 87 90 void CScalarAlgorithmReduceAxis::apply(const std::vector<std::pair<int,double> >& localIndex, 91 const double* dataInput, 92 CArray<double,1>& dataOut, 93 std::vector<bool>& flagInitial, 94 bool ignoreMissingValue) 88 void CScalarAlgorithmReduceAxis::apply(const std::vector<std::pair<int,double> >& localIndex, const double* dataInput, CArray<double,1>& dataOut, 89 std::vector<bool>& flagInitial, bool ignoreMissingValue, bool firstPass) 95 90 { 96 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue );91 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue, firstPass); 97 92 } 98 93 … … 115 110 TransformationWeightMap& transWeight = this->transformationWeight_[0]; 116 111 117 CArray<int,1>& axisSrcIndex = axisSrc_->index; 118 int globalIndexSize = axisSrcIndex.numElements(); 112 int globalIndexSize = axisSrc_-> n_glo; 119 113 120 114 for (int idx = 0; idx < globalIndexSize; ++idx) 121 115 { 122 transMap[0].push_back( axisSrcIndex(idx));116 transMap[0].push_back(idx); 123 117 transWeight[0].push_back(1.0); 124 118 } -
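The scalar_algorithm_reduce_axis.cpp change now builds the transformation mapping over the full global axis size (axisSrc_->n_glo) rather than the locally held indices, pushing the plain global index idx. A minimal sketch of the resulting mapping, assuming scalar destination index 0 and unit weights as in the diff:

    #include <map>
    #include <vector>

    // Every global source index along the axis feeds scalar destination 0
    // with unit weight; previously only local axisSrcIndex(idx) values were
    // pushed, which missed remotely held indices.
    void buildReduceAxisMapping(int nGlo,
                                std::map<int, std::vector<int> >&    transMap,
                                std::map<int, std::vector<double> >& transWeight)
    {
      for (int idx = 0; idx < nGlo; ++idx)
      {
        transMap[0].push_back(idx);
        transWeight[0].push_back(1.0);
      }
    }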
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_reduce_axis.hpp
r1155 r1328 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 25 24 Reducing an axis to a scalar 26 25 */ 27 class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation , public CReductionAlgorithm26 class CScalarAlgorithmReduceAxis : public CScalarAlgorithmTransformation 28 27 { 29 28 public: … … 34 33 CArray<double,1>& dataOut, 35 34 std::vector<bool>& flagInitial, 36 bool ignoreMissingValue );35 bool ignoreMissingValue, bool firstPass); 37 36 38 37 virtual void updateData(CArray<double,1>& dataOut); -
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_reduce_domain.cpp
r1155 r1328 14 14 #include "grid_transformation_factory_impl.hpp" 15 15 16 16 #include "reduction.hpp" 17 17 18 18 namespace xios { … … 68 68 69 69 } 70 71 // if(CReductionAlgorithm::ReductionOperations_ptr == 0) 72 // CReductionAlgorithm::initReductionOperation(); 70 73 71 if(CReductionAlgorithm::ReductionOperations_ptr == 0) 72 CReductionAlgorithm::initReductionOperation(); 73 74 if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op)) 74 //if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op)) 75 if (CReductionAlgorithm::ReductionOperations_ptr->end() == CReductionAlgorithm::ReductionOperations_ptr->find(op)) 75 76 ERROR("CScalarAlgorithmReduceDomain::CScalarAlgorithmReduceDomain(CDomain* domainDestination, CDomain* domainSource, CReduceDomainToScalar* algo)", 76 77 << "Operation '" << op << "' not found. Please make sure to use a supported one" … … 78 79 << "Scalar destination " << scalarDestination->getId()); 79 80 80 reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]); 81 //reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]); 82 reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations_ptr->at(op)); 81 83 } 82 84 … … 85 87 CArray<double,1>& dataOut, 86 88 std::vector<bool>& flagInitial, 87 bool ignoreMissingValue )89 bool ignoreMissingValue, bool firstPass) 88 90 { 89 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue );91 reduction_->apply(localIndex, dataInput, dataOut, flagInitial, ignoreMissingValue, firstPass); 90 92 } 91 93 -
XIOS/dev/branch_openmp/src/transformation/scalar_algorithm_reduce_domain.hpp
r1155 r1328 12 12 #include "scalar_algorithm_transformation.hpp" 13 13 #include "transformation.hpp" 14 #include "reduction.hpp"15 14 16 15 namespace xios { … … 25 24 Reducing an DOMAIN to a scalar 26 25 */ 27 class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation , public CReductionAlgorithm26 class CScalarAlgorithmReduceDomain : public CScalarAlgorithmTransformation 28 27 { 29 28 public: … … 34 33 CArray<double,1>& dataOut, 35 34 std::vector<bool>& flagInitial, 36 bool ignoreMissingValue );35 bool ignoreMissingValue, bool firstPass); 37 36 38 37 virtual void updateData(CArray<double,1>& dataOut); -
XIOS/dev/branch_openmp/src/type/type.hpp
r1134 r1328 94 94 const CType_ref& operator = (CType<T>& val) const ; 95 95 const CType_ref& operator = (const CType_ref& val) const; 96 operator T&() const; 96 operator T&() const; 97 97 98 98 inline virtual CBaseType* clone(void) const { return _clone(); } -
XIOS/dev/branch_openmp/src/xios.hpp
r591 r1328 5 5 6 6 /// XIOS headers /// 7 #include "nc4_data_output.hpp" 7 #include "data_output.hpp" 8 //#include "nc4_data_output.hpp" 8 9 9 10 10 11 using namespace xios; 11 using namespace xios::xml;12 using namespace xios::func;12 //using namespace xios::xml; 13 //using namespace xios::func; 13 14 14 15 #endif //__XIOS__ -
XIOS/dev/branch_openmp/src/xios_server.f90
r1134 r1328 1 1 PROGRAM server_main 2 2 USE xios 3 USE mod_wait4 3 IMPLICIT NONE 5 4 INCLUDE "mpif.h" 6 INTEGER :: ierr, th_level 7 8 CALL MPI_INIT(ierr) 9 !CALL MPI_INIT_thread(3, th_level, ierr) 10 CALL init_wait 5 INTEGER :: ierr 6 11 7 CALL xios_init_server 12 13 CALL MPI_FINALIZE(ierr)14 8 15 9 END PROGRAM server_main