Changeset 1134
- Timestamp: 05/16/17 17:54:30
- Location: XIOS/dev/branch_yushan_merged
- Files: 51 added, 120 edited
Legend: lines removed in r1134 are prefixed with "-", lines added with "+"; unmarked lines are unchanged context, and "..." separates hunks.
XIOS/dev/branch_yushan_merged/arch/arch-GCC_LINUX.env
(r395 → r1134)
-export HDF5_INC_DIR=$HOME/hdf5/include
-export HDF5_LIB_DIR=$HOME/hdf5/lib
+export HDF5_INC_DIR=$HOME/lib/hdf5/include
+export HDF5_LIB_DIR=$HOME/lib/hdf5/lib

-export NETCDF_INC_DIR=$HOME/netcdf4/include
-export NETCDF_LIB_DIR=$HOME/netcdf4/lib
+export NETCDF_INC_DIR=$HOME/lib/netcdf/include
+export NETCDF_LIB_DIR=$HOME/lib/netcdf/lib
XIOS/dev/branch_yushan_merged/arch/arch-GCC_LINUX.fcm
(r591 → r1134)
 ################################################################################

-%CCOMPILER      mpicc
-%FCOMPILER      mpif90
-%LINKER         mpif90
+%CCOMPILER      mpicc -fopenmp -D_openmpi -D_usingEP
+%FCOMPILER      mpif90 -fopenmp
+%LINKER         mpif90 -fopenmp -D_openmpi -D_usingEP

 %BASE_CFLAGS    -ansi -w
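These compiler flags are the visible switch for the endpoint-MPI ("EP") build this changeset introduces: -fopenmp turns on OpenMP, while -D_usingEP plus an MPI-flavour define (-D_openmpi here, -D_intelmpi in the Intel arch files below) select the ep_lib wrappers at compile time. A minimal sketch of the guard these defines drive; the fallback typedefs are illustrative assumptions, the real dispatch lives in mpi.hpp / ep_declaration.hpp (see the source diffs below):

    // Hypothetical sketch of the compile-time switch driven by -D_usingEP.
    #ifdef _usingEP
      #include "ep_declaration.hpp"   // endpoint types: ep_lib::MPI_Comm, ep_lib::MPI_Request, ...
    #else
      #include <mpi.h>
      namespace ep_lib
      {
        typedef ::MPI_Comm    MPI_Comm;     // fall back to the plain MPI handles
        typedef ::MPI_Request MPI_Request;
      }
    #endif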
XIOS/dev/branch_yushan_merged/arch/arch-GCC_LINUX.path
(r475 → r1134)
-NETCDF_INCDIR="-I $NETCDF_INC_DIR "
-NETCDF_LIBDIR="-L $NETCDF_LIB_DIR "
-NETCDF_LIB="-lnetcdff -lnetcdf"
+NETCDF_INCDIR="-I $NETCDF_INC_DIR -I $HOME/lib/netcdf_f/include"
+NETCDF_LIBDIR="-L $NETCDF_LIB_DIR -L $HOME/lib/netcdf_f/lib"
+NETCDF_LIB=" -lnetcdf"

-MPI_INCDIR=" "
-MPI_LIBDIR=" "
+MPI_INCDIR="-I /usr/local/include"
+MPI_LIBDIR="-L /usr/local/lib"
 MPI_LIB=""

 HDF5_INCDIR="-I $HDF5_INC_DIR"
 HDF5_LIBDIR="-L $HDF5_LIB_DIR"
-HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz "
+HDF5_LIB="-lhdf5_hl -lhdf5 -lhdf5 -lz -ldl -lcurl"

 OASIS_INCDIR="-I$PWD/../../oasis3-mct/BLD/build/lib/psmile.MPI1"
XIOS/dev/branch_yushan_merged/arch/arch-X64_ADA.fcm
(r985 → r1134)
 ################################################################################

-%CCOMPILER      mpiicc
-%FCOMPILER      mpiifort
-%LINKER         mpiifort -nofor-main
+%CCOMPILER      mpiicc -qopenmp -D_usingEP -D_intelmpi
+%FCOMPILER      mpiifort -qopenmp -D_usingEP -D_intelmpi
+%LINKER         mpiifort -nofor-main -qopenmp -D_usingEP -D_intelmpi

 %BASE_CFLAGS    -diag-disable 1125 -diag-disable 279
XIOS/dev/branch_yushan_merged/arch/arch-X64_CURIE.fcm
(r1002 → r1134)
 ################################################################################

-%CCOMPILER      mpicc
-%FCOMPILER      mpif90
+%CCOMPILER      mpicc -openmp -D_openmpi -D_usingEP
+%FCOMPILER      mpif90 -openmp -D_openmpi -D_usingEP
 %LINKER         mpif90 -nofor-main
XIOS/dev/branch_yushan_merged/bld.cfg
(r1118 → r1134)
 src::netcdf $PWD/extern/netcdf4
 src::remap $PWD/extern/remap/src
+src::src_ep_dev $PWD/extern/src_ep_dev
 bld::lib xios
-bld::target libxios.a
+#bld::target libxios.a
 #bld::target generate_fortran_interface.exe
-bld::target xios_server.exe
+#bld::target xios_server.exe
 #bld::target test_remap.exe
 #bld::target test_regular.exe
 #bld::target test_expand_domain.exe
 #bld::target test_new_features.exe test_unstruct_complete.exe
+bld::target test_omp.exe test_complete_omp.exe
 bld::target test_client.exe test_complete.exe test_xios2_cmip6.exe
 #bld::target test_connectivity_expand.exe
...
 bld::excl_dep use::netcdf
 bld::excl_dep inc::mpif.h
+bld::excl_dep use::omp_lib
XIOS/dev/branch_yushan_merged/extern/remap/src/mapper.cpp
(r1114 → r1134) Whitespace-only change: the spacing of `if (quantity) w/=srcArea ;` (between `double w = (*it)->area;` and the first-order weighting comment) is adjusted; no functional change.
XIOS/dev/branch_yushan_merged/extern/remap/src/mapper.hpp
(r1114 → r1134)
 #include "parallel_tree.hpp"
 #include "mpi.hpp"
+
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace sphereRemap {
...
 public:
-  Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
+  Mapper(ep_lib::MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
   ~Mapper();
   void setVerbosity(verbosity v) {verbose=v ;}
...
   CParallelTree sstree;
-  MPI_Comm communicator ;
+  ep_lib::MPI_Comm communicator ;
   std::vector<Elt> sourceElements ;
   std::vector<Node> sourceMesh ;
XIOS/dev/branch_yushan_merged/extern/remap/src/mpi_routing.cpp
(r694 → r1134)
 #include "timerRemap.hpp"
 #include <iostream>
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace sphereRemap {
...
 CTimer::get("CMPIRouting::init(reduce_scatter)").print();

-MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
-MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
+MPI_Info info_null;
+
+MPI_Alloc_mem(nbTarget *sizeof(int), info_null, &targetRank);
+MPI_Alloc_mem(nbSource *sizeof(int), info_null, &sourceRank);

 targetRankToIndex = new int[mpiSize];
...
 for (int i = 0; i < nbSource; i++)
 {
+#ifdef _usingEP
+  MPI_Irecv(&sourceRank[i], 1, MPI_INT, -1, 0, communicator, &request[indexRequest]);
+#else
   MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+#endif
   indexRequest++;
 }
...
(the same #ifdef _usingEP guard is added around a second, identical MPI_Irecv loop further down)
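Two recurring EP workarounds appear here: an MPI_Info handle is passed to MPI_Alloc_mem instead of the MPI_INFO_NULL constant, and a wildcard receive source is spelled -1 instead of MPI_ANY_SOURCE when _usingEP is defined (the same substitution recurs in event_scheduler.cpp below), presumably because the endpoint wrapper cannot reuse the standard constants. A condensed sketch of the receive-side pattern, wrapped in a hypothetical helper for self-containedness:

    #include "mpi.hpp"   // EP-aware header from this branch

    // Condensed from the receive loops above: -1 stands in for
    // MPI_ANY_SOURCE when the endpoint wrappers are active.
    void post_wildcard_recv(int* buf, MPI_Comm communicator, MPI_Request* req)
    {
    #ifdef _usingEP
        MPI_Irecv(buf, 1, MPI_INT, -1, 0, communicator, req);
    #else
        MPI_Irecv(buf, 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, req);
    #endif
    }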
XIOS/dev/branch_yushan_merged/extern/remap/src/parallel_tree.hpp
(r694 → r1134)
 #include "mpi_cascade.hpp"
 #include "mpi.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace sphereRemap {
...
 public:
-  CParallelTree(MPI_Comm comm);
+  CParallelTree(ep_lib::MPI_Comm comm);
   ~CParallelTree();
...
   vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
   CMPICascade cascade;
-  MPI_Comm communicator ;
+  ep_lib::MPI_Comm communicator ;
XIOS/dev/branch_yushan_merged/extern/remap/src/tree.cpp
(r1066 → r1134)
 root->parent = 0;
 root->leafCount = 0;
 // initialize root node on the sphere
-root->centre.x=1 ; root->centre.y=0 ; root->centre.z=0 ;
+root->centre.x=1 ;
+root->centre.y=0 ;
+root->centre.z=0 ;
 root->radius = 0.;
 root->reinserted = false;
XIOS/dev/branch_yushan_merged/inputs/COMPLETE/context_atmosphere.xml
(r787 → r1134)
-<file_definition type="multiple_file" par_access="collective" output_freq="6h" sync_freq="6h" output_level="10" enabled=".TRUE.">
+<file_definition type="one_file" par_access="collective" output_freq="6h" sync_freq="6h" output_level="10" enabled=".TRUE.">
 <file id="output_atmosphere" name="output_atmosphere">
 <field field_ref="field_A_atm" />
XIOS/dev/branch_yushan_merged/inputs/COMPLETE/context_surface.xml
(r562 → r1134)
-<file_definition type="multiple_file" par_access="collective" output_level="10" enabled=".TRUE.">
+<file_definition type="one_file" par_access="collective" output_level="10" enabled=".TRUE.">
 <file id="output_surface" name="output_surface_6h" output_freq="6h">
 <field field_ref="field_A_srf">
XIOS/dev/branch_yushan_merged/inputs/iodef.xml
(r787 → r1134)
-<file_definition type="multiple_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE.">
+<file_definition type="one_file" par_access="collective" output_freq="6h" output_level="10" enabled=".TRUE.">
 <file id="output" name="output">
 <field field_ref="field_A_zoom" name="field_A" />
XIOS/dev/branch_yushan_merged/src/array_new.hpp
(r1111 → r1134) Whitespace-only change: trailing spaces on the blank line between `size_t ne;` and `ret = buffer.get(numDim);` are removed; no functional change.
XIOS/dev/branch_yushan_merged/src/attribute.cpp
(r1112 → r1134) Whitespace-only change: a blank line is added before `CMessage& operator<<(CMessage& msg, CAttribute& type)`; no functional change.
XIOS/dev/branch_yushan_merged/src/attribute_enum.hpp
(r1112 → r1134) Indentation-only cleanup: the entire CAttributeEnum&lt;T&gt; class declaration is re-indented to a consistent style — the constructors, the getValue/getStringValue accessors, the setValue/set/reset mutators, the setInheritedValue/getInheritedValue/getInheritedStringValue/hasInheritedValue helpers, both isEqual overloads, the destructor, operator=, the toString/fromString and toBuffer/fromBuffer virtuals, the generateCInterface / generateFortran2003Interface / generateFortranInterface*Declaration* / generateFortranInterface*Body* methods, and the private _toString/_fromString/_toBuffer/_fromBuffer members and inheritedValue field. No declaration is added, removed, or changed.
XIOS/dev/branch_yushan_merged/src/attribute_enum_impl.hpp
(r1112 → r1134) Two cosmetic changes: the mis-encoded comment `/// ////////////////////// D éfinitions ////////////////////// ///` is repaired to read `Définitions`, and all CAttributeEnum&lt;T&gt; template method definitions (constructors, getValue/getStringValue, set/setValue/reset, the inherited-value helpers, both isEqual overloads, operator=, _toString/_fromString, _toBuffer/_fromBuffer, and the generate*Interface* bodies) are re-indented. No functional change.
XIOS/dev/branch_yushan_merged/src/attribute_map.hpp
(r1117 → r1134)
 /// Propriété statique ///
 static CAttributeMap * Current;
+#pragma omp threadprivate (Current)

 }; // class CAttributeMap
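This one-line change is the template for dozens of edits in this changeset: static members that used to be shared process-wide become per-thread copies, so that several client threads can each run their own XIOS context without racing on globals. A self-contained sketch of the semantics, with illustrative names (compile with -fopenmp):

    #include <cstdio>
    #include <omp.h>

    static int current_id = -1;
    #pragma omp threadprivate(current_id)   // each OpenMP thread gets its own copy

    int main()
    {
        #pragma omp parallel
        {
            current_id = omp_get_thread_num();   // no data race: private copy
            std::printf("thread %d sees %d\n", omp_get_thread_num(), current_id);
        }
        return 0;
    }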
XIOS/dev/branch_yushan_merged/src/buffer_client.cpp
(r917 → r1134)
 buffer[1] = new char[bufferSize];
 retBuffer = new CBufferOut(buffer[current], bufferSize);
+#pragma omp critical (_output)
 info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl;
XIOS/dev/branch_yushan_merged/src/buffer_client.hpp
(r917 → r1134)
 #include "mpi.hpp"
 #include "cxios.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace xios
...
 public:
   static size_t maxRequestSize;
+  #pragma omp threadprivate(maxRequestSize)

   CClientBuffer(MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize maxBufferedEvents);
XIOS/dev/branch_yushan_merged/src/buffer_server.hpp
(r717 → r1134)
 #include "xios_spl.hpp"
 #include "buffer.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "cxios.hpp"
XIOS/dev/branch_yushan_merged/src/calendar.cpp
(r561 → r1134)
 const CDate& CCalendar::update(int step)
 {
-  info(20) << "update step : " << step << " timestep " << this->timestep << std::endl;
+  #pragma omp critical (_output)
+  info(80)<< "update step : " << step << " timestep " << this->timestep << std::endl;
   return (this->currentDate = this->getInitDate() + step * this->timestep);
 }
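This logging change shows the other recurring thread-safety idiom of the changeset: writes to the shared info/report streams are wrapped in a critical section named _output so concurrent threads never interleave mid-message (the verbosity level is also raised from 20 to 80, effectively silencing this message at default settings). A minimal sketch of the idiom with a plain stream, names illustrative:

    #include <iostream>
    #include <omp.h>

    // Every thread funnels log output through one named critical section,
    // so lines from different threads never interleave mid-message.
    void log_step(int step, int timestep)
    {
        #pragma omp critical (_output)
        std::cout << "update step : " << step << " timestep " << timestep << std::endl;
    }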
XIOS/dev/branch_yushan_merged/src/client.cpp
(r1032 → r1134)
 #include "timer.hpp"
 #include "buffer_client.hpp"
+#include "log.hpp"

 namespace xios
 {
+  extern int test_omp_rank;
+  #pragma omp threadprivate(test_omp_rank)

   MPI_Comm CClient::intraComm ;
   MPI_Comm CClient::interComm ;
-  std::list<MPI_Comm> CClient::contextInterComms;
+  std::list<MPI_Comm> *CClient::contextInterComms_ptr = 0;
   int CClient::serverLeader ;
   bool CClient::is_MPI_Initialized ;
...
   StdOFStream CClient::m_errorStream;

+  StdOFStream CClient::array_infoStream[10];
+
-  void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)
+  void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
   {
     int initialized ;
...
     // localComm doesn't given
     if (localComm == MPI_COMM_NULL)
     {
       if (!is_MPI_Initialized)
       {
-        MPI_Init(NULL, NULL);
+        //MPI_Init(NULL, NULL);
+        int return_level;
+        MPI_Init_thread(NULL, NULL, 3, &return_level);
+        assert(return_level == 3);
       }
       CTimer::get("XIOS").resume() ;
...
       int myColor ;
       int i,c ;
-      MPI_Comm newComm ;
-
-      MPI_Comm_size(CXios::globalComm,&size) ;
+
+      MPI_Comm_size(CXios::globalComm,&size);
       MPI_Comm_rank(CXios::globalComm,&rank);

       hashAll=new unsigned long[size] ;
...
       MPI_Comm_size(intraComm,&intraCommSize) ;
       MPI_Comm_rank(intraComm,&intraCommRank) ;
-      info(50)<<"intercommCreate::client "<<rank<<" intraCommSize : "<<intraCommSize
-              <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< serverLeader<<endl ;
+
+      #pragma omp critical(_output)
+      {
+        info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
+                <<" intraCommRank :"<<intraCommRank<<" serverLeader "<< serverLeader
+                <<" globalComm : "<< &(CXios::globalComm) << endl ;
+      }
+
       MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
     }
     else
...
     MPI_Comm_dup(intraComm,&returnComm) ;
   }
...
     CContext::setCurrent(id) ;
-    CContext* context=CContext::create(id);
+    CContext* context = CContext::create(id);
+
+    int tmp_rank;
+    MPI_Comm_rank(contextComm,&tmp_rank) ;
+
     StdString idServer(id);
     idServer += "_server";
...
     {
       int size,rank,globalRank ;
-      size_t message_size ;
-      int leaderRank ;
+      //size_t message_size ;
+      //int leaderRank ;
       MPI_Comm contextInterComm ;
...
       CMessage msg ;
       msg<<idServer<<size<<globalRank ;
-      // msg<<id<<size<<globalRank ;

       int messageSize=msg.size() ;
...
       MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ;
-      info(10)<<"Register new Context : "<<id<<endl ;
+
+      #pragma omp critical(_output)
+      info(10)<<" RANK "<< tmp_rank<<" Register new Context : "<<id<<endl ;

       MPI_Comm inter ;
...
       MPI_Barrier(inter) ;

       context->initClient(contextComm,contextInterComm) ;

-      contextInterComms.push_back(contextInterComm);
+      if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+      contextInterComms_ptr->push_back(contextInterComm);
+
       MPI_Comm_free(&inter);
     }
...
       // Finally, we should return current context to context client
       CContext::setCurrent(id);

-      contextInterComms.push_back(contextInterComm);
+      if(contextInterComms_ptr == NULL) contextInterComms_ptr = new std::list<MPI_Comm>;
+      contextInterComms_ptr->push_back(contextInterComm);
     }
   }
...
     MPI_Comm_rank(intraComm,&rank) ;

     if (!CXios::isServer)
...
-    for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+    for (std::list<MPI_Comm>::iterator it = contextInterComms_ptr->begin(); it != contextInterComms_ptr->end(); ++it)
       MPI_Comm_free(&(*it));
+
     MPI_Comm_free(&interComm);
     MPI_Comm_free(&intraComm);
...
     {
       if (CXios::usingOasis) oasis_finalize();
-      else MPI_Finalize() ;
+      else MPI_Finalize();
     }

-    info(20) << "Client side context is finalized"<<endl ;
-    report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-    report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-    report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
-    report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
-    // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-    report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-    report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+    #pragma omp critical (_output)
+    info(20) << "Client "<<rank<<" : Client side context is finalized "<< endl ;
+
+    (the whole performance/memory report block above is kept verbatim but commented out inside a /* ... */ block)
...
     fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;

     fb->open(fileNameClient.str().c_str(), std::ios::out);
     if (!fb->is_open())
       ERROR("void CClient::openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb)",
             << std::endl << "Can not open <" << fileNameClient << "> file to write the client log(s).");
   }
...
   void CClient::openInfoStream(const StdString& fileName)
   {
-    std::filebuf* fb = m_infoStream.rdbuf();
-    openStream(fileName, ".out", fb);
-
-    info.write2File(fb);
-    report.write2File(fb);
+    //std::filebuf* fb = m_infoStream.rdbuf();
+
+    info_FB[omp_get_thread_num()] = array_infoStream[omp_get_thread_num()].rdbuf();
+
+    openStream(fileName, ".out", info_FB[omp_get_thread_num()]);
+
+    info.write2File(info_FB[omp_get_thread_num()]);
+    report.write2File(info_FB[omp_get_thread_num()]);
   }
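Replacing MPI_Init with MPI_Init_thread is required once several OpenMP threads issue MPI calls concurrently. The literal 3 in the diff corresponds to MPI_THREAD_MULTIPLE in the wrappers used here; a standalone sketch using the named constant instead, which aborts if the runtime cannot grant full thread support:

    #include <cassert>
    #include <mpi.h>

    // Ask for full thread support and refuse to continue without it.
    int main(int argc, char** argv)
    {
        int provided;
        MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
        assert(provided == MPI_THREAD_MULTIPLE);
        MPI_Finalize();
        return 0;
    }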
XIOS/dev/branch_yushan_merged/src/client.hpp
(r655 → r1134)
 static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm);
 static void finalize(void);
-static void registerContext(const string& id, MPI_Comm contextComm);
+static void registerContext(const string& id, ep_lib::MPI_Comm contextComm);

 static MPI_Comm intraComm;
+#pragma omp threadprivate(intraComm)
+
 static MPI_Comm interComm;
-static std::list<MPI_Comm> contextInterComms;
+#pragma omp threadprivate(interComm)
+
+//static std::list<MPI_Comm> contextInterComms;
+
+static std::list<MPI_Comm> * contextInterComms_ptr;
+#pragma omp threadprivate(contextInterComms_ptr)
+
 static int serverLeader;
+#pragma omp threadprivate(serverLeader)
+
 static bool is_MPI_Initialized ;
+#pragma omp threadprivate(is_MPI_Initialized)

 //! Get rank of the current process
...
 protected:
 static int rank;
+#pragma omp threadprivate(rank)
+
 static StdOFStream m_infoStream;
+#pragma omp threadprivate(m_infoStream)
+
 static StdOFStream m_errorStream;
+#pragma omp threadprivate(m_errorStream)
+
+static StdOFStream array_infoStream[10];

 static void openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb);
XIOS/dev/branch_yushan_merged/src/client_client_dht_template.hpp
(r941 → r1134)
 #include "xios_spl.hpp"
 #include "array_new.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "policy.hpp"
 #include <boost/unordered_map.hpp>
...
 public:
-  CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
-                           const MPI_Comm& clientIntraComm);
+  CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
+                           const ep_lib::MPI_Comm& clientIntraComm);
...
(the same MPI_Comm → ep_lib::MPI_Comm and std::vector&lt;MPI_Request&gt; → std::vector&lt;ep_lib::MPI_Request&gt; substitution is applied to every remaining signature: the Index2VectorInfoTypeMap constructor, the protected constructor, both computeDistributedIndex overloads, computeIndexInfoMappingLevel, and the sendInfoToClients / recvInfoFromClients / sendIndexToClients / recvIndexFromClients helpers)
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
(r892 → r1134)
 #include "utils.hpp"
 #include "mpi_tag.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace xios
 {
...
(as in the header, every MPI_Comm in the constructor and method definitions becomes ep_lib::MPI_Comm, and every local request/status container switches to the endpoint types, e.g.)
-  std::vector<MPI_Request> request;
+  std::vector<ep_lib::MPI_Request> request;
...
-  std::vector<MPI_Status> status(request.size());
+  std::vector<ep_lib::MPI_Status> status(request.size());
   MPI_Waitall(request.size(), &request[0], &status[0]);
...
(likewise, the raw MPI_Request handles created in sendIndexToClients / recvIndexFromClients / sendInfoToClients / recvInfoFromClients become ep_lib::MPI_Request, and the request/status vectors in sendRecvOnReturn and the final exchange become their ep_lib:: counterparts)
...
(minor accompanying edits: braces are added around the single-statement recvIndexFromClients, sendIndexToClients and sendInfoToClients calls inside their loops; a stale commented-out ProcessDHTElement&lt;InfoType&gt;::packElement line is removed; and a commented-out //MPI_Barrier(this->internalComm_); marker is added before the final MPI_Waitall)
XIOS/dev/branch_yushan_merged/src/client_server_mapping.hpp
(r843 → r1134)
 #include "mpi.hpp"
 #include <boost/unordered_map.hpp>
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif

 namespace xios {
...
 static std::map<int,int> computeConnectedClients(int nbServer, int nbClient,
-                                                 MPI_Comm& clientIntraComm,
+                                                 ep_lib::MPI_Comm& clientIntraComm,
                                                  const std::vector<int>& connectedServerRank);
XIOS/dev/branch_yushan_merged/src/client_server_mapping_distributed.hpp
(r835 → r1134)
 /** Default constructor */
 CClientServerMappingDistributed(const boost::unordered_map<size_t,int>& globalIndexOfServer,
-                                const MPI_Comm& clientIntraComm,
+                                const ep_lib::MPI_Comm& clientIntraComm,
                                 bool isDataDistributed = true);
XIOS/dev/branch_yushan_merged/src/context_client.cpp
(r1033 → r1134)
 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode)
 */
-CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer)
+CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)
  : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4)
 {
...
   if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio;
 }
+#ifdef _usingMPI
 MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
+#elif _usingEP
+MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm);
+#endif

 if (minBufferSizeEventSizeRatio < 1.0)
   ERROR("void CContextClient::setBufferSize(const std::map<int,StdSize>& mapSize, const std::map<int,StdSize>& maxEventSize)",
...
 for (itMap = itbMap; itMap != iteMap; ++itMap)
 {
-  report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
-             << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
+  //report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl
+  //           << "  +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
   totalBuf += itMap->second;
 }
-report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;
+//report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl;

 releaseBuffers();
XIOS/dev/branch_yushan_merged/src/context_client.hpp
(r1033 → r1134)
 public:
   // Contructor
-  CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0);
+  CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0);

   // Send event to server
...
   int serverSize; //!< Size of server group

-  MPI_Comm interComm; //!< Communicator of server group
+  ep_lib::MPI_Comm interComm; //!< Communicator of server group

-  MPI_Comm intraComm; //!< Communicator of client group
+  ep_lib::MPI_Comm intraComm; //!< Communicator of client group

   map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers
XIOS/dev/branch_yushan_merged/src/context_server.cpp
(r1033 → r1134)
 #include "file.hpp"
 #include "grid.hpp"
-#include "mpi.hpp"
+#include "mpi_std.hpp"
 #include "tracer.hpp"
 #include "timer.hpp"
...
-CContextServer::CContextServer(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_)
+CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_)
 {
   context=parent;
...
   int count;
   char * addr;
-  MPI_Status status;
+  ep_lib::MPI_Status status;
   map<int,CServerBuffer*>::iterator it;
...
     traceOff();
-    MPI_Iprobe(rank,20,interComm,&flag,&status);
+    ep_lib::MPI_Iprobe(rank,20,interComm,&flag,&status);
     traceOn();
     if (flag==true)
...
       StdSize buffSize = 0;
-      MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status);
+      ep_lib::MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status);
       mapBufferSize_.insert(std::make_pair(rank, buffSize));
       it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first;
     }
     else
     {
-      MPI_Get_count(&status,MPI_CHAR,&count);
+      ep_lib::MPI_Get_count(&status,MPI_CHAR,&count);
       if (it->second->isBufferFree(count))
       {
         addr=(char*)it->second->getBuffer(count);
-        MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);
+        ep_lib::MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]);
         bufferRequest[rank]=addr;
       }
...
 void CContextServer::checkPendingRequest(void)
 {
-  map<int,MPI_Request>::iterator it;
+  map<int,ep_lib::MPI_Request>::iterator it;
   list<int> recvRequest;
   list<int>::iterator itRecv;
...
   int flag;
   int count;
-  MPI_Status status;
-
-  for(it=pendingRequest.begin();it!=pendingRequest.end();it++)
+  ep_lib::MPI_Status status;
+
+  for(it=pendingRequest.begin();it!=pendingRequest.end();++it)
   {
     rank=it->first;
     traceOff();
-    MPI_Test(& it->second, &flag, &status);
+    ep_lib::MPI_Test(& it->second, &flag, &status);
     traceOn();
     if (flag==true)
     {
       recvRequest.push_back(rank);
-      MPI_Get_count(&status,MPI_CHAR,&count);
+      ep_lib::MPI_Get_count(&status,MPI_CHAR,&count);
       processRequest(rank,bufferRequest[rank],count);
     }
...
   {
     finished=true;
+    #pragma omp critical (_output)
     info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl;
     std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(),
...
     for (itMap = itbMap; itMap != iteMap; ++itMap)
     {
-      report(10)<< " Memory report : Context <"<<context->getId()<<"> : server side : memory used for buffer of each connection to client" << endl
-                << "  +) With client of rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
+      //report(10)<< " Memory report : Context <"<<context->getId()<<"> : server side : memory used for buffer of each connection to client" << endl
+      //          << "  +) With client of rank " << itMap->first << " : " << itMap->second << " bytes " << endl;
       totalBuf += itMap->second;
     }
     context->finalize();
-    report(0)<< " Memory report : Context <"<<context->getId()<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
+    //report(0)<< " Memory report : Context <"<<context->getId()<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;
   }
   else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event);
XIOS/dev/branch_yushan_merged/src/context_server.hpp
(r1033 → r1134)
 public:

-  CContextServer(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm) ;
+  CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm) ;
   bool eventLoop(bool enableEventsProcessing = true);
   void listen(void) ;
...
   bool hasFinished(void);

-  MPI_Comm intraComm ;
+  ep_lib::MPI_Comm intraComm ;
   int intraCommSize ;
   int intraCommRank ;

-  MPI_Comm interComm ;
+  ep_lib::MPI_Comm interComm ;
   int commSize ;

   map<int,CServerBuffer*> buffers ;
-  map<int,MPI_Request> pendingRequest ;
+  map<int,ep_lib::MPI_Request> pendingRequest ;
   map<int,char*> bufferRequest ;
XIOS/dev/branch_yushan_merged/src/cxios.cpp
(r1029 → r1134)
 namespace xios
 {
-  string CXios::rootFile="./iodef.xml" ;
-  string CXios::xiosCodeId="xios.x" ;
-  string CXios::clientFile="./xios_client";
-  string CXios::serverFile="./xios_server";
+  extern int test_omp_rank;
+  #pragma omp threadprivate(test_omp_rank)
+
+  const string CXios::rootFile="./iodef.xml" ;
+  const string CXios::xiosCodeId="xios.x" ;
+  const string CXios::clientFile="./xios_client";
+  const string CXios::serverFile="./xios_server";

   bool CXios::isClient ;
   bool CXios::isServer ;
   MPI_Comm CXios::globalComm ;
   bool CXios::usingOasis ;
   bool CXios::usingServer = false;
   double CXios::bufferSizeFactor = 1.0;
   const double CXios::defaultBufferSizeFactor = 1.0;
   StdSize CXios::minBufferSize = 1024 * sizeof(double);
   bool CXios::printLogs2Files;
   bool CXios::isOptPerformance = true;
...
   {
     set_new_handler(noMemory);
-    parseFile(rootFile);
+
+    #pragma omp critical
+    {
+      parseFile(rootFile);
+    }
+    #pragma omp barrier
     parseXiosConfig();
   }
...
     ERROR("CXios::parseXiosConfig()", "recv_field_timeout cannot be negative.");

-    globalComm=MPI_COMM_WORLD ;
+    int num_ep;
+    if(isClient)
+    {
+      num_ep = omp_get_num_threads();
+    }
+
+    if(isServer)
+    {
+      num_ep = omp_get_num_threads();
+    }
+
+    MPI_Info info;
+    #pragma omp master
+    {
+      MPI_Comm *ep_comm;
+      MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);  // servers should reach here too.
+      passage = ep_comm;
+    }
+
+    #pragma omp barrier
+
+    CXios::globalComm = passage[omp_get_thread_num()];
+
+    int tmp_rank;
+    MPI_Comm_rank(CXios::globalComm, &tmp_rank);
+
+    test_omp_rank = tmp_rank;
   }
...
   void CXios::initClientSide(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
   {
+    isClient = true;
+
     initialize() ;

-    isClient = true;
-
     CClient::initialize(codeId,localComm,returnComm) ;
+
     if (CClient::getRank()==0) globalRegistry = new CRegistry(returnComm) ;
...
     if (printLogs2Files)
     {
+      #pragma omp critical
       CClient::openInfoStream(clientFile);
       CClient::openErrorStream(clientFile);
...
     if (CClient::getRank()==0)
     {
+      #pragma omp critical (_output)
       info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ;
       globalRegistry->toFile("xios_registry.bin") ;
...
   void CXios::initServer()
   {
+    int initialized;
+    MPI_Initialized(&initialized);
+    if (initialized) CServer::is_MPI_Initialized=true ;
+    else CServer::is_MPI_Initialized=false ;
+
+    if(!CServer::is_MPI_Initialized)
+    {
+      MPI_Init(NULL, NULL);
+    }
+
     set_new_handler(noMemory);
     std::set<StdString> parseList;
...
   void CXios::initServerSide(void)
   {
-    initServer();
     isClient = false;
     isServer = true;

+    initServer();
+
     // Initialize all aspects MPI
     CServer::initialize();
...
       delete globalRegistry ;
     }
+
     CServer::finalize();
+
     CServer::closeInfoStream();
   }
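The block added to parseXiosConfig() is the heart of the endpoint scheme: instead of adopting MPI_COMM_WORLD, the master thread asks the EP library to split one MPI process into num_ep endpoint communicators (MPI_Comm_create_endpoints is the EP library's call, shipped in extern/src_ep_dev, not a standard MPI routine), publishes the array through the shared passage pointer, and after a barrier every OpenMP thread adopts the handle matching its thread number as its threadprivate globalComm. A condensed sketch of that flow, with passage declared extern here for self-containedness:

    #include <omp.h>
    #include "mpi.hpp"        // EP-aware MPI header from this branch

    extern MPI_Comm *passage; // shared handoff pointer provided by the EP library

    // Condensed from CXios::parseXiosConfig() above: the master thread creates
    // one endpoint communicator per OpenMP thread, then each thread adopts its own.
    MPI_Comm attach_endpoint()
    {
        int num_ep = omp_get_num_threads();
        MPI_Info info;

        #pragma omp master
        {
            MPI_Comm *ep_comm;
            MPI_Comm_create_endpoints(MPI_COMM_WORLD, num_ep, info, ep_comm);
            passage = ep_comm;        // publish to the other threads
        }
        #pragma omp barrier           // endpoints now exist for everyone

        return passage[omp_get_thread_num()];
    }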
XIOS/dev/branch_yushan_merged/src/cxios.hpp
(r1029 → r1134)
 #include "mpi.hpp"
 #include "registry.hpp"
+#include "log.hpp"

 namespace xios
...
 public:
   static void initialize(void) ;
-  static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ;
+  static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ;
   static void initServerSide(void) ;
   static void clientFinalize(void) ;
   static void parseFile(const string& filename) ;
...
 public:
-  static string rootFile;   //!< Configuration filename
-  static string xiosCodeId ; //!< Identity for XIOS
-  static string clientFile;  //!< Filename template for client
-  static string serverFile;  //!< Filename template for server
+  static const string rootFile;   //!< Configuration filename
+  static const string xiosCodeId ; //!< Identity for XIOS
+  static const string clientFile;  //!< Filename template for client
+  static const string serverFile;  //!< Filename template for server

   static bool isClient ; //!< Check if xios is client
   static bool isServer ; //!< Check if xios is server
+  #pragma omp threadprivate(isClient, isServer)

   static MPI_Comm globalComm ; //!< Global communicator
+  #pragma omp threadprivate(globalComm)

   static bool printLogs2Files; //!< Printing out logs into files
   static bool usingOasis ;     //!< Using Oasis
   static bool usingServer ;    //!< Using server (server mode)
   static double bufferSizeFactor; //!< Factor used to tune the buffer size
   static const double defaultBufferSizeFactor; //!< Default factor value
   static StdSize minBufferSize; //!< Minimum buffer size
   static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible)
+  #pragma omp threadprivate(printLogs2Files, usingOasis, usingServer, bufferSizeFactor, minBufferSize, isOptPerformance)

   static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers
-  static double recvFieldTimeout; //!< Time to wait for data before issuing an error when receiving a field
+  static double recvFieldTimeout;
+  #pragma omp threadprivate(recvFieldTimeout)

 public:
   //! Setting xios to use server mode

(the declarations are also re-indented throughout; no other change)
XIOS/dev/branch_yushan_merged/src/dht_auto_indexing.cpp
r1002 r1134
  CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue,
-                                    const MPI_Comm& clientIntraComm)
+                                    const ep_lib::MPI_Comm& clientIntraComm)
    : CClientClientDHTTemplate<size_t>(clientIntraComm)
  {
…
  */
  CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap,
-                                    const MPI_Comm& clientIntraComm)
+                                    const ep_lib::MPI_Comm& clientIntraComm)
    : CClientClientDHTTemplate<size_t>(clientIntraComm)
  {
XIOS/dev/branch_yushan_merged/src/dht_auto_indexing.hpp
r924 r1134
  #include "client_client_dht_template.hpp"
+ #ifdef _usingEP
+ #include "ep_declaration.hpp"
+ #endif
  namespace xios
…
      CDHTAutoIndexing(const CArray<size_t,1>& hashValue,
-                      const MPI_Comm& clientIntraComm);
+                      const ep_lib::MPI_Comm& clientIntraComm);
      CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap,
-                      const MPI_Comm& clientIntraComm);
+                      const ep_lib::MPI_Comm& clientIntraComm);
      size_t getNbIndexesGlobal() const;
XIOS/dev/branch_yushan_merged/src/event_scheduler.cpp
r591 r1134
    while(received)
    {
+     #ifdef _usingEP
+     MPI_Iprobe(-1,1,communicator,&received, &status) ;
+     #else
      MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ;
+     #endif
      if (received)
      {
        recvRequest=new SPendingRequest ;
+       #ifdef _usingEP
+       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -1, 1, communicator, &(recvRequest->request)) ;
+       #else
        MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ;
+       #endif
        pendingRecvParentRequest.push(recvRequest) ;
      }
…
    while(received)
    {
+     #ifdef _usingEP
+     MPI_Iprobe(-1,0,communicator,&received, &status) ;
+     #else
      MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ;
+     #endif
      if (received)
      {
        recvRequest=new SPendingRequest ;
+       #ifdef _usingEP
+       MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, -1, 0, communicator, &recvRequest->request) ;
+       #else
        MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ;
+       #endif
        pendingRecvChildRequest.push_back(recvRequest) ;
      }
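Under the branch's endpoint library the wildcard source is encoded as the integer -1 instead of MPI_ANY_SOURCE, hence the #ifdef at each probe and receive site. A hedged sketch of centralizing that choice in one alias (a suggestion for illustration, not what the changeset does):

#include <mpi.h>
#include <cstdio>

#ifdef _usingEP
static const int ANY_SRC = -1;             // ep_lib wildcard convention
#else
static const int ANY_SRC = MPI_ANY_SOURCE; // standard MPI wildcard
#endif

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int flag = 0;
  MPI_Status status;
  // Non-blocking probe for any pending message with tag 1 from any source.
  MPI_Iprobe(ANY_SRC, 1, MPI_COMM_WORLD, &flag, &status);
  std::printf("message pending: %d\n", flag);
  MPI_Finalize();
  return 0;
}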
XIOS/dev/branch_yushan_merged/src/event_scheduler.hpp
r591 r1134
  #include "xios_spl.hpp"
  #include "mpi.hpp"
+ #ifdef _usingEP
+ #include "ep_declaration.hpp"
+ #endif
  namespace xios
XIOS/dev/branch_yushan_merged/src/filter/spatial_transform_filter.cpp
r1076 r1134
  }
- std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > CSpatialTransformFilterEngine::engines;
+ std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > *CSpatialTransformFilterEngine::engines_ptr = 0;
  CSpatialTransformFilterEngine* CSpatialTransformFilterEngine::get(CGridTransformation* gridTransformation)
…
      "Impossible to get the requested engine, the grid transformation is invalid.");
- std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines.find(gridTransformation);
- if (it == engines.end())
+ if(engines_ptr == NULL) engines_ptr = new std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >;
+ std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> >::iterator it = engines_ptr->find(gridTransformation);
+ if (it == engines_ptr->end())
  {
    boost::shared_ptr<CSpatialTransformFilterEngine> engine(new CSpatialTransformFilterEngine(gridTransformation));
-   it = engines.insert(std::make_pair(gridTransformation, engine)).first;
+   it = engines_ptr->insert(std::make_pair(gridTransformation, engine)).first;
  }
…
  idxSendBuff = 0;
- std::vector<MPI_Request> sendRecvRequest;
+ std::vector<ep_lib::MPI_Request> sendRecvRequest;
  for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff)
  {
…
      sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx));
    }
-   sendRecvRequest.push_back(MPI_Request());
+   sendRecvRequest.push_back(ep_lib::MPI_Request());
    MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());
  }
…
    int srcRank = itRecv->first;
    int countSize = itRecv->second.size();
-   sendRecvRequest.push_back(MPI_Request());
+   sendRecvRequest.push_back(ep_lib::MPI_Request());
    MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());
    currentBuff += countSize;
  }
- std::vector<MPI_Status> status(sendRecvRequest.size());
+ std::vector<ep_lib::MPI_Status> status(sendRecvRequest.size());
  MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]);
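The static engine map becomes a pointer that is allocated on first use and then queried with the usual find/insert get-or-create sequence. A self-contained sketch of that caching idiom, with placeholder key and engine types:

#include <map>
#include <memory>
#include <cstdio>

struct Engine { explicit Engine(int k) : key(k) {} int key; };

// Allocated on first use; threadprivate in the real code.
static std::map<int, std::shared_ptr<Engine> >* engines_ptr = nullptr;

static Engine* getEngine(int key)
{
  if (engines_ptr == nullptr)                       // first call: allocate the cache
    engines_ptr = new std::map<int, std::shared_ptr<Engine> >;

  std::map<int, std::shared_ptr<Engine> >::iterator it = engines_ptr->find(key);
  if (it == engines_ptr->end())                     // cache miss: build and remember
    it = engines_ptr->insert(std::make_pair(key, std::shared_ptr<Engine>(new Engine(key)))).first;
  return it->second.get();
}

int main()
{
  Engine* a = getEngine(7);
  Engine* b = getEngine(7);
  std::printf("same engine reused: %d\n", a == b);  // prints 1
  return 0;
}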
XIOS/dev/branch_yushan_merged/src/filter/spatial_transform_filter.hpp
r1018 r1134
    //! The allocated engines
-   static std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > engines;
+   //static std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > engines;
+   static std::map<CGridTransformation*, boost::shared_ptr<CSpatialTransformFilterEngine> > *engines_ptr;
+   #pragma omp threadprivate(engines_ptr)
  }; // class CSpatialTransformFilterEngine
XIOS/dev/branch_yushan_merged/src/group_factory.cpp
r501 r1134
  /// ////////////////////// Définitions ////////////////////// ///
- StdString CGroupFactory::CurrContext("");
+ StdString *CGroupFactory::CurrContext_ptr = new StdString;
  void CGroupFactory::SetCurrentContextId(const StdString & context)
  {
-   CGroupFactory::CurrContext = context;
+   if(CGroupFactory::CurrContext_ptr == NULL ) CGroupFactory::CurrContext_ptr = new StdString;
+   CGroupFactory::CurrContext_ptr->assign(context);
  }
  StdString & CGroupFactory::GetCurrentContextId(void)
  {
-   return (CGroupFactory::CurrContext);
+   return (*CGroupFactory::CurrContext_ptr);
  }
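CurrContext is a std::string, and threadprivate is awkward for statics with non-trivial constructors, so the branch stores a plain pointer instead and lets each thread allocate its own string lazily. A minimal sketch of the idiom (names invented; the per-thread string is deliberately never freed, as in the branch):

#include <omp.h>
#include <string>
#include <cstdio>

static std::string* currContext = nullptr; // a pointer is POD, so threadprivate works
#pragma omp threadprivate(currContext)

static void setCurrentContextId(const std::string& id)
{
  if (currContext == nullptr) currContext = new std::string; // lazy per-thread alloc
  currContext->assign(id);
}

static std::string& getCurrentContextId() { return *currContext; }

int main()
{
  #pragma omp parallel
  {
    setCurrentContextId("ctx_" + std::to_string(omp_get_thread_num()));
    std::printf("thread %d -> %s\n", omp_get_thread_num(),
                getCurrentContextId().c_str());
  }
  return 0;
}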
XIOS/dev/branch_yushan_merged/src/group_factory.hpp
r591 r1134
  /// Propriétés statiques ///
- static StdString CurrContext;
+ static StdString *CurrContext_ptr;
+ #pragma omp threadprivate(CurrContext_ptr)
  }; // class CGroupFactory
XIOS/dev/branch_yushan_merged/src/indent.hpp
r501 r1134
  public:
    static int defaultIncSize;
+   #pragma omp threadprivate(defaultIncSize)
    static int index ;
+   #pragma omp threadprivate(index)
    int incSize ;
    int offset ;
XIOS/dev/branch_yushan_merged/src/indent_xml.cpp
r501 r1134
  {
    static unsigned int LineNB = 1;
+   #pragma omp threadprivate(LineNB)
    if (CIndent::WithLine) out << LineNB++ << ". ";
    for(unsigned int i = 0; i < CIndent::Indent; out << CIndent::Increm , i++){}
XIOS/dev/branch_yushan_merged/src/indent_xml.hpp
r591 r1134
  /// Propriétés statiques ///
  static unsigned int Indent;
+ #pragma omp threadprivate(Indent)
  static StdString Increm;
+ #pragma omp threadprivate(Increm)
  static bool WithLine;
+ #pragma omp threadprivate(WithLine)
  }; // class CIndent
XIOS/dev/branch_yushan_merged/src/interface/c/icdata.cpp
r961 r1134
  #include "xios.hpp"
- #include "oasis_cinterface.hpp"
+ //#include "oasis_cinterface.hpp"
  #include "attribute_template.hpp"
…
  #include "context.hpp"
  #include "context_client.hpp"
- #include "mpi.hpp"
+ #include "mpi_std.hpp"
  #include "timer.hpp"
  #include "array_new.hpp"
…
  {
    std::string str;
-   MPI_Comm local_comm;
-   MPI_Comm return_comm;
+   ep_lib::MPI_Comm local_comm;
+   ep_lib::MPI_Comm return_comm;
+
+   ep_lib::fc_comm_map.clear();
    if (!cstr2string(client_id, len_client_id, str)) return;
…
    int initialized;
    MPI_Initialized(&initialized);
+   #ifdef _usingEP
+   if (initialized) local_comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_local_comm));
+   else local_comm = MPI_COMM_NULL;
+   #else
    if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm);
-   else local_comm=MPI_COMM_NULL;
+   else local_comm = MPI_COMM_NULL;
+   #endif
    CXios::initClientSide(str, local_comm, return_comm);
-   *f_return_comm=MPI_Comm_c2f(return_comm);
+   #ifdef _usingEP
+   *f_return_comm = ep_lib::EP_Comm_c2f(return_comm);
+   #else
+   *f_return_comm = MPI_Comm_c2f(return_comm);
+   #endif
    CTimer::get("XIOS init").suspend();
    CTimer::get("XIOS").suspend();
…
  {
    std::string str;
-   MPI_Comm comm;
+   ep_lib::MPI_Comm comm;
    if (!cstr2string(context_id, len_context_id, str)) return;
    CTimer::get("XIOS").resume();
    CTimer::get("XIOS init context").resume();
-   comm=MPI_Comm_f2c(*f_comm);
-   CClient::registerContext(str, comm);
+   comm = ep_lib::EP_Comm_f2c(static_cast< int >(*f_comm));
+   CClient::registerContext(str,comm);
    CTimer::get("XIOS init context").suspend();
    CTimer::get("XIOS").suspend();
…
    CTimer::get("XIOS close definition").resume();
    CContext* context = CContext::getCurrent();
    context->closeDefinition();
    CTimer::get("XIOS close definition").suspend();
    CTimer::get("XIOS").suspend();
…
    CTimer::get("XIOS").resume();
    CTimer::get("XIOS context finalize").resume();
    CContext* context = CContext::getCurrent();
    context->finalize();
…
    CContext* context = CContext::getCurrent();
    if (!context->hasServer && !context->client->isAttachedModeEnabled())
-     context->checkBuffersAndListen();
+   {
+     context->checkBuffersAndListen();
+   }
    CArray<double, 3>data(data_k8, shape(data_Xsize, data_Ysize, data_Zsize), neverDeleteData);
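The f2c/c2f calls convert between the INTEGER communicator handles Fortran passes and the C handles XIOS uses internally; under ep_lib the branch substitutes EP_Comm_f2c/EP_Comm_c2f. A runnable sketch of the standard-MPI half of that round trip (the function name is invented for the demo):

#include <mpi.h>
#include <cstdio>

// Hypothetical C function called from Fortran with an INTEGER handle.
extern "C" void demo_use_comm(MPI_Fint* f_comm, MPI_Fint* f_out)
{
  MPI_Comm comm = MPI_Comm_f2c(*f_comm); // Fortran handle -> C handle
  int rank;
  MPI_Comm_rank(comm, &rank);
  std::printf("rank inside the passed communicator: %d\n", rank);
  *f_out = MPI_Comm_c2f(comm);           // C handle -> Fortran handle
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  MPI_Fint f = MPI_Comm_c2f(MPI_COMM_WORLD); // what the Fortran side would hold
  MPI_Fint out;
  demo_use_comm(&f, &out);
  MPI_Finalize();
  return 0;
}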
XIOS/dev/branch_yushan_merged/src/interface/c/oasis_cinterface.cpp
r501 r1134
  #include "oasis_cinterface.hpp"
  #include <string>
- #include "mpi.hpp"
+ //#include "mpi_std.hpp"
  namespace xios
…
    fxios_oasis_get_localcomm(&f_comm) ;
+   #ifdef _usingEP
+   comm=EP_Comm_f2c(f_comm.mpi_fint) ;
+   #else
    comm=MPI_Comm_f2c(f_comm) ;
+   #endif
  }
…
    fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ;
+   #ifdef _usingEP
+   comm_client_server=EP_Comm_f2c(f_comm.mpi_fint) ;
+   #else
    comm_client_server=MPI_Comm_f2c(f_comm) ;
+   #endif
  }
…
    fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ;
+   #ifdef _usingEP
+   comm_client_server=EP_Comm_f2c(f_comm.mpi_fint) ;
+   #else
    comm_client_server=MPI_Comm_f2c(f_comm) ;
+   #endif
  }
XIOS/dev/branch_yushan_merged/src/interface/fortran/idata.F90
r965 r1134
    INTEGER :: f_return_comm
+
    IF (PRESENT(local_comm)) THEN
      f_local_comm=local_comm
XIOS/dev/branch_yushan_merged/src/io/inetcdf4.cpp
r948 r1134
  }
  mpi = comm && !multifile;
+ MPI_Info m_info;
  // The file format will be detected automatically by NetCDF, it is safe to always set NC_MPIIO
  // even if Parallel NetCDF ends up being used.
  if (mpi)
-   CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp);
+   CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, m_info, this->ncidp);
  else
    CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp);
XIOS/dev/branch_yushan_merged/src/io/inetcdf4.hpp
r802 r1134
  #include "array_new.hpp"
- #include "mpi.hpp"
+ #include "mpi_std.hpp"
  #include "netcdf.hpp"
XIOS/dev/branch_yushan_merged/src/io/nc4_data_output.cpp
r1108 r1134
  CNc4DataOutput::CNc4DataOutput
    (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention,
-    MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)
+    ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)
    : SuperClass()
    , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName)
…
  StdString dimXid, dimYid ;
  nc_type typePrec ;
…
  StdString domainName = domain->name;
  domain->assignMesh(domainName, domain->nvertex);
- domain->mesh->createMeshEpsilon(server->intraComm, domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);
+ domain->mesh->createMeshEpsilon(static_cast<MPI_Comm>(server->intraComm.mpi_comm), domain->lonvalue_srv, domain->latvalue_srv, domain->bounds_lon_srv, domain->bounds_lat_srv);
  StdString node_x = domainName + "_node_x";
XIOS/dev/branch_yushan_merged/src/io/nc4_data_output.hpp
r1096 r1134
    (CFile* file, const StdString & filename, bool exist, bool useClassicFormat,
     bool useCFConvention,
-    MPI_Comm comm_file, bool multifile, bool isCollective = true,
+    ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true,
     const StdString& timeCounterName = "time_counter");
…
  /// Propriétés privées ///
- MPI_Comm comm_file;
+ ep_lib::MPI_Comm comm_file;
  const StdString filename;
  std::map<Time, StdSize> timeToRecordCache;
XIOS/dev/branch_yushan_merged/src/io/netCdfInterface.hpp
r811 r1134
  #endif
- #include "mpi.hpp"
+ #include "mpi_std.hpp"
  #include "netcdf.hpp"
XIOS/dev/branch_yushan_merged/src/io/netcdf.hpp
r685 r1134
  #ifndef __XIOS_NETCDF_HPP__
  #define __XIOS_NETCDF_HPP__
- #include "mpi.hpp"
+ #include "mpi_std.hpp"
  #define MPI_INCLUDED
  #include <netcdf.h>
…
  extern "C"
  {
    #include <netcdf_par.h>
  }
…
  namespace xios
  {
-   inline int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)
+   inline int nc_create_par(const char *path, int cmode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp)
    {
  #if defined(USING_NETCDF_PAR)
-     return ::nc_create_par(path, cmode, comm, info, ncidp) ;
+     return ::nc_create_par(path, cmode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ;
  #else
      ERROR("int nc_create_par(const char *path, int cmode, MPI_Comm comm, MPI_Info info,int *ncidp)",
…
    }
-   inline int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)
+   inline int nc_open_par(const char *path, int mode, ep_lib::MPI_Comm comm, MPI_Info info,int *ncidp)
    {
  #if defined(USING_NETCDF_PAR)
-     return ::nc_open_par(path, mode, comm, info, ncidp) ;
+     return ::nc_open_par(path, mode, static_cast<MPI_Comm>(comm.mpi_comm), info, ncidp) ;
  #else
      ERROR("int nc_open_par(const char *path, int mode, MPI_Comm comm, MPI_Info info,int *ncidp)",
XIOS/dev/branch_yushan_merged/src/io/onetcdf4.cpp
r1097 r1134
  #include "onetcdf4.hpp"
  #include "group_template.hpp"
- #include "mpi.hpp"
  #include "netcdf.hpp"
  #include "netCdfInterface.hpp"
…
  /// ////////////////////// Définitions ////////////////////// ///
- CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat,
-                      bool useCFConvention,
-                      const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
+ CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
+                      const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
    : path()
    , wmpi(false)
…
  void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
-                            const MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
+                            const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)
  {
    this->useClassicFormat = useClassicFormat;
…
  {
    if (wmpi)
-     CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);
+     CNetCdfInterface::createPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp);
    else
      CNetCdfInterface::create(filename, mode, this->ncidp);
…
    mode |= NC_WRITE;
    if (wmpi)
-     CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp);
+     CNetCdfInterface::openPar(filename, mode, static_cast<MPI_Comm>(comm->mpi_comm), MPI_INFO_NULL_STD, this->ncidp);
    else
      CNetCdfInterface::open(filename, mode, this->ncidp);
…
                const std::vector<StdSize>& scount, const int* data)
  {
    CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data);
  }
…
                const std::vector<StdSize>& scount, const float* data)
  {
    CNetCdfInterface::putVaraType(grpid, varid, &sstart[0], &scount[0], data);
  }
XIOS/dev/branch_yushan_merged/src/io/onetcdf4.hpp
r1097 r1134
  #include "data_output.hpp"
  #include "array_new.hpp"
- #include "mpi.hpp"
+ #include "mpi_std.hpp"
  #include "netcdf.hpp"
…
  CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false,
            bool useCFConvention = true,
-           const MPI_Comm* comm = NULL, bool multifile = true,
+           const ep_lib::MPI_Comm* comm = NULL, bool multifile = true,
            const StdString& timeCounterName = "time_counter");
…
  /// Initialisation ///
  void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention,
-                 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName);
+                 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName);
  void close(void);
  void sync(void);
XIOS/dev/branch_yushan_merged/src/log.cpp
r523 r1134
  #include "log.hpp"
+ #include <string>
+ #include <iostream>
+ #include <string>
  namespace xios
  {
+   std::filebuf* info_FB[10];
    CLog info("info") ;
    CLog report("report") ;
    CLog error("error", cerr.rdbuf()) ;
+
+   CLog& CLog::operator()(int l)
+   {
+     if (l<=level)
+     {
+       omp_set_lock( &mutex );
+       //rdbuf(strBuf_);
+       rdbuf(strBuf_array[omp_get_thread_num()]);
+       *this<<"-> "<<name<<" : " ;
+       omp_unset_lock( &mutex );
+     }
+     else rdbuf(NULL) ;
+     return *this;
+   }
+
+   int test_omp_rank;
+   #pragma omp threadprivate(test_omp_rank)
  }
XIOS/dev/branch_yushan_merged/src/log.hpp
r523 r1134
  #include <iostream>
  #include <string>
+ #include <stdio.h>
+ #include <omp.h>
  namespace xios
…
    public :
      CLog(const string& name_, std::streambuf* sBuff = cout.rdbuf())
-       : ostream(sBuff), level(0), name(name_), strBuf_(sBuff) {}
-     CLog& operator()(int l)
+       : ostream(cout.rdbuf()), level(0), name(name_), strBuf_(sBuff)
      {
-       if (l<=level)
-       {
-         rdbuf(strBuf_);
-         *this<<"-> "<<name<<" : " ;
-       }
-       else rdbuf(NULL) ;
-       return *this;
+       omp_init_lock( &mutex );
+       for(int i=0; i<10; i++)
+         strBuf_array[i] = sBuff;
      }
+
+     ~CLog()
+     {
+       omp_destroy_lock( &mutex );
+     }
+
+     CLog& operator()(int l);
      void setLevel(int l) {level=l; }
      int getLevel() {return level ;}
      bool isActive(void) { if (rdbuf()==NULL) return true ; else return false ;}
      bool isActive(int l) {if (l<=level) return true ; else return false ; }
…
       * \param [in] pointer to new streambuf
       */
-     void changeStreamBuff(std::streambuf* sBuff) { strBuf_ = sBuff; rdbuf(sBuff); }
+     void changeStreamBuff(std::streambuf* sBuff)
+     {
+       strBuf_ = sBuff;
+       strBuf_array[omp_get_thread_num()] = sBuff;
+       rdbuf(sBuff);
+     }
      int level ;
      string name ;
      std::streambuf* strBuf_;
+     std::streambuf* strBuf_array[10];
+     omp_lock_t mutex;
  };
…
  extern CLog report;
  extern CLog error;
+
+ extern std::filebuf* info_FB[10];
  }
  #endif
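The logger now selects a per-thread stream buffer and writes its prefix inside an omp_lock_t-guarded section, so concurrent threads cannot interleave that state change. A stripped-down sketch of the lock discipline (illustrative names, not the XIOS API):

#include <omp.h>
#include <cstdio>

class TinyLog
{
public:
  TinyLog()  { omp_init_lock(&mutex_); }
  ~TinyLog() { omp_destroy_lock(&mutex_); }

  void write(const char* name, const char* msg)
  {
    omp_set_lock(&mutex_);                  // one thread at a time past this point
    std::printf("-> %s : %s\n", name, msg); // prefix and message stay together
    omp_unset_lock(&mutex_);
  }

private:
  omp_lock_t mutex_;
};

int main()
{
  TinyLog log;
  #pragma omp parallel
  log.write("info", "hello from a thread");
  return 0;
}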
XIOS/dev/branch_yushan_merged/src/memtrack.cpp
r501 r1134
  private: // static member variables
    static BlockHeader *ourFirstNode;
+   #pragma omp threadprivate(ourFirstNode)
  private: // member variables
XIOS/dev/branch_yushan_merged/src/mpi.hpp
r501 r1134
  #define OMPI_SKIP_MPICXX
- #include <mpi.h>
+ #ifdef _usingEP
+ #include <omp.h>
+ #include "../extern/src_ep_dev/ep_lib.hpp"
+ using namespace ep_lib;
+ #elif _usingMPI
+ #include <mpi.h>
+ #endif
  #endif
XIOS/dev/branch_yushan_merged/src/node/axis.cpp
r1117 r1134
  { /* Ne rien faire de plus */ }
- std::map<StdString, ETranformationType> CAxis::transformationMapList_ = std::map<StdString, ETranformationType>();
- bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_);
+ std::map<StdString, ETranformationType> *CAxis::transformationMapList_ptr = 0; //new std::map<StdString, ETranformationType>();
+ //bool CAxis::dummyTransformationMapList_ = CAxis::initializeTransformationMap(CAxis::transformationMapList_ptr);
  bool CAxis::initializeTransformationMap(std::map<StdString, ETranformationType>& m)
  {
…
    m["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS;
  }
+
+ bool CAxis::initializeTransformationMap()
+ {
+   if(CAxis::transformationMapList_ptr == 0) CAxis::transformationMapList_ptr = new std::map<StdString, ETranformationType>();
+   (*CAxis::transformationMapList_ptr)["zoom_axis"] = TRANS_ZOOM_AXIS;
+   (*CAxis::transformationMapList_ptr)["interpolate_axis"] = TRANS_INTERPOLATE_AXIS;
+   (*CAxis::transformationMapList_ptr)["inverse_axis"] = TRANS_INVERSE_AXIS;
+   (*CAxis::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_AXIS;
+   (*CAxis::transformationMapList_ptr)["extract_domain"] = TRANS_EXTRACT_DOMAIN_TO_AXIS;
+ }
  ///---------------------------------------------------------------
…
  CContextServer* server = CContext::getCurrent()->server;
  axis->numberWrittenIndexes_ = axis->indexesToWrite.size();
- MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
- MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
+ ep_lib::MPI_Allreduce(&axis->numberWrittenIndexes_, &axis->totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
+ ep_lib::MPI_Scan(&axis->numberWrittenIndexes_, &axis->offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm);
  axis->offsetWrittenIndexes_ -= axis->numberWrittenIndexes_;
…
  void CAxis::duplicateTransformation(CAxis* src)
  {
…
  nodeElementName = node.getElementName();
- std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it;
- it = transformationMapList_.find(nodeElementName);
+ if(transformationMapList_ptr == 0) initializeTransformationMap();
+ std::map<StdString, ETranformationType>::const_iterator ite = (*CAxis::transformationMapList_ptr).end(), it;
+ it = (*CAxis::transformationMapList_ptr).find(nodeElementName);
  if (ite != it)
  {
…
  } // namespace xios
XIOS/dev/branch_yushan_merged/src/node/axis.hpp
r1106 r1134
  private:
    static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m);
-   static std::map<StdString, ETranformationType> transformationMapList_;
-   static bool dummyTransformationMapList_;
+   //static bool initializeTransformationMap(std::map<StdString, ETranformationType>* m);
+   static bool initializeTransformationMap();
+
+   //static std::map<StdString, ETranformationType> transformationMapList_;
+
+   static std::map<StdString, ETranformationType> *transformationMapList_ptr;
+   #pragma omp threadprivate(transformationMapList_ptr)
+
+   //static bool dummyTransformationMapList_;
+   //#pragma omp threadprivate(dummyTransformationMapList_)
  DECLARE_REF_FUNC(Axis,axis)
…
  #endif // __XIOS_CAxis__
XIOS/dev/branch_yushan_merged/src/node/compute_connectivity_domain.hpp
r934 r1134
    static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CComputeConnectivityDomain
XIOS/dev/branch_yushan_merged/src/node/context.cpp
r1091 r1134
  namespace xios {
- shared_ptr<CContextGroup> CContext::root;
+ //shared_ptr<CContextGroup> CContext::root;
+ shared_ptr<CContextGroup> * CContext::root_ptr = 0;
  /// ////////////////////// Définitions ////////////////////// ///
…
  CContextGroup* CContext::getRoot(void)
  {
-   if (root.get()==NULL) root=shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName()));
-   return root.get();
+   //if (root.get()==NULL) root=shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName()));
+   //return root.get();
+   if(root_ptr == 0)
+     root_ptr = new shared_ptr<CContextGroup>(new CContextGroup(xml::CXMLNode::GetRootName()));
+   return root_ptr->get();
  }
…
  //! Initialize client side
- void CContext::initClient(MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtServer /*= 0*/)
+ void CContext::initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/)
  {
    hasClient=true;
-   client = new CContextClient(this,intraComm, interComm, cxtServer);
+   client = new CContextClient(this, intraComm, interComm, cxtServer);
+
+   int tmp_rank;
+   MPI_Comm_rank(intraComm, &tmp_rank);
+   MPI_Barrier(intraComm);
+
    registryIn=new CRegistry(intraComm);
    registryIn->setPath(getId()) ;
…
    registryOut->setPath(getId()) ;
-   MPI_Comm intraCommServer, interCommServer;
+   ep_lib::MPI_Comm intraCommServer, interCommServer;
    if (cxtServer) // Attached mode
    {
…
  //! Initialize server
- void CContext::initServer(MPI_Comm intraComm,MPI_Comm interComm, CContext* cxtClient /*= 0*/)
+ void CContext::initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/)
  {
    hasServer=true;
…
    registryOut->setPath(getId()) ;
-   MPI_Comm intraCommClient, interCommClient;
+   ep_lib::MPI_Comm intraCommClient, interCommClient;
    if (cxtClient) // Attached mode
    {
…
  }
- for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
+ for (std::list<ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
    MPI_Comm_free(&(*it));
  comms.clear();
…
  void CContext::postProcessing()
  {
+   int myRank;
+   MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
+
    if (isPostProcessed) return;
…
  void CContext::updateCalendar(int step)
  {
-   info(50) << "updateCalendar : before : " << calendar->getCurrentDate() << endl;
    calendar->update(step);
-   info(50) << "updateCalendar : after : " << calendar->getCurrentDate() << endl;
    if (hasClient)
…
  CContext* context = CObjectFactory::CreateObject<CContext>(id).get();
  getRoot();
- if (!hasctxt) CGroupFactory::AddChild(root, context->getShared());
+ //if (!hasctxt) CGroupFactory::AddChild(root, context->getShared());
+ if (!hasctxt) CGroupFactory::AddChild(*root_ptr, context->getShared());
  #define DECLARE_NODE(Name_, name_) \
XIOS/dev/branch_yushan_merged/src/node/context.hpp
r1033 r1134
  public :
    // Initialize server or client
-   void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0);
-   void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0);
+   void initServer(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0);
+   void initClient(ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0);
    bool isInitialized(void);
…
    // Context root
-   static shared_ptr<CContextGroup> root;
+   //static shared_ptr<CContextGroup> root;
+   static shared_ptr<CContextGroup> *root_ptr;
+   #pragma omp threadprivate(root_ptr)
    // Determine context on client or not
…
    // Concrete contex client
    CContextClient* client;
    CRegistry* registryIn ;  //!< input registry which is read from file
    CRegistry* registryOut ; //!< output registry which will be wrote on file at the finalize
  private:
…
    StdString idServer_;
    CGarbageCollector garbageCollector;
-   std::list<MPI_Comm> comms; //!< Communicators allocated internally
+   std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally
  public: // Some function maybe removed in the near future
XIOS/dev/branch_yushan_merged/src/node/domain.cpp
r1117 r1134
  }
- std::map<StdString, ETranformationType> CDomain::transformationMapList_ = std::map<StdString, ETranformationType>();
- bool CDomain::_dummyTransformationMapList = CDomain::initializeTransformationMap(CDomain::transformationMapList_);
+ //std::map<StdString, ETranformationType> CDomain::transformationMapList_ = std::map<StdString, ETranformationType>();
+ //bool CDomain::_dummyTransformationMapList = CDomain::initializeTransformationMap(CDomain::transformationMapList_);
+ std::map<StdString, ETranformationType> *CDomain::transformationMapList_ptr = 0;
  bool CDomain::initializeTransformationMap(std::map<StdString, ETranformationType>& m)
  {
…
    m["expand_domain"] = TRANS_EXPAND_DOMAIN;
  }
+
+ bool CDomain::initializeTransformationMap()
+ {
+   CDomain::transformationMapList_ptr = new std::map<StdString, ETranformationType>();
+   (*CDomain::transformationMapList_ptr)["zoom_domain"] = TRANS_ZOOM_DOMAIN;
+   (*CDomain::transformationMapList_ptr)["interpolate_domain"] = TRANS_INTERPOLATE_DOMAIN;
+   (*CDomain::transformationMapList_ptr)["generate_rectilinear_domain"] = TRANS_GENERATE_RECTILINEAR_DOMAIN;
+   (*CDomain::transformationMapList_ptr)["compute_connectivity_domain"] = TRANS_COMPUTE_CONNECTIVITY_DOMAIN;
+   (*CDomain::transformationMapList_ptr)["expand_domain"] = TRANS_EXPAND_DOMAIN;
+ }
  const std::set<StdString> & CDomain::getRelFiles(void) const
…
  {
    CContext* context = CContext::getCurrent();
    CContextClient* client = context->client;
    lon_g.resize(ni_glo) ;
    lat_g.resize(nj_glo) ;
…
                        client->intraComm);
    clientServerMap->computeServerIndexMapping(globalIndexDomain);
    const CClientServerMapping::GlobalIndexMap& globalIndexDomainOnServer = clientServerMap->getGlobalIndexOnServer();
…
  nodeElementName = node.getElementName();
- std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it;
- it = transformationMapList_.find(nodeElementName);
+ if(transformationMapList_ptr == 0) initializeTransformationMap();
+ std::map<StdString, ETranformationType>::const_iterator ite = (*transformationMapList_ptr).end(), it;
+ it = (*transformationMapList_ptr).find(nodeElementName);
  if (ite != it)
  {
XIOS/dev/branch_yushan_merged/src/node/domain.hpp
r1106 r1134
  private:
    static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m);
-   static std::map<StdString, ETranformationType> transformationMapList_;
-   static bool _dummyTransformationMapList;
+   static bool initializeTransformationMap();
+   //static std::map<StdString, ETranformationType> transformationMapList_;
+
+   static std::map<StdString, ETranformationType> *transformationMapList_ptr;
+   #pragma omp threadprivate(transformationMapList_ptr)
+
+   //static bool _dummyTransformationMapList;
+   //#pragma omp threadprivate(_dummyTransformationMapList)
  DECLARE_REF_FUNC(Domain,domain)
XIOS/dev/branch_yushan_merged/src/node/expand_domain.hpp
r935 r1134
    static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CExpandDomain
XIOS/dev/branch_yushan_merged/src/node/extract_axis_to_scalar.hpp
r960 r1134
    static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CExtractAxisToScalar
XIOS/dev/branch_yushan_merged/src/node/extract_domain_to_axis.hpp
r895 r1134
    static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CExtractDomainToAxis
XIOS/dev/branch_yushan_merged/src/node/field.cpp
r1120 r1134
    const bool ignoreMissingValue = (!detect_missing_value.isEmpty() && !default_value.isEmpty() && detect_missing_value == true);
    boost::shared_ptr<CTemporalFilter> temporalFilter(new CTemporalFilter(gc, operation,
      CContext::getCurrent()->getCalendar()->getInitDate(),
XIOS/dev/branch_yushan_merged/src/node/file.cpp
r1098 r1134
    if (isOpen) data_out->closeFile();
-   if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective));
-   else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), fileComm, multifile, isCollective, time_counter_name));
+   if (time_counter_name.isEmpty()) data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective));
+   else data_in = shared_ptr<CDataInput>(new CNc4DataInput(oss.str(), static_cast<MPI_Comm>(fileComm.mpi_comm), multifile, isCollective, time_counter_name));
    isOpen = true;
  }
XIOS/dev/branch_yushan_merged/src/node/file.hpp
r1090 r1134
  #include "attribute_enum_impl.hpp"
  #include "mpi.hpp"
+ #ifdef _usingEP
+ #include "ep_declaration.hpp"
+ #endif
  namespace xios {
…
    bool isOpen;
    bool allDomainEmpty;
-   MPI_Comm fileComm;
+   ep_lib::MPI_Comm fileComm;
  private :
XIOS/dev/branch_yushan_merged/src/node/generate_rectilinear_domain.hpp
r836 r1134
    static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CGenerateRectilinearDomain
XIOS/dev/branch_yushan_merged/src/node/grid.cpp
r1093 r1134
      outLocalIndexToServer(idx) = itIndex->second;
    }
    const std::list<int>& ranks = client->getRanksServerLeader();
    for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)
XIOS/dev/branch_yushan_merged/src/node/interpolate_axis.hpp
r836 r1134
    static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CInterpolateAxis
XIOS/dev/branch_yushan_merged/src/node/interpolate_domain.hpp
r1004 r1134
    static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CInterpolateDomain
XIOS/dev/branch_yushan_merged/src/node/inverse_axis.hpp
r836 r1134
    static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CInverseAxis
XIOS/dev/branch_yushan_merged/src/node/mesh.cpp
r1002 r1134
  std::map <StdString, vector<int> > CMesh::domainList = std::map <StdString, vector<int> >();
+ std::map <StdString, CMesh> *CMesh::meshList_ptr = 0;
+ std::map <StdString, vector<int> > *CMesh::domainList_ptr = 0;
  ///---------------------------------------------------------------
  /*!
…
   * \param [in] nvertex Number of verteces (1 for nodes, 2 for edges, 3 and up for faces).
   */
+ /* bkp
  CMesh* CMesh::getMesh (StdString meshName, int nvertex)
  {
…
    CMesh::meshList.insert( make_pair(meshName, newMesh) );
    return &meshList[meshName];
  }
+ }
+ */
+
+ CMesh* CMesh::getMesh (StdString meshName, int nvertex)
+ {
+   if(CMesh::domainList_ptr == NULL) CMesh::domainList_ptr = new std::map <StdString, vector<int> >();
+   if(CMesh::meshList_ptr == NULL) CMesh::meshList_ptr = new std::map <StdString, CMesh>();
+
+   (*CMesh::domainList_ptr)[meshName].push_back(nvertex);
+
+   if ( (*CMesh::meshList_ptr).begin() != (*CMesh::meshList_ptr).end() )
+   {
+     for (std::map<StdString, CMesh>::iterator it=(*CMesh::meshList_ptr).begin(); it!=(*CMesh::meshList_ptr).end(); ++it)
+     {
+       if (it->first == meshName)
+         return &((*CMesh::meshList_ptr)[meshName]);
+       else
+       {
+         CMesh newMesh;
+         (*CMesh::meshList_ptr).insert( make_pair(meshName, newMesh) );
+         return &((*CMesh::meshList_ptr)[meshName]);
+       }
+     }
+   }
+   else
+   {
+     CMesh newMesh;
+     (*CMesh::meshList_ptr).insert( make_pair(meshName, newMesh) );
+     return &((*CMesh::meshList_ptr)[meshName]);
+   }
  }
…
   * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type.
   */
- void CMesh::createMeshEpsilon(const MPI_Comm& comm,
+ void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm,
                                const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue,
                                const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat)
…
   */
- void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+ void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                      const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                      CArray<int, 2>& nghbFaces)
…
   */
- void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx,
+ void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,
                                      const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
                                      CArray<int, 2>& nghbFaces)
…
   */
- void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm,
+ void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm,
                                 const CArray<int, 1>& face_idx,
                                 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat,
XIOS/dev/branch_yushan_merged/src/node/mesh.hpp
r931 r1134
                           const CArray<double, 2>&, const CArray<double, 2>& );
-   void createMeshEpsilon(const MPI_Comm&,
+   void createMeshEpsilon(const ep_lib::MPI_Comm&,
                           const CArray<double, 1>&, const CArray<double, 1>&,
                           const CArray<double, 2>&, const CArray<double, 2>& );
-   void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&,
+   void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&,
                            const CArray<double, 2>&, const CArray<double, 2>&,
                            CArray<int, 2>&);
…
    static std::map <StdString, CMesh> meshList;
    static std::map <StdString, vector<int> > domainList;
+
+   static std::map <StdString, CMesh> *meshList_ptr;
+   static std::map <StdString, vector<int> > *domainList_ptr;
+   #pragma omp threadprivate(meshList_ptr, domainList_ptr)
+
    CClientClientDHTSizet* pNodeGlobalIndex;                    // pointer to a map <nodeHash, nodeIdxGlo>
    CClientClientDHTSizet* pEdgeGlobalIndex;                    // pointer to a map <edgeHash, edgeIdxGlo>
-   void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
-   void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+   void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
+   void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);
    void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);
    void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&);
XIOS/dev/branch_yushan_merged/src/node/reduce_axis_to_scalar.hpp
r888 r1134
    static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CReduceAxisToScalar
XIOS/dev/branch_yushan_merged/src/node/reduce_domain_to_axis.hpp
r895 r1134
    static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CReduceDomainToAxis
XIOS/dev/branch_yushan_merged/src/node/reduce_domain_to_scalar.hpp
r976 r1134
    static CTransformation<CScalar>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CReduceDomainToScalar
XIOS/dev/branch_yushan_merged/src/node/scalar.cpp
r1117 r1134
  { /* Ne rien faire de plus */ }
- std::map<StdString, ETranformationType> CScalar::transformationMapList_ = std::map<StdString, ETranformationType>();
- bool CScalar::dummyTransformationMapList_ = CScalar::initializeTransformationMap(CScalar::transformationMapList_);
+ //std::map<StdString, ETranformationType> CScalar::transformationMapList_ = std::map<StdString, ETranformationType>();
+ //bool CScalar::dummyTransformationMapList_ = CScalar::initializeTransformationMap(CScalar::transformationMapList_);
+ std::map<StdString, ETranformationType> *CScalar::transformationMapList_ptr = 0;
  bool CScalar::initializeTransformationMap(std::map<StdString, ETranformationType>& m)
  {
…
    m["extract_axis"] = TRANS_EXTRACT_AXIS_TO_SCALAR;
    m["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR;
  }
+
+ bool CScalar::initializeTransformationMap()
+ {
+   CScalar::transformationMapList_ptr = new std::map<StdString, ETranformationType>();
+   (*CScalar::transformationMapList_ptr)["reduce_axis"] = TRANS_REDUCE_AXIS_TO_SCALAR;
+   (*CScalar::transformationMapList_ptr)["extract_axis"] = TRANS_EXTRACT_AXIS_TO_SCALAR;
+   (*CScalar::transformationMapList_ptr)["reduce_domain"] = TRANS_REDUCE_DOMAIN_TO_SCALAR;
+ }
…
  nodeElementName = node.getElementName();
- std::map<StdString, ETranformationType>::const_iterator ite = transformationMapList_.end(), it;
- it = transformationMapList_.find(nodeElementName);
+ if(CScalar::transformationMapList_ptr == 0) initializeTransformationMap();
+ std::map<StdString, ETranformationType>::const_iterator ite = (*CScalar::transformationMapList_ptr).end(), it;
+ it = (*CScalar::transformationMapList_ptr).find(nodeElementName);
  if (ite != it)
  {
XIOS/dev/branch_yushan_merged/src/node/scalar.hpp
r1106 r1134
  private:
    static bool initializeTransformationMap(std::map<StdString, ETranformationType>& m);
-   static std::map<StdString, ETranformationType> transformationMapList_;
-   static bool dummyTransformationMapList_;
+   static bool initializeTransformationMap();
+
+   //static bool dummyTransformationMapList_;
+
+   static std::map<StdString, ETranformationType> *transformationMapList_ptr;
+   #pragma omp threadprivate(transformationMapList_ptr)
XIOS/dev/branch_yushan_merged/src/node/zoom_axis.hpp
r836 r1134
    static CTransformation<CAxis>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CZoomAxis
XIOS/dev/branch_yushan_merged/src/node/zoom_domain.hpp
r836 r1134
    static CTransformation<CDomain>* create(const StdString& id, xml::CXMLNode* node);
    static bool _dummyRegistered;
+   #pragma omp threadprivate(_dummyRegistered)
  }; // class CZoomDomain
XIOS/dev/branch_yushan_merged/src/object_factory.cpp
r501 r1134
  /// ////////////////////// Définitions ////////////////////// ///
- StdString CObjectFactory::CurrContext("");
+ StdString *CObjectFactory::CurrContext_ptr = new StdString;
  void CObjectFactory::SetCurrentContextId(const StdString & context)
- { CObjectFactory::CurrContext = context; }
+ {
+   if(CObjectFactory::CurrContext_ptr == NULL ) CObjectFactory::CurrContext_ptr = new StdString;
+   CObjectFactory::CurrContext_ptr->assign(context);
+ }
  StdString & CObjectFactory::GetCurrentContextId(void)
- { return (CObjectFactory::CurrContext); }
+ {
+   return (*CObjectFactory::CurrContext_ptr);
+ }
  } // namespace xios
XIOS/dev/branch_yushan_merged/src/object_factory.hpp
r769 r1134
  /// Propriétés statiques ///
- static StdString CurrContext;
+ static StdString *CurrContext_ptr;
+ #pragma omp threadprivate(CurrContext_ptr)
  }; // class CObjectFactory
XIOS/dev/branch_yushan_merged/src/object_factory_impl.hpp
r769 r1134
  int CObjectFactory::GetObjectNum(void)
  {
-   if (CurrContext.size() == 0)
+   if (CurrContext_ptr->size() == 0)
      ERROR("CObjectFactory::GetObjectNum(void)",
            << "please define current context id !");
-   return (U::AllVectObj[CObjectFactory::CurrContext].size());
+   if(U::AllVectObj == NULL) return 0;
+   return (*U::AllVectObj)[*CObjectFactory::CurrContext_ptr].size();
  }
…
  int CObjectFactory::GetObjectIdNum(void)
  {
-   if (CurrContext.size() == 0)
+   if (CurrContext_ptr->size() == 0)
      ERROR("CObjectFactory::GetObjectIdNum(void)",
            << "please define current context id !");
-   return (U::AllMapObj[CObjectFactory::CurrContext].size());
+   if(U::AllMapObj == NULL) return 0;
+   return (* U::AllMapObj) [*CObjectFactory::CurrContext_ptr].size();
  }
…
  bool CObjectFactory::HasObject(const StdString & id)
  {
-   if (CurrContext.size() == 0)
+   if (CurrContext_ptr->size() == 0)
      ERROR("CObjectFactory::HasObject(const StdString & id)",
            << "[ id = " << id << " ] please define current context id !");
-   return (U::AllMapObj[CObjectFactory::CurrContext].find(id) !=
-           U::AllMapObj[CObjectFactory::CurrContext].end());
+   if(U::AllMapObj == NULL) return false;
+   return ((*U::AllMapObj)[*CObjectFactory::CurrContext_ptr].find(id) !=
+           (*U::AllMapObj)[*CObjectFactory::CurrContext_ptr].end());
  }
…
  bool CObjectFactory::HasObject(const StdString & context, const StdString & id)
  {
-   if (U::AllMapObj.find(context) == U::AllMapObj.end()) return false ;
-   else return (U::AllMapObj[context].find(id) != U::AllMapObj[context].end());
+   if(U::AllMapObj == NULL) return false;
+   if (U::AllMapObj->find(context) == U::AllMapObj->end()) return false ;
+   else
+   {
+     return ((*U::AllMapObj)[context].find(id) != (*U::AllMapObj)[context].end());
+   }
  }
…
  boost::shared_ptr<U> CObjectFactory::GetObject(const U * const object)
  {
+   if(U::AllVectObj == NULL) return (boost::shared_ptr<U>());
-   if (CurrContext.size() == 0)
+   if (CurrContext_ptr->size() == 0)
      ERROR("CObjectFactory::GetObject(const U * const object)",
            << "please define current context id !");
    std::vector<boost::shared_ptr<U> > & vect =
-     U::AllVectObj[CObjectFactory::CurrContext];
+     (*U::AllVectObj)[*CObjectFactory::CurrContext_ptr];
    typename std::vector<boost::shared_ptr<U> >::const_iterator
…
  boost::shared_ptr<U> CObjectFactory::GetObject(const StdString & id)
  {
+   if(U::AllMapObj == NULL) return (boost::shared_ptr<U>());
-   if (CurrContext.size() == 0)
+   if (CurrContext_ptr->size() == 0)
      ERROR("CObjectFactory::GetObject(const StdString & id)",
            << "[ id = " << id << " ] please define current context id !");
…
            << "[ id = " << id << ", U = " << U::GetName() << " ] "
            << "object was not found.");
-   return (U::AllMapObj[CObjectFactory::CurrContext][id]);
+   return (*U::AllMapObj)[*CObjectFactory::CurrContext_ptr][id];
  }
…
  boost::shared_ptr<U> CObjectFactory::GetObject(const StdString & context, const StdString & id)
  {
+   if(U::AllMapObj == NULL) return (boost::shared_ptr<U>());
    if (!CObjectFactory::HasObject<U>(context,id))
      ERROR("CObjectFactory::GetObject(const StdString & id)",
            << "[ id = " << id << ", U = " << U::GetName() <<", context = "<<context<< " ] "
            << "object was not found.");
-   return (U::AllMapObj[context][id]);
+   return (*U::AllMapObj)[context][id];
  }
…
  boost::shared_ptr<U> CObjectFactory::CreateObject(const StdString& id)
  {
+   if(U::AllVectObj == NULL) U::AllVectObj = new xios_map<StdString, std::vector<boost::shared_ptr<U> > >;
+   if(U::AllMapObj == NULL) U::AllMapObj = new xios_map<StdString, xios_map<StdString, boost::shared_ptr<U> > >;
-   if (CurrContext.empty())
+   if (CurrContext_ptr->empty())
      ERROR("CObjectFactory::CreateObject(const StdString& id)",
            << "[ id = " << id << " ] please define current context id !");
…
    boost::shared_ptr<U> value(new U(id.empty() ? CObjectFactory::GenUId<U>() : id));
-   U::AllVectObj[CObjectFactory::CurrContext].insert(U::AllVectObj[CObjectFactory::CurrContext].end(), value);
-   U::AllMapObj[CObjectFactory::CurrContext].insert(std::make_pair(value->getId(), value));
+   (* U::AllVectObj)[*CObjectFactory::CurrContext_ptr].insert((*U::AllVectObj)[*CObjectFactory::CurrContext_ptr].end(), value);
+   (* U::AllMapObj) [*CObjectFactory::CurrContext_ptr].insert(std::make_pair(value->getId(), value));
    return value;
…
  CObjectFactory::GetObjectVector(const StdString & context)
  {
-   return (U::AllVectObj[context]);
+   if(U::AllVectObj != NULL)
+     return (*U::AllVectObj)[context];
  }
…
  {
    StdOStringStream oss;
-   oss << GetUIdBase<U>() << U::GenId[CObjectFactory::CurrContext]++;
+   if(U::GenId == NULL) U::GenId = new xios_map< StdString, long int >;
+   oss << GetUIdBase<U>() << (*U::GenId)[*CObjectFactory::CurrContext_ptr]++;
    return oss.str();
  }
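Note the asymmetry these guards introduce: read paths treat a still-null container as "nothing registered yet" and return an empty result, while only the create paths allocate the container. A simplified sketch with stand-in types:

#include <map>
#include <memory>
#include <string>
#include <cstdio>

struct Obj { std::string id; };
static std::map<std::string, std::shared_ptr<Obj> >* allObjects = nullptr;

// Read path: never allocates; absent container means "no objects yet".
static std::shared_ptr<Obj> getObject(const std::string& id)
{
  if (allObjects == nullptr) return std::shared_ptr<Obj>(); // empty, not an error
  std::map<std::string, std::shared_ptr<Obj> >::iterator it = allObjects->find(id);
  return (it == allObjects->end()) ? std::shared_ptr<Obj>() : it->second;
}

// Write path: allocates the container on first use, then inserts.
static std::shared_ptr<Obj> createObject(const std::string& id)
{
  if (allObjects == nullptr)
    allObjects = new std::map<std::string, std::shared_ptr<Obj> >;
  std::shared_ptr<Obj> value(new Obj());
  value->id = id;
  (*allObjects)[id] = value;
  return value;
}

int main()
{
  std::printf("before create: found=%d\n", getObject("a") != nullptr);
  createObject("a");
  std::printf("after create:  found=%d\n", getObject("a") != nullptr);
  return 0;
}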
XIOS/dev/branch_yushan_merged/src/object_template.hpp
r1117 r1134
  /// Propriétés statiques ///
- static xios_map<StdString,
-        xios_map<StdString,
-        boost::shared_ptr<DerivedType> > > AllMapObj;
- static xios_map<StdString,
-        std::vector<boost::shared_ptr<DerivedType> > > AllVectObj;
- static xios_map< StdString, long int > GenId ;
+ // bkp
+ // static xios_map<StdString,
+ //        xios_map<StdString,
+ //        boost::shared_ptr<DerivedType> > > AllMapObj;
+ // static xios_map<StdString,
+ //        std::vector<boost::shared_ptr<DerivedType> > > AllVectObj;
+ // static xios_map< StdString, long int > GenId ;
+
+ static xios_map<StdString, xios_map<StdString, boost::shared_ptr<DerivedType> > > *AllMapObj;
+ static xios_map<StdString, std::vector<boost::shared_ptr<DerivedType> > > *AllVectObj;
+ static xios_map< StdString, long int > *GenId;
+ #pragma omp threadprivate(AllMapObj, AllVectObj, GenId)
  }; // class CObjectTemplate
XIOS/dev/branch_yushan_merged/src/object_template_impl.hpp
r1117 r1134
  xios_map<StdString,
           xios_map<StdString,
-                   boost::shared_ptr<T> > > CObjectTemplate<T>::AllMapObj;
+                   boost::shared_ptr<T> > > *CObjectTemplate<T>::AllMapObj = 0;
  template <class T>
  xios_map<StdString,
-          std::vector<boost::shared_ptr<T> > > CObjectTemplate<T>::AllVectObj;
+          std::vector<boost::shared_ptr<T> > > *CObjectTemplate<T>::AllVectObj = 0;
  template <class T>
- xios_map<StdString,long int> CObjectTemplate<T>::GenId;
+ xios_map<StdString,long int> *CObjectTemplate<T>::GenId = 0;
  template <class T>
…
  CObjectTemplate<T>::GetAllVectobject(const StdString & contextId)
  {
-   return (CObjectTemplate<T>::AllVectObj[contextId]);
+   return (CObjectTemplate<T>::AllVectObj->at(contextId));
  }
XIOS/dev/branch_yushan_merged/src/parse_expr/lex_parser.cpp
r1038 r1134
  extern char *yytext;
  #define yytext_ptr yytext
+
  static yyconst flex_int16_t yy_nxt[][128] =
  {
XIOS/dev/branch_yushan_merged/src/parse_expr/yacc_parser.cpp
r1038 r1134
  }
- IFilterExprNode* parsed;
- std::string globalInputText;
- size_t globalReadOffset = 0;
+ static IFilterExprNode* parsed;
+ static std::string globalInputText;
+ static std::string *globalInputText_ptr = 0;
+ static size_t globalReadOffset = 0;
+ #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset)
  int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead)
  {
+   if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;
    size_t numBytesToRead = maxBytesToRead;
-   size_t bytesRemaining = globalInputText.length()-globalReadOffset;
+   size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset;
    size_t i;
    if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining;
-   for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i];
+   for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i];
    *numBytesRead = numBytesToRead;
    globalReadOffset += numBytesToRead;
…
  IFilterExprNode* parseExpr(const string& strExpr)
  {
-   globalInputText = strExpr;
-   globalReadOffset = 0;
-   yyparse();
+   #pragma omp critical (_parser)
+   {
+     if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;
+     (*globalInputText_ptr).assign (strExpr);
+     globalReadOffset = 0;
+     yyparse();
+   }
    return parsed;
  }
XIOS/dev/branch_yushan_merged/src/parse_expr/yacc_parser.yacc
r1038 r1134
  }
- IFilterExprNode* parsed;
- std::string globalInputText;
- size_t globalReadOffset = 0;
+ static IFilterExprNode* parsed;
+ static std::string globalInputText;
+ static std::string *globalInputText_ptr = 0;
+ static size_t globalReadOffset = 0;
+ #pragma omp threadprivate(parsed, globalInputText_ptr, globalReadOffset)
  int readInputForLexer(char* buffer, size_t* numBytesRead, size_t maxBytesToRead)
  {
+   if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;
    size_t numBytesToRead = maxBytesToRead;
-   size_t bytesRemaining = globalInputText.length()-globalReadOffset;
+   size_t bytesRemaining = (*globalInputText_ptr).length()-globalReadOffset;
    size_t i;
    if (numBytesToRead > bytesRemaining) numBytesToRead = bytesRemaining;
-   for (i = 0; i < numBytesToRead; i++) buffer[i] = globalInputText.c_str()[globalReadOffset + i];
+   for (i = 0; i < numBytesToRead; i++) buffer[i] = (*globalInputText_ptr).c_str()[globalReadOffset + i];
    *numBytesRead = numBytesToRead;
    globalReadOffset += numBytesToRead;
…
  IFilterExprNode* parseExpr(const string& strExpr)
  {
-   globalInputText = strExpr;
-   globalReadOffset = 0;
-   yyparse();
+   #pragma omp critical (_parser)
+   {
+     if(globalInputText_ptr == 0) globalInputText_ptr = new std::string;
+     (*globalInputText_ptr).assign (strExpr);
+     globalReadOffset = 0;
+     yyparse();
+   }
    return parsed;
  }
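Because the generated flex/bison parser keeps internal static state, concurrent calls to yyparse() are unsafe; the named critical section serializes the whole parse while the input buffer and read offset live in threadprivate variables. A compact sketch of the same shape (parseOnce stands in for the non-reentrant yyparse):

#include <omp.h>
#include <string>
#include <cstdio>

static std::string* input = nullptr;  // per-thread input buffer
static size_t offset = 0;             // per-thread read offset
#pragma omp threadprivate(input, offset)

static int parseOnce()                // stand-in for yyparse()
{
  return (int)input->size();
}

static int parseExpr(const std::string& expr)
{
  int result = 0;
  #pragma omp critical (_parser)      // only one thread may drive the parser
  {
    if (input == nullptr) input = new std::string;
    input->assign(expr);
    offset = 0;
    result = parseOnce();
  }
  return result;
}

int main()
{
  #pragma omp parallel
  std::printf("parsed length: %d\n", parseExpr("a+b"));
  return 0;
}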
XIOS/dev/branch_yushan_merged/src/policy.hpp
r855 r1134
  {
    protected:
-     DivideAdaptiveComm(const MPI_Comm& mpiComm);
+     DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm);
      void computeMPICommLevel();
…
    protected:
-     const MPI_Comm& internalComm_;
+     const ep_lib::MPI_Comm& internalComm_;
      std::vector<std::vector<int> > groupParentsBegin_;
      std::vector<std::vector<int> > nbInGroupParents_;
XIOS/dev/branch_yushan_merged/src/registry.cpp
r696 r1134

 #include "registry.hpp"
 #include "type.hpp"
-#include <mpi.hpp>
 #include <fstream>
 #include <sstream>
…
 void CRegistry::hierarchicalGatherRegistry(void)
 {
-  hierarchicalGatherRegistry(communicator) ;
+  // hierarchicalGatherRegistry(communicator) ;
+  gatherRegistry(communicator) ;
 }
XIOS/dev/branch_yushan_merged/src/registry.hpp
r700 r1134

 #include "mpi.hpp"
 #include "message.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 // Those two headers can be replaced by the C++11 equivalent in the future
…
 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */
-CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
+CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
 
 /** Copy constructor */
…
 /** MPI communicator used for broadcast and gather operation */
-MPI_Comm communicator ;
+ep_lib::MPI_Comm communicator ;
 } ;
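The recurring MPI_Comm → ep_lib::MPI_Comm substitution in these headers works because the endpoint library defines its own communicator type carrying the real MPI handle inside it. The sketch below is an illustration of that idea only, not the actual ep_lib definition; the one member the changeset itself confirms is mpi_comm (see the static_cast in domain_algorithm_interpolate.cpp further down), and ep_rank here is a hypothetical field:

    #include <mpi.h>

    namespace ep_lib
    {
      // Illustrative endpoint communicator: one MPI process hosts several
      // "endpoints" (threads), each with its own logical rank.
      struct MPI_Comm
      {
        ::MPI_Comm mpi_comm;   // underlying real MPI communicator (confirmed by the diff)
        int ep_rank;           // endpoint-level rank (hypothetical)

        MPI_Comm() : mpi_comm(MPI_COMM_NULL), ep_rank(0) {}
      };
    }

Because only the type name changes, declarations such as CRegistry's communicator member migrate with a one-token edit, which is exactly what the diffs in this changeset do; call sites that must pass a raw handle reach it through the mpi_comm member.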
XIOS/dev/branch_yushan_merged/src/server.cpp
r1032 r1134

 #include <boost/functional/hash.hpp>
 #include <boost/algorithm/string.hpp>
-#include "mpi.hpp"
 #include "tracer.hpp"
 #include "timer.hpp"
…
 bool CServer::finished=false ;
 bool CServer::is_MPI_Initialized ;
+
 CEventScheduler* CServer::eventScheduler = 0;
 
 void CServer::initialize(void)
 {
-  int initialized ;
-  MPI_Initialized(&initialized) ;
-  if (initialized) is_MPI_Initialized=true ;
-  else is_MPI_Initialized=false ;
-
   // Not using OASIS
   if (!CXios::usingOasis)
   {
-    if (!is_MPI_Initialized)
-    {
-      MPI_Init(NULL, NULL);
-    }
     CTimer::get("XIOS").resume() ;
…
     unsigned long* hashAll ;
 
-    // int rank ;
     int size ;
     int myColor ;
…
     myColor=colors[hashServer] ;
-    MPI_Comm_split(MPI_COMM_WORLD,myColor,rank,&intraComm) ;
+    MPI_Comm_split(CXios::globalComm,myColor,rank,&intraComm) ;
 
     int serverLeader=leaders[hashServer] ;
     int clientLeader;
 
     serverLeader=leaders[hashServer] ;
-    for(it=leaders.begin();it!=leaders.end();it++)
+    for(it=leaders.begin();it!=leaders.end();++it)
     {
       if (it->first!=hashServer)
…
   else
   {
-    // int rank ,size;
     int size;
     if (!is_MPI_Initialized) oasis_init(CXios::xiosCodeId);
…
   }
 
-  // int rank;
   MPI_Comm_rank(intraComm,&rank) ;
   if (rank==0) isRoot=true;
…
   delete eventScheduler ;
 
-  for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+  for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); ++it)
     MPI_Comm_free(&(*it));
-  for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++)
+  for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); ++it)
     MPI_Comm_free(&(*it));
+
   MPI_Comm_free(&intraComm);
…
   {
     if (CXios::usingOasis) oasis_finalize();
-    else MPI_Finalize() ;
+    //else {MPI_Finalize() ;}
   }
 
   report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ;
   report(0)<<"Performance report : Time spent in processing events : "<<CTimer::get("Process events").getCumulatedTime()<<endl ;
…
     {
       listenRootContext();
-      if (!finished) listenRootFinalize() ;
+      if (!finished)
+      {
+        listenRootFinalize() ;
+      }
     }
 
     contextEventLoop() ;
     if (finished && contextList.empty()) stop=true ;
+
     eventScheduler->checkEvent() ;
   }
 
   CTimer::get("XIOS server").suspend() ;
…
   int flag ;
 
-  for(it=interComm.begin();it!=interComm.end();it++)
+  for(it=interComm.begin();it!=interComm.end();++it)
   {
     MPI_Status status ;
…
     MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ;
     info(20)<<" CServer : Receive client finalize"<<endl ;
+
     MPI_Comm_free(&(*it));
     interComm.erase(it) ;
…
   {
     traceOff() ;
+#ifdef _usingEP
+    MPI_Iprobe(-1,1,CXios::globalComm, &flag, &status) ;
+#else
     MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ;
+#endif
     traceOn() ;
 
     if (flag==true)
     {
+#ifdef _usingMPI
       rank=status.MPI_SOURCE ;
+#elif _usingEP
+      rank= status.ep_src ;
+#endif
       MPI_Get_count(&status,MPI_CHAR,&count) ;
       buffer=new char[count] ;
…
     if (flag==true)
     {
+#ifdef _usingMPI
       rank=status.MPI_SOURCE ;
+#elif _usingEP
+      rank= status.ep_src ;
+#endif
       MPI_Get_count(&status,MPI_CHAR,&count) ;
       recvContextMessage((void*)buffer,count) ;
…
   bool finished ;
   map<string,CContext*>::iterator it ;
-  for(it=contextList.begin();it!=contextList.end();it++)
+  for(it=contextList.begin();it!=contextList.end();++it)
   {
     finished=it->second->checkBuffersAndListen();
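The #ifdef blocks above exist because the two builds read the sender's rank from different status fields: status.MPI_SOURCE under plain MPI, status.ep_src under the EP layer (per this diff). A plain-MPI sketch of the probe/size/receive sequence being guarded, with a hypothetical helper name:

    #include <mpi.h>
    #include <vector>

    // Returns the sender's rank, or -1 if no message with tag 1 is pending.
    int probeAndReceive(MPI_Comm comm, std::vector<char>& buffer)
    {
      MPI_Status status;
      int flag = 0, count = 0;

      MPI_Iprobe(MPI_ANY_SOURCE, 1, comm, &flag, &status);
      if (!flag) return -1;

      int source = status.MPI_SOURCE;            // the EP build reads status.ep_src instead
      MPI_Get_count(&status, MPI_CHAR, &count);  // size the buffer before receiving
      buffer.resize(count);
      MPI_Recv(buffer.data(), count, MPI_CHAR, source, 1, comm, MPI_STATUS_IGNORE);
      return source;
    }

Probing before receiving is what allows the server loop to allocate an exactly-sized buffer and to learn the source rank when listening on MPI_ANY_SOURCE (or the EP equivalent, -1).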
XIOS/dev/branch_yushan_merged/src/server.hpp
r697 r1134

 #include "mpi.hpp"
 #include "event_scheduler.hpp"
+
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios
XIOS/dev/branch_yushan_merged/src/test/test_client.f90
r794 r1134

   CALL MPI_INIT(ierr)
+  CALL init_wait
 
-  CALL init_wait
+  CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
+  if(rank < 2) then
 
 !!! XIOS Initialization (get the local communicator)
…
   CALL xios_context_initialize("test",comm)
+
   CALL xios_get_handle("test",ctx_hdl)
   CALL xios_set_current_context(ctx_hdl)
…
   CALL xios_is_defined_field_attr("field_A",enabled=ok)
   PRINT *,"field_A : attribute enabled is defined ? ",ok
+
   CALL xios_close_context_definition()
 
   PRINT*,"field field_A is active ? ",xios_field_is_active("field_A")
+
+  call MPI_Barrier(comm, ierr)
+
   DO ts=1,24*10
     CALL xios_update_calendar(ts)
     CALL xios_send_field("field_A",field_A)
-    CALL wait_us(5000) ;
+    CALL wait_us(5000)
   ENDDO
…
   CALL xios_finalize()
+  print *, "Client : xios_finalize "
+
+  else
+
+  CALL xios_init_server
+  print *, "Server : xios_finalize "
+
+  endif
 
   CALL MPI_FINALIZE(ierr)
XIOS/dev/branch_yushan_merged/src/test/test_complete.f90
r787 r1134

 IMPLICIT NONE
 INCLUDE "mpif.h"
-INTEGER :: rank
+INTEGER :: rank, size
 INTEGER :: size_loc
 INTEGER :: ierr
…
 INTEGER, ALLOCATABLE :: kindex(:)
 INTEGER :: ni,ibegin,iend,nj,jbegin,jend
-INTEGER :: i,j,l,ts,n, nb_pt
+INTEGER :: i,j,l,ts,n, nb_pt, provided
 
 !!! MPI Initialization
 
-CALL MPI_INIT(ierr)
+CALL MPI_INIT_THREAD(3, provided, ierr)
+if(provided .NE. 3) then
+  print*, "provided thread level = ", provided
+  call MPI_Abort()
+endif
 
 CALL init_wait
+
+CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
+CALL MPI_COMM_SIZE(MPI_COMM_WORLD,size,ierr)
+if(rank < size-1) then
 
 !!! XIOS Initialization (get the local communicator)
…
 !####################################################################################
 
-DO ts=1,24*10
+DO ts=1,24*2
+!DO ts=1,24
 
   CALL xios_get_handle("atmosphere",ctx_hdl)
…
 !!! Fin des contextes
 
-CALL xios_context_finalize()
-CALL xios_get_handle("atmosphere",ctx_hdl)
+CALL xios_get_handle("surface",ctx_hdl)
+
 CALL xios_set_current_context(ctx_hdl)
 CALL xios_context_finalize()
 
+print *, "xios_context_finalize(surface)"
+
+CALL xios_get_handle("atmosphere",ctx_hdl)
+
+CALL xios_set_current_context(ctx_hdl)
+
+CALL xios_context_finalize()
+
+print *, "xios_context_finalize(atmosphere)"
+
+!!! Fin de XIOS
+
+CALL xios_finalize()
+
 DEALLOCATE(lon, lat, field_A_atm, lonvalue)
 DEALLOCATE(kindex, field_A_srf)
 
-!!! Fin de XIOS
+print *, "Client : xios_finalize "
 
 CALL MPI_COMM_FREE(comm, ierr)
 
-CALL xios_finalize()
+else
+
+CALL xios_init_server
+print *, "Server : xios_finalize "
+
+endif
 
 CALL MPI_FINALIZE(ierr)
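The test drivers now require MPI_THREAD_MULTIPLE (level 3) because threads will issue MPI calls concurrently under the EP scheme. Note that the Fortran call above, call MPI_Abort(), omits the communicator and error-code arguments that MPI_Abort requires. A C++ rendering of the intended check, for comparison:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      int provided = MPI_THREAD_SINGLE;
      MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);
      if (provided != MPI_THREAD_MULTIPLE)
      {
        std::printf("provided thread level = %d\n", provided);
        MPI_Abort(MPI_COMM_WORLD, 1);   // comm and error code are mandatory arguments
      }
      // ... rank-based client/server split as in the test above ...
      MPI_Finalize();
      return 0;
    }

Checking the provided level matters because an MPI library is allowed to grant less than what was requested, and silently continuing with MPI_THREAD_FUNNELED or lower would make the multithreaded communication undefined.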
XIOS/dev/branch_yushan_merged/src/timer.cpp
r652 r1134

 std::map<std::string,CTimer> CTimer::allTimer;
+std::map<std::string,CTimer> *CTimer::allTimer_ptr = 0;
 
 CTimer::CTimer(const std::string& name_) : name(name_)
…
 CTimer& CTimer::get(const std::string name)
 {
-  std::map<std::string,CTimer>::iterator it = allTimer.find(name);
-  if (it == allTimer.end())
-    it = allTimer.insert(std::make_pair(name, CTimer(name))).first;
+  // bkp
+  // std::map<std::string,CTimer>::iterator it = allTimer.find(name);
+  // if (it == allTimer.end())
+  //   it = allTimer.insert(std::make_pair(name, CTimer(name))).first;
+  // return it->second;
+
+  if(allTimer_ptr == NULL) allTimer_ptr = new std::map<std::string,CTimer>;
+
+  std::map<std::string,CTimer>::iterator it = (*allTimer_ptr).find(name);
+  if (it == (*allTimer_ptr).end())
+    it = (*allTimer_ptr).insert(std::make_pair(name, CTimer(name))).first;
   return it->second;
 }
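The rewritten CTimer::get is a construct-on-first-use map behind a threadprivate pointer: the static initializer for allTimer runs once on the main thread, but every OpenMP thread sees its own, initially null, allTimer_ptr, so the allocation must happen lazily inside get(). The idiom stripped to its essentials, with illustrative names:

    #include <map>
    #include <string>

    struct Timer { /* timing state elided */ };

    static std::map<std::string, Timer>* timers = 0;
    #pragma omp threadprivate(timers)

    Timer& getTimer(const std::string& name)
    {
      // First call on each thread allocates that thread's private map.
      if (timers == 0) timers = new std::map<std::string, Timer>;
      std::map<std::string, Timer>::iterator it = timers->find(name);
      if (it == timers->end())
        it = timers->insert(std::make_pair(name, Timer())).first;
      return it->second;
    }

The maps are deliberately never freed; each thread's copy lives until the process exits, which is the usual trade-off of this idiom.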
XIOS/dev/branch_yushan_merged/src/timer.hpp
r688 r1134

   double getCumulatedTime(void);
   static std::map<std::string,CTimer> allTimer;
+
+  static std::map<std::string,CTimer> *allTimer_ptr;
+  #pragma omp threadprivate(allTimer_ptr)
+
   static double getTime(void);
   static CTimer& get(std::string name);
XIOS/dev/branch_yushan_merged/src/transformation/Functions/reduction.cpp
r979 r1134

 CReductionAlgorithm::CallBackMap* CReductionAlgorithm::reductionCreationCallBacks_ = 0;
-std::map<StdString,EReductionType> CReductionAlgorithm::ReductionOperations = std::map<StdString,EReductionType>();
+//std::map<StdString,EReductionType> CReductionAlgorithm::ReductionOperations = std::map<StdString,EReductionType>();
+std::map<StdString,EReductionType> *CReductionAlgorithm::ReductionOperations_ptr = 0;
 
 bool CReductionAlgorithm::initReductionOperation(std::map<StdString,EReductionType>& m)
 {
…
 }
 
-bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(CReductionAlgorithm::ReductionOperations);
+bool CReductionAlgorithm::initReductionOperation()
+{
+  if(CReductionAlgorithm::ReductionOperations_ptr == NULL) CReductionAlgorithm::ReductionOperations_ptr = new std::map<StdString,EReductionType>();
+  // So so stupid way to intialize operation but it works ...
+  (*CReductionAlgorithm::ReductionOperations_ptr)["sum"] = TRANS_REDUCE_SUM;
+  CSumReductionAlgorithm::registerTrans();
+
+  (*CReductionAlgorithm::ReductionOperations_ptr)["min"] = TRANS_REDUCE_MIN;
+  CMinReductionAlgorithm::registerTrans();
+
+  (*CReductionAlgorithm::ReductionOperations_ptr)["max"] = TRANS_REDUCE_MAX;
+  CMaxReductionAlgorithm::registerTrans();
+
+  (*CReductionAlgorithm::ReductionOperations_ptr)["extract"] = TRANS_REDUCE_EXTRACT;
+  CExtractReductionAlgorithm::registerTrans();
+
+  (*CReductionAlgorithm::ReductionOperations_ptr)["average"] = TRANS_REDUCE_AVERAGE;
+  CAverageReductionAlgorithm::registerTrans();
+}
+
+//bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation(CReductionAlgorithm::ReductionOperations);
+bool CReductionAlgorithm::_dummyInit = CReductionAlgorithm::initReductionOperation();
 
 CReductionAlgorithm* CReductionAlgorithm::createOperation(EReductionType reduceType)
 {
   int reduceTypeInt = reduceType;
+  //if (0 == reductionCreationCallBacks_)
+  //  reductionCreationCallBacks_ = new CallBackMap();
+
   CallBackMap::const_iterator it = (*reductionCreationCallBacks_).find(reduceType);
   if ((*reductionCreationCallBacks_).end() == it)
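The new initReductionOperation() follows the same lazy-pointer idiom for the operation-name table. Two details worth flagging: the function is declared bool but falls off the end without returning a value, which is undefined behavior in C++, and the equivalent lazy guard is left commented out in createOperation here, while the transformation factory (grid_transformation_factory_impl.hpp, below) enables it. A corrected, self-contained version of the lookup-table idea, with illustrative names:

    #include <map>
    #include <string>

    enum EReduction { REDUCE_SUM, REDUCE_MIN, REDUCE_MAX };

    static std::map<std::string, EReduction>* ops = 0;
    #pragma omp threadprivate(ops)

    static bool initOps()
    {
      if (ops == 0) ops = new std::map<std::string, EReduction>;
      (*ops)["sum"] = REDUCE_SUM;
      (*ops)["min"] = REDUCE_MIN;
      (*ops)["max"] = REDUCE_MAX;
      return true;   // the diff's version omits this return
    }

    EReduction lookup(const std::string& name)
    {
      if (ops == 0) initOps();   // threads that never ran the static initializer still work
      return (*ops)[name];
    }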
XIOS/dev/branch_yushan_merged/src/transformation/Functions/reduction.hpp
r1076 r1134

 {
 public:
-  static std::map<StdString,EReductionType> ReductionOperations;
+  //static std::map<StdString,EReductionType> ReductionOperations;
+  static std::map<StdString,EReductionType> *ReductionOperations_ptr;
+  #pragma omp threadprivate(ReductionOperations_ptr)
 
 public:
…
   typedef std::map<EReductionType, CreateOperationCallBack> CallBackMap;
   static CallBackMap* reductionCreationCallBacks_;
+  #pragma omp threadprivate(reductionCreationCallBacks_)
 
   static bool registerOperation(EReductionType reduceType, CreateOperationCallBack createFn);
…
 protected:
   static bool initReductionOperation(std::map<StdString,EReductionType>& m);
+  static bool initReductionOperation();
   static bool _dummyInit;
+  #pragma omp threadprivate(_dummyInit)
 };
XIOS/dev/branch_yushan_merged/src/transformation/axis_algorithm_extract_domain.cpp
r1076 r1134

   pos_ = algo->position;
-  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]);
+  reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
 }
XIOS/dev/branch_yushan_merged/src/transformation/axis_algorithm_inverse.cpp
r936 r1134

 // Sending global index of grid source to corresponding process as well as the corresponding mask
-std::vector<MPI_Request> requests;
-std::vector<MPI_Status> status;
+std::vector<ep_lib::MPI_Request> requests;
+std::vector<ep_lib::MPI_Status> status;
 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
 boost::unordered_map<int, double* > sendValueToDest;
…
   sendValueToDest[recvRank] = new double [recvSize];
 
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
 }
…
   // Send global index source and mask
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
 }
 
 MPI_Waitall(requests.size(), &requests[0], &status[0]);
 
-std::vector<MPI_Request>().swap(requests);
-std::vector<MPI_Status>().swap(status);
+std::vector<ep_lib::MPI_Request>().swap(requests);
+std::vector<ep_lib::MPI_Status>().swap(status);
 
 // Okie, on destination side, we will wait for information of masked index of source
…
   int recvSize = itSend->second;
 
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
 }
…
   }
   // Okie, now inform the destination which source index are masked
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back());
 }
 status.resize(requests.size());
 MPI_Waitall(requests.size(), &requests[0], &status[0]);
 
 size_t nGloAxisDest = axisDest_->n_glo.getValue() - 1;
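All of the request-type changes in this file keep the same nonblocking exchange shape: grow a vector of requests as receives and sends are posted, then complete them all with a single MPI_Waitall. A sketch of that core pattern in plain MPI (tag 46 mirrors the diff; the function name is illustrative):

    #include <mpi.h>
    #include <vector>

    void exchange(MPI_Comm comm, int peer,
                  std::vector<unsigned long>& sendBuf,
                  std::vector<unsigned long>& recvBuf)
    {
      std::vector<MPI_Request> requests;

      // Post the receive first, then the send; both complete in MPI_Waitall.
      requests.push_back(MPI_Request());
      MPI_Irecv(&recvBuf[0], recvBuf.size(), MPI_UNSIGNED_LONG, peer, 46, comm, &requests.back());

      requests.push_back(MPI_Request());
      MPI_Isend(&sendBuf[0], sendBuf.size(), MPI_UNSIGNED_LONG, peer, 46, comm, &requests.back());

      // Buffers must remain valid until this returns.
      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
    }

Reallocation of the requests vector between posts is safe because MPI_Request is a value handle: MPI has already written it by the time push_back can move it, and MPI_Waitall only needs the final addresses.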
XIOS/dev/branch_yushan_merged/src/transformation/axis_algorithm_inverse.hpp
r933 r1134

 #include "axis_algorithm_transformation.hpp"
 #include "transformation.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios {
XIOS/dev/branch_yushan_merged/src/transformation/axis_algorithm_reduce_domain.cpp
r1076 r1134

   dir_ = (CReduceDomainToAxis::direction_attr::iDir == algo->direction) ? iDir : jDir;
-  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]);
+  reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
 }
XIOS/dev/branch_yushan_merged/src/transformation/domain_algorithm_interpolate.cpp
r1114 r1134

 CContextClient* client=context->client;
 
-MPI_Comm poleComme(MPI_COMM_NULL);
-MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
+ep_lib::MPI_Comm poleComme(MPI_COMM_NULL);
+ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme);
 if (MPI_COMM_NULL != poleComme)
 {
   int nbClientPole;
-  MPI_Comm_size(poleComme, &nbClientPole);
+  ep_lib::MPI_Comm_size(poleComme, &nbClientPole);
 
   std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole,
…
 double* sendWeightBuff = new double [sendBuffSize];
 
-std::vector<MPI_Request> sendRequest;
+std::vector<ep_lib::MPI_Request> sendRequest;
 
 int sendOffSet = 0, l = 0;
…
   }
 
-  sendRequest.push_back(MPI_Request());
+  sendRequest.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendIndexDestBuff + sendOffSet,
             k,
             …
             client->intraComm,
             &sendRequest.back());
-  sendRequest.push_back(MPI_Request());
+  sendRequest.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendIndexSrcBuff + sendOffSet,
             k,
             …
             client->intraComm,
             &sendRequest.back());
-  sendRequest.push_back(MPI_Request());
+  sendRequest.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendWeightBuff + sendOffSet,
             k,
             …
 while (receivedSize < recvBuffSize)
 {
-  MPI_Status recvStatus;
+  ep_lib::MPI_Status recvStatus;
   MPI_Recv((recvIndexDestBuff + receivedSize),
            recvBuffSize,
…
   int countBuff = 0;
   MPI_Get_count(&recvStatus, MPI_INT, &countBuff);
+#ifdef _usingMPI
   clientSrcRank = recvStatus.MPI_SOURCE;
+#elif _usingEP
+  clientSrcRank = recvStatus.ep_src;
+#endif
   MPI_Recv((recvIndexSrcBuff + receivedSize),
            recvBuffSize,
…
 }
 
-std::vector<MPI_Status> requestStatus(sendRequest.size());
-MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE);
+std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size());
+ep_lib::MPI_Status stat_ignore;
+MPI_Waitall(sendRequest.size(), &sendRequest[0], &stat_ignore);
 
 delete [] sendIndexDestBuff;
…
 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm);
-MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
+ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm);
 
 if (0 == globalNbWeight)
…
 std::vector<StdSize> start(1, startIndex - localNbWeight);
 std::vector<StdSize> count(1, localNbWeight);
 
-WriteNetCdf netCdfWriter(filename, client->intraComm);
+WriteNetCdf netCdfWriter(filename, static_cast<MPI_Comm>(client->intraComm.mpi_comm));
 
 // Define some dimensions
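One change above deserves a caveat: MPI_Waitall over sendRequest.size() requests is now passed a single ep_lib::MPI_Status (stat_ignore). The EP layer evidently accepts that, but in standard MPI the third argument must be an array with one entry per request, or the plural constant MPI_STATUSES_IGNORE; a lone status object is only valid for the single-request MPI_Wait. The portable spellings, for reference:

    #include <mpi.h>
    #include <vector>

    void completeAll(std::vector<MPI_Request>& requests)
    {
      if (requests.empty()) return;
      // Keep the statuses: array sized to the request count.
      std::vector<MPI_Status> status(requests.size());
      MPI_Waitall(static_cast<int>(requests.size()), &requests[0], &status[0]);
    }

    void completeAllDiscard(std::vector<MPI_Request>& requests)
    {
      if (requests.empty()) return;
      // Or discard them all explicitly.
      MPI_Waitall(static_cast<int>(requests.size()), &requests[0], MPI_STATUSES_IGNORE);
    }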
XIOS/dev/branch_yushan_merged/src/transformation/domain_algorithm_interpolate.hpp
r1014 r1134

 #include "transformation.hpp"
 #include "nc4_data_output.hpp"
+#ifdef _usingEP
+#include "ep_declaration.hpp"
+#endif
 
 namespace xios {
XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation.cpp
r1078 r1134

 // Sending global index of grid source to corresponding process as well as the corresponding mask
-std::vector<MPI_Request> requests;
-std::vector<MPI_Status> status;
+std::vector<ep_lib::MPI_Request> requests;
+std::vector<ep_lib::MPI_Status> status;
 boost::unordered_map<int, unsigned char* > recvMaskDst;
 boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
…
   recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
 
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
 }
…
   // Send global index source and mask
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
 }
…
 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
-std::vector<MPI_Request>().swap(requests);
-std::vector<MPI_Status>().swap(status);
+std::vector<ep_lib::MPI_Request>().swap(requests);
+std::vector<ep_lib::MPI_Status>().swap(status);
 // Okie, on destination side, we will wait for information of masked index of source
 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
 {
   int recvSize = itSend->second;
 
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
 }
…
   // Okie, now inform the destination which source index are masked
-  requests.push_back(MPI_Request());
+  requests.push_back(ep_lib::MPI_Request());
   MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
 }
XIOS/dev/branch_yushan_merged/src/transformation/grid_transformation_factory_impl.hpp
r933 r1134

   typedef std::map<ETranformationType, CreateTransformationCallBack> CallBackMap;
   static CallBackMap* transformationCreationCallBacks_;
+  #pragma omp threadprivate(transformationCreationCallBacks_)
+
   static bool registerTransformation(ETranformationType transType, CreateTransformationCallBack createFn);
   static bool unregisterTransformation(ETranformationType transType);
   static bool initializeTransformation_;
+  #pragma omp threadprivate(initializeTransformation_)
 };
…
                 std::map<int, int>& elementPositionInGridDst2DomainPosition)
 {
+  if (0 == transformationCreationCallBacks_)
+    transformationCreationCallBacks_ = new CallBackMap();
   typename CallBackMap::const_iterator it = (*transformationCreationCallBacks_).find(transType);
   if ((*transformationCreationCallBacks_).end() == it)
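The new null check in createTransformation is the crash fix that the commented-out guard in reduction.cpp hints at: static registration of callbacks runs once, on one thread, while the threadprivate transformationCreationCallBacks_ pointer starts as null on every other thread. A reduced sketch of the guard, with illustrative stand-in types:

    #include <map>

    typedef int ETransType;          // stand-in for the real enum
    typedef void* (*CreateFn)();

    typedef std::map<ETransType, CreateFn> CallBackMap;
    static CallBackMap* callbacks = 0;
    #pragma omp threadprivate(callbacks)

    void* create(ETransType type)
    {
      if (callbacks == 0) callbacks = new CallBackMap();  // per-thread lazy allocation
      CallBackMap::const_iterator it = callbacks->find(type);
      if (it == callbacks->end()) return 0;               // nothing registered on this thread
      return (it->second)();                              // invoke the registered creator
    }

Note that lazily allocating the map only avoids the null dereference; each thread's map still needs its entries registered, which is what initReductionOperation() does for the reduction table.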
XIOS/dev/branch_yushan_merged/src/transformation/scalar_algorithm_extract_axis.cpp
r1076 r1134

   StdString op = "extract";
   pos_ = algo->position;
-  reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]);
+  reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
 }
XIOS/dev/branch_yushan_merged/src/transformation/scalar_algorithm_reduce_axis.cpp
r1082 r1134

 }
 
-if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op))
+if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op))
   ERROR("CScalarAlgorithmReduceAxis::CScalarAlgorithmReduceAxis(CAxis* axisDestination, CAxis* axisSource, CReduceAxisToScalar* algo)",
         << "Operation '" << op << "' not found. Please make sure to use a supported one"
…
         << "Scalar destination " << scalarDestination->getId());
 
-reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]);
+reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
XIOS/dev/branch_yushan_merged/src/transformation/scalar_algorithm_reduce_domain.cpp
r1076 r1134

 }
 
-if (CReductionAlgorithm::ReductionOperations.end() == CReductionAlgorithm::ReductionOperations.find(op))
+if ((*CReductionAlgorithm::ReductionOperations_ptr).end() == (*CReductionAlgorithm::ReductionOperations_ptr).find(op))
   ERROR("CScalarAlgorithmReduceDomain::CScalarAlgorithmReduceDomain(CDomain* domainDestination, CDomain* domainSource, CReduceDomainToScalar* algo)",
         << "Operation '" << op << "' not found. Please make sure to use a supported one"
…
         << "Scalar destination " << scalarDestination->getId());
 
-reduction_ = CReductionAlgorithm::createOperation(CReductionAlgorithm::ReductionOperations[op]);
+reduction_ = CReductionAlgorithm::createOperation((*CReductionAlgorithm::ReductionOperations_ptr)[op]);
XIOS/dev/branch_yushan_merged/src/type/type.hpp
r1107 r1134

   const CType_ref& operator = (CType<T>& val) const ;
   const CType_ref& operator = (const CType_ref& val) const;
-  operator T&() const;
+  operator T&() const;
 
   inline virtual CBaseType* clone(void) const { return _clone(); }

(whitespace-only change: trailing blanks removed from the conversion-operator line)
XIOS/dev/branch_yushan_merged/src/xios_server.f90
r501 r1134

 PROGRAM server_main
   USE xios
+  USE mod_wait
   IMPLICIT NONE
   INCLUDE "mpif.h"
-  INTEGER :: ierr
+  INTEGER :: ierr, th_level
 
+  CALL MPI_INIT(ierr)
+  !CALL MPI_INIT_thread(3, th_level, ierr)
+  CALL init_wait
   CALL xios_init_server
+
+  CALL MPI_FINALIZE(ierr)
 
 END PROGRAM server_main