Changeset 1081
- Timestamp: 03/31/17 20:26:21 (8 years ago)
- Location: XIOS/dev/branch_yushan
- Files: 17 edited
XIOS/dev/branch_yushan/bld.cfg
r1080 → r1081

  #bld::target test_remap.exe
  #bld::target test_new_features.exe test_unstruct_complete.exe
- bld::target test_omp.exe #test_client.exe #test_complete.exe
+ bld::target test_omp.exe test_client.exe #test_complete.exe
  bld::exe_dep
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_split.cpp
r1056 → r1081

  {
    *newcomm = comm.ep_comm_ptr->comm_list->mem_bridge[new_ep_rank_loc];
-   //newcomm = &(comm.ep_comm_ptr->comm_list->mem_bridge[new_ep_rank_loc]);
+   // newcomm = &(comm.ep_comm_ptr->comm_list->mem_bridge[new_ep_rank_loc]);
    (*newcomm).ep_comm_ptr->comm_label = color;
  }
XIOS/dev/branch_yushan/extern/src_ep_dev/ep_type.hpp
r1068 → r1081

        }

+       // ep_intercomm(ep_intercomm &ref)
+       // {
+       //   printf("calling copy Constructor of ep_intercomm\n");
+       //   ep_intercomm return_intercomm;
+
+       //   return_intercomm.mpi_inter_comm = ref.mpi_inter_comm;
+       //   return_intercomm.intercomm_rank_map = ref.intercomm_rank_map;
+       //   return_intercomm.local_rank_map = ref.local_rank_map;
+       //   return_intercomm.remote_rank_map = ref.remote_rank_map;
+       //   return_intercomm.size_rank_info[0] = ref.size_rank_info[0];
+       //   return_intercomm.size_rank_info[1] = ref.size_rank_info[1];
+       //   return_intercomm.size_rank_info[2] = ref.size_rank_info[2];
+       //   return_intercomm.local_comm = ref.local_comm;
+       //   return_intercomm.intercomm_tag = ref.intercomm_tag;
+       // }
+
        bool operator == (ep_intercomm right)
        {
…
          return a||b||c||d||e||f;
        }
+
+       // ep_intercomm operator = (ep_intercomm ref)
+       // {
+       //   printf("calling = operator of ep_intercomm\n");
+       //   ep_intercomm return_intercomm;
+
+       //   return_intercomm.mpi_inter_comm = ref.mpi_inter_comm;
+       //   return_intercomm.intercomm_rank_map = ref.intercomm_rank_map;
+       //   return_intercomm.local_rank_map = ref.local_rank_map;
+       //   return_intercomm.remote_rank_map = ref.remote_rank_map;
+       //   return_intercomm.size_rank_info[0] = ref.size_rank_info[0];
+       //   return_intercomm.size_rank_info[1] = ref.size_rank_info[1];
+       //   return_intercomm.size_rank_info[2] = ref.size_rank_info[2];
+       //   return_intercomm.local_comm = ref.local_comm;
+       //   return_intercomm.intercomm_tag = ref.intercomm_tag;
+       // }
      };
…
          return a||b||c;
        }
+
+       // ep_communicator operator = (ep_communicator ref)
+       // {
+       //   printf("calling = operator of ep_communicator\n");
+       //   ep_communicator return_ep;
+
+       //   return_ep.intercomm = ref.intercomm;
+       //   return_ep.comm_label = ref.comm_label;
+       //   return_ep.message_queue = ref.message_queue;
+       //   return_ep.comm_list = ref.comm_list;
+       //   return_ep.size_rank_info[0] = ref.size_rank_info[0];
+       //   return_ep.size_rank_info[1] = ref.size_rank_info[1];
+       //   return_ep.size_rank_info[2] = ref.size_rank_info[2];
+       // }
      };
…
          return a||b||c||d;
        }
+
+       // MPI_Comm operator = (MPI_Comm ref)
+       // {
+       //   printf("calling = operator of MPI_Comm\n");
+       //   MPI_Comm return_comm;
+
+       //   return_comm.mpi_comm = ref.mpi_comm;
+       //   return_comm.is_ep = ref.is_ep;
+       //   return_comm.is_intercomm = ref.is_intercomm;
+       //   return_comm.my_buffer = ref.my_buffer;
+       //   return_comm.ep_barrier = ref.ep_barrier;
+       //   return_comm.rank_map = ref.rank_map;
+       //   return_comm.ep_comm_ptr = ref.ep_comm_ptr;
+       // }
      };
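The blocks added here are commented-out copy constructors and assignment operators, kept for reference only. Note that they would also be incorrect if re-enabled: each one fills a local `return_intercomm`/`return_ep`/`return_comm` and never touches `*this`. For plain member-wise copying, the compiler-generated special members already do the right thing. A minimal sketch with hypothetical stand-in types (not the real ep_lib definitions):

    #include <cstdio>
    #include <map>
    #include <vector>

    // A struct whose members are all copyable gets correct compiler-generated
    // copy construction and copy assignment, so hand-written member-by-member
    // versions like the ones commented out above are redundant.
    struct intercomm_like
    {
      int                size_rank_info[3];  // arrays are copied element-wise
      std::map<int, int> local_rank_map;     // std::map copies its elements
      std::vector<int>   remote_rank_map;    // std::vector copies its elements
      int                intercomm_tag = 0;
    };

    int main()
    {
      intercomm_like a;
      a.intercomm_tag = 42;
      a.local_rank_map[0] = 7;

      intercomm_like b = a;  // implicit copy constructor
      intercomm_like c;
      c = a;                 // implicit copy assignment

      std::printf("b.tag=%d c.map[0]=%d\n", b.intercomm_tag, c.local_rank_map[0]);
      return 0;
    }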
XIOS/dev/branch_yushan/src/client.cpp
r1080 → r1081

    //MPI_Init(NULL, NULL);
    int return_level;
-   #ifdef _intelmpi
    MPI_Init_thread(NULL, NULL, 3, &return_level);
    assert(return_level == 3);
-   #elif _openmpi
-   MPI_Init_thread(NULL, NULL, 2, &return_level);
-   assert(return_level == 2);
-   #endif
  }
  CTimer::get("XIOS").resume() ;
…
  unsigned long hashClient=hashString(codeId) ;
  unsigned long hashServer=hashString(CXios::xiosCodeId) ;
- //hashServer=hashString("xios.x") ;
  unsigned long* hashAll ;
  int size ;
…
  MPI_Comm_size(CXios::globalComm,&size);
  MPI_Comm_rank(CXios::globalComm,&rank);
-
- printf("client init : rank = %d, size = %d\n", rank, size);

  hashAll=new unsigned long[size] ;
…
  MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;

- int interCommSize, interCommRank ;
- MPI_Comm_size(interComm,&interCommSize) ;
- MPI_Comm_rank(interComm,&interCommRank) ;
-
- #pragma omp critical(_output)
- {
-   info(50)<<" interCommRank :"<<interCommRank
-           <<" interCommSize : "<< interCommSize << endl ;
- }
  }
  else
…
  void CClient::registerContext(const string& id,MPI_Comm contextComm)
  {
+   //#pragma omp critical(_output)
    //info(50) << "Client "<<getRank() << " start registerContext using info output" << endl;
-   printf("Client %d start registerContext\n", getRank());
-   //printf("Client start registerContext\n");

    CContext::setCurrent(id) ;
-   printf("Client %d CContext::setCurrent OK\n", getRank());
    CContext* context = CContext::create(id);
-   printf("Client %d context=CContext::create(%s) OK, *context = %p\n", getRank(), id, &(*context));
+
+   #pragma omp critical (_output)
+   printf("Client::registerContext context add = %p\n", &(*context));

    StdString idServer(id);
…
    {
      int size,rank,globalRank ;
-     size_t message_size ;
-     int leaderRank ;
+     //size_t message_size ;
+     //int leaderRank ;
      MPI_Comm contextInterComm ;
…
      #pragma omp critical (_output)
-     printf("Client %d : MPI_Intercomm_merge OK \n", getRank()) ;
+     printf("Client %d context=CContext::create(%s) OK, context.identifier = %d\n", getRank(), id, context->get_identifier());

      context->initClient(contextComm,contextInterComm) ;

      #pragma omp critical (_output)
      printf("Client %d : context->initClient(contextComm,contextInterComm) OK \n", getRank()) ;
…
    }

-   info(20) << "Client side context is finalized"<<endl ;
-   report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
-   report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
-   report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
-   report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
-   // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
-   report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
-   report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
+   //info(20) << "Client "<<rank<<" : Client side context is finalized "<< endl ;
+   // report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ;
+   // report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ;
+   // report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ;
+   // report(0)<< " Performance report : This ratio must be close to zero. Otherwise it may be usefull to increase buffer size or numbers of server"<<endl ;
+   // // report(0)<< " Memory report : Current buffer_size : "<<CXios::bufferSize<<endl ;
+   // report(0)<< " Memory report : Minimum buffer size required : " << CClientBuffer::maxRequestSize << " bytes" << endl ;
+   // report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ;
  }
…
  fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext;
- printf("getrank() = %d, file name = %s\n", getRank(), fileNameClient.str().c_str());
+ //printf("getrank() = %d, file name = %s\n", getRank(), fileNameClient.str().c_str());

  fb->open(fileNameClient.str().c_str(), std::ios::out);
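The first hunk drops the per-implementation `#ifdef _intelmpi`/`_openmpi` split and always requests thread support level 3, the numeric value of MPI_THREAD_MULTIPLE, which the multi-threaded endpoint mode needs. A minimal standalone sketch of that initialization pattern, checking the granted level rather than asserting on it:

    #include <mpi.h>
    #include <cstdio>
    #include <cstdlib>

    // Request fully multi-threaded MPI and verify the level actually granted
    // (level 3 == MPI_THREAD_MULTIPLE) before any thread touches MPI.
    int main(int argc, char** argv)
    {
      int provided = MPI_THREAD_SINGLE;
      MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided);

      if (provided < MPI_THREAD_MULTIPLE)
      {
        std::fprintf(stderr, "MPI_THREAD_MULTIPLE not available (got %d)\n", provided);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
      }

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      std::printf("rank %d running with MPI_THREAD_MULTIPLE\n", rank);

      MPI_Finalize();
      return 0;
    }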
XIOS/dev/branch_yushan/src/client.hpp
r1080 → r1081

      static std::list<MPI_Comm> contextInterComms;
-     // #pragma omp threadprivate(contextInterComms)
      //std::list<MPI_Comm> contextInterComms;
+     static std::list<MPI_Comm> * contextInterComms_ptr;
+     #pragma omp threadprivate(contextInterComms_ptr)

      static int serverLeader;
-     //#pragma omp threadprivate(serverLeader)
+     #pragma omp threadprivate(serverLeader)

      static bool is_MPI_Initialized ;
-     //#pragma omp threadprivate(is_MPI_Initialized)
+     #pragma omp threadprivate(is_MPI_Initialized)

      //! Get rank of the current process
…
      static StdOFStream m_infoStream;
-     //#pragma omp threadprivate(m_infoStream)
+     #pragma omp threadprivate(m_infoStream)

      static StdOFStream m_errorStream;
-     //#pragma omp threadprivate(m_errorStream)
+     #pragma omp threadprivate(m_errorStream)

      static void openStream(const StdString& fileName, const StdString& ext, std::filebuf* fb);
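This header enables the previously commented `#pragma omp threadprivate` directives, so each OpenMP thread (each XIOS endpoint) gets its own copy of these static members instead of sharing one. A minimal sketch of the pattern, with a hypothetical variable name borrowed from the header:

    #include <omp.h>
    #include <cstdio>

    // A static variable declared threadprivate is replicated per OpenMP
    // thread: writes in one thread are invisible to the others.
    static int serverLeader = -1;
    #pragma omp threadprivate(serverLeader)

    int main()
    {
      #pragma omp parallel num_threads(4)
      {
        serverLeader = omp_get_thread_num();   // each thread writes its own copy
        #pragma omp critical
        std::printf("thread %d sees serverLeader = %d\n",
                    omp_get_thread_num(), serverLeader);
      }
      return 0;
    }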
XIOS/dev/branch_yushan/src/context_client.cpp
r1070 → r1081

  namespace xios
  {
+   CContextClient::CContextClient() {}
+
    /*!
    \param [in] parent Pointer to context on client side
    \param [in] intraComm_ communicator of group client
    \param [in] interComm_ communicator of group server
-   \cxtSer[in] cxtSer Pointer to context of server side. (It is only used on case of attached mode)
+   \param [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode)
    */
    CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)
…
      ranksServerLeader.push_back(remain + rank / clientByServer);
    }
+
+   printf("clientRank = %d (%p)\n", clientRank, &clientRank);
  }

(plus a few added blank lines inside the constructor body)
XIOS/dev/branch_yushan/src/context_client.hpp
r1053 → r1081

    public:
      // Contructor
+     CContextClient();
      CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0);
XIOS/dev/branch_yushan/src/cxios.cpp
r1079 → r1081

  namespace xios
  {
-   string CXios::rootFile="./iodef.xml" ;
-   string CXios::xiosCodeId="xios.x" ;
-   string CXios::clientFile="./xios_client";
-   string CXios::serverFile="./xios_server";
+   const string CXios::rootFile="./iodef.xml" ;
+   const string CXios::xiosCodeId="xios.x" ;
+   const string CXios::clientFile="./xios_client";
+   const string CXios::serverFile="./xios_server";
+   //#pragma omp threadprivate(CXios::rootFile, CXios::xiosCodeId, CXios::clientFile, CXios::serverFile)

    bool CXios::isClient ;
    bool CXios::isServer ;
+   #pragma omp threadprivate(CXios::isServer, CXios::isClient)
+
    MPI_Comm CXios::globalComm ;
+   #pragma omp threadprivate(CXios::globalComm)

    bool CXios::usingOasis ;
    bool CXios::usingServer = false;
+   #pragma omp threadprivate(CXios::usingOasis, CXios::usingServer)
+
    double CXios::bufferSizeFactor = 1.0;
    const double CXios::defaultBufferSizeFactor = 1.0;
    StdSize CXios::minBufferSize = 1024 * sizeof(double);
+   #pragma omp threadprivate(CXios::bufferSizeFactor, CXios::defaultBufferSizeFactor, CXios::minBufferSize)
+
    bool CXios::printLogs2Files;
    bool CXios::isOptPerformance = true;
    CRegistry* CXios::globalRegistry = 0;
+   #pragma omp threadprivate(CXios::printLogs2Files, CXios::isOptPerformance)

    //! Parse configuration file and create some objects from it
…
    {
      set_new_handler(noMemory);
+
      #pragma omp critical
      {
        parseFile(rootFile);
      }
+     #pragma omp barrier
      parseXiosConfig();
    }
…
      CXios::globalComm = passage[omp_get_thread_num()];

-     // int tmp_rank;
-     // MPI_Comm_rank(CXios::globalComm, &tmp_rank);
-     // if(isClient) printf("client thread %d/%d, globalComm = %p\n", omp_get_thread_num(), tmp_rank, &(CXios::globalComm));
+     int tmp_rank;
+     MPI_Comm_rank(CXios::globalComm, &tmp_rank);
+     if(isClient) printf("client thread %d/%d, globalComm = %p, passage = %p\n",
+                  omp_get_thread_num(), tmp_rank,
+                  &(CXios::globalComm), passage);
      //if(isServer) printf("server thread %d/%d, globalComm = %p\n", omp_get_thread_num(), tmp_rank, &globalComm);
…
      isServer = !usingServer;

-     printf("CXios::initClientSide OK, printLogs2Files = %d\n", printLogs2Files);
+     //printf("CXios::initClientSide OK, printLogs2Files = %d\n", printLogs2Files);

      if (printLogs2Files)
      {
        CClient::openInfoStream(clientFile);
-       CClient::openErrorStream(clientFile);
+       //CClient::openErrorStream(clientFile);
      }
      else
…
      std::set<StdString> parseList;
      parseList.insert("xios");
+
      xml::CXMLParser::ParseFile(rootFile, parseList);
+
      parseXiosConfig();
    }
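The `initialize()` hunk serializes the (non-thread-safe) XML parse with `#pragma omp critical` and then adds a barrier so no thread runs `parseXiosConfig()` before the parse has completed on every thread. A minimal sketch of that serialize-then-synchronize pattern; the variant with a `parsed` guard flag is an assumption, not what XIOS itself does:

    #include <omp.h>
    #include <cstdio>

    static bool parsed = false;   // shared state built by the parse step

    static void parse_once_per_thread(const char* file)
    {
      // Serialize the non-thread-safe setup step.
      #pragma omp critical
      {
        if (!parsed)              // only the first thread really parses
        {
          std::printf("parsing %s on thread %d\n", file, omp_get_thread_num());
          parsed = true;
        }
      }
      #pragma omp barrier         // nobody proceeds before the parse is done
      // ... per-thread configuration would read the parsed tree here ...
    }

    int main()
    {
      #pragma omp parallel num_threads(4)
      parse_once_per_thread("iodef.xml");
      return 0;
    }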
XIOS/dev/branch_yushan/src/cxios.hpp
r1079 → r1081 (the class body was also re-indented; indentation-only changes are not shown)

    public:
+     CXios();
      static void initialize(void) ;
      static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ;
      static void initServerSide(void) ;
      static void clientFinalize(void) ;
      static void parseFile(const string& filename) ;

      template <typename T>
      static T getin(const string& id,const T& defaultValue) ;

      template <typename T>
      static T getin(const string& id) ;

    public:
-     static string rootFile ; //!< Configuration filename
-     //#pragma omp threadprivate(rootFile)
+     static const string rootFile; //!< Configuration filename

-     static string xiosCodeId ; //!< Identity for XIOS
-     //#pragma omp threadprivate(xiosCodeId)
+     static const string xiosCodeId ; //!< Identity for XIOS

-     static string clientFile; //!< Filename template for client
-     //#pragma omp threadprivate(clientFile)
+     static const string clientFile; //!< Filename template for client

-     static string serverFile; //!< Filename template for server
-     //#pragma omp threadprivate(serverFile)
+     static const string serverFile; //!< Filename template for server

      static bool isClient ; //!< Check if xios is client
-     //#pragma omp threadprivate(isClient)

      static bool isServer ; //!< Check if xios is server
-     //#pragma omp threadprivate(isServer)
+     #pragma omp threadprivate(isClient, isServer)

      static MPI_Comm globalComm ; //!< Global communicator
      #pragma omp threadprivate(globalComm)

      static bool printLogs2Files; //!< Printing out logs into files
-     //#pragma omp threadprivate(printLogs2Files)

      static bool usingOasis ; //!< Using Oasis
-     //#pragma omp threadprivate(usingOasis)

      static bool usingServer ; //!< Using server (server mode)
-     //#pragma omp threadprivate(usingServer)

      static double bufferSizeFactor; //!< Factor used to tune the buffer size
-     //#pragma omp threadprivate(bufferSizeFactor)

      static const double defaultBufferSizeFactor; //!< Default factor value
-     //#pragma omp threadprivate(defaultBufferSizeFactor)

      static StdSize minBufferSize; //!< Minimum buffer size
-     //#pragma omp threadprivate(minBufferSize)

      static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible)
-     //#pragma omp threadprivate(isOptPerformance)
+     #pragma omp threadprivate(printLogs2Files, usingOasis, usingServer, bufferSizeFactor, minBufferSize, isOptPerformance)

      static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers
-     //#pragma omp threadprivate(globalRegistry)

    public:
      //! Setting xios to use server mode
XIOS/dev/branch_yushan/src/interface/c/icdata.cpp
r1074 → r1081

      CTimer::get("XIOS context finalize").resume();

-     printf("Check : calling cxios_context_finalize\n");
      CContext* context = CContext::getCurrent();
+     printf("Check : CContext::getCurrent() = %p\n", CContext::getCurrent());
      context->finalize();
XIOS/dev/branch_yushan/src/log.hpp
r523 → r1081 (whitespace-only change to getLevel())

    }
    void setLevel(int l) {level=l; }
-   int getLevel() {return level ;}
+   int getLevel() {return level ;}
    bool isActive(void) { if (rdbuf()==NULL) return true ; else return false ;}
    bool isActive(int l) {if (l<=level) return true ; else return false ; }
XIOS/dev/branch_yushan/src/node/context.cpp
r1080 → r1081

    {
      hasClient=true;
-     client = new CContextClient(this,intraComm, interComm, cxtServer);
+
+     #pragma omp critical
+     client = new CContextClient(this, intraComm, interComm, cxtServer);
+

      int tmp_rank;
…
      MPI_Barrier(intraComm);

+     #pragma omp critical
+     registryIn=new CRegistry(intraComm);
+
+     registryIn->setPath(getId()) ;
+
-     registryIn=new CRegistry(intraComm);
-
      #pragma omp critical (_output)
-     printf("Client %d : registryIn=new CRegistry(intraComm), &registryIn = %p, registryIn = %p \n", tmp_rank, &registryIn, registryIn) ;
-
-     // registryIn=new CRegistry;
-     // registryIn->communicator = intraComm;
-     registryIn->setPath(getId()) ;
+     printf("Client %d : registryIn->setPath(getId()=%s), clientRank = %d (%p) \n", tmp_rank, getId(), client->clientRank, &(client->clientRank)) ;
+     printf("Client %d : context.identifier = %d\n", tmp_rank, this->get_identifier());
+
      if (client->clientRank==0) registryIn->fromFile("xios_registry.bin") ;
      registryIn->bcastRegistry() ;
…
      registryOut=new CRegistry(intraComm) ;
      registryOut->setPath(getId()) ;
+
      #pragma omp critical (_output)
-     printf("Client %d : registryOut->setPath(getId()) \n", tmp_rank) ;
+     printf("Client %d : registryOut->setPath(getId()=%s) \n", tmp_rank, getId()) ;

      ep_lib::MPI_Comm intraCommServer, interCommServer;
…
    */
    //bkp
-   // CContext* CContext::create(const StdString& id)
-   // {
-   //   CContext::setCurrent(id);
-
-   //   bool hasctxt = CContext::has(id);
-   //   CContext* context = CObjectFactory::CreateObject<CContext>(id).get();
-   //   getRoot();
-   //   if (!hasctxt) CGroupFactory::AddChild(root, context->getShared());
-
-   //   #define DECLARE_NODE(Name_, name_) \
-   //   C##Name_##Definition::create(C##Name_##Definition::GetDefName());
-   //   #define DECLARE_NODE_PAR(Name_, name_)
-   //   #include "node_type.conf"
-
-   //   return (context);
-   // }
-
    CContext* CContext::create(const StdString& id)
    {
      CContext::setCurrent(id);

      bool hasctxt = CContext::has(id);
-     CContext* context[omp_get_num_threads()];
-     for(int i=0; i<omp_get_num_threads(); i++)
-     {
-
-       context[i] = CObjectFactory::CreateObject<CContext>(id).get();
-       getRoot();
-       if (!hasctxt) CGroupFactory::AddChild(root, context[i]->getShared());
+     CContext* context = CObjectFactory::CreateObject<CContext>(id).get();
+     getRoot();
+     if (!hasctxt) CGroupFactory::AddChild(root, context->getShared());

      #define DECLARE_NODE(Name_, name_) \
      C##Name_##Definition::create(C##Name_##Definition::GetDefName());
      #define DECLARE_NODE_PAR(Name_, name_)
      #include "node_type.conf"
-     }
-     int tmp_rank;
-     MPI_Comm_rank(MPI_COMM_WORLD, &tmp_rank);
-     printf("CContext::create : num_threads = %d, my_id = %d, return add = %p\n", omp_get_num_threads(), tmp_rank, &(context[omp_get_thread_num()]));
-
-     return (context[omp_get_thread_num()]);
+
+     return (context);
    }
+
+   int CContext::get_identifier()
+   {
+     return this->identifier;
+   }
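Two locking idioms recur in this file: an unnamed `#pragma omp critical` guarding allocations on shared structures, and the named section `critical(_output)` that serializes all the diagnostic printf calls so lines from different threads never interleave. A minimal sketch of named versus unnamed critical sections:

    #include <omp.h>
    #include <cstdio>

    // Critical sections with the same name share one lock; differently named
    // (or unnamed) sections lock independently, so logging does not contend
    // with allocation.
    int main()
    {
      #pragma omp parallel num_threads(4)
      {
        int tid = omp_get_thread_num();

        #pragma omp critical(_output)   // same name => same lock everywhere
        std::printf("thread %d: registry path set\n", tid);

        #pragma omp critical            // unnamed => its own global lock
        { /* a stand-in for the guarded "new CContextClient(...)" above */ }
      }
      return 0;
    }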
XIOS/dev/branch_yushan/src/node/context.hpp
r1080 → r1081

      // Concrete contex client
      CContextClient* client;

      CRegistry* registryIn ;  //!< input registry which is read from file
      CRegistry* registryOut ; //!< output registry which will be wrote on file at the finalize
-     //#pragma omp threadprivate(registryIn, registryOut)

    private:
…
      std::list<ep_lib::MPI_Comm> comms; //!< Communicators allocated internally

+     int identifier;
+
    public: // Some function maybe removed in the near future
+     int get_identifier();
      // virtual void toBinary (StdOStream & os) const;
      // virtual void fromBinary(StdIStream & is);
XIOS/dev/branch_yushan/src/object_template.hpp
r731 → r1081

      static xios_map< StdString, long int > GenId ;
+     //#pragma omp threadprivate(AllMapObj, AllVectObj, GenId)

  }; // class CObjectTemplate
XIOS/dev/branch_yushan/src/registry.hpp
r1080 → r1081

    /** Constructor, the communicator is used for bcast or gather operation between MPI processes */

-   //CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
-   CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD)
-   {
-     communicator = comm;
-   }
+   CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {}
+   // CRegistry(const ep_lib::MPI_Comm& comm=MPI_COMM_WORLD)
+   // {
+   //   communicator = comm;
+
+   //   int tmp_rank;
+   //   MPI_Comm_rank(comm, &tmp_rank);
+   //   printf("rank %d (%d): constructor on address %p, ref_comm = %p\n", tmp_rank, omp_get_thread_num(), &communicator, &comm);
+   // }
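This restores the member-initializer-list form of the constructor: `communicator` is initialized directly from `comm` rather than default-constructed and then assigned in the body. A minimal sketch of the difference, using a hypothetical Comm stand-in rather than the real ep_lib type:

    #include <cstdio>

    struct Comm { int id = 0; };   // hypothetical stand-in communicator

    class Registry
    {
      public:
        // Member initializer list: `communicator` is copy-initialized once.
        // The commented-out variant above instead default-constructs it and
        // then assigns inside the body, i.e. two operations instead of one.
        Registry(const Comm& comm = Comm()) : communicator(comm) {}
      private:
        Comm communicator;
    };

    int main()
    {
      Comm world{42};
      Registry r(world);
      std::printf("registry constructed\n");
      return 0;
    }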
XIOS/dev/branch_yushan/src/test/test_client.f90
r1079 → r1081

    CALL init_wait

-   !CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
-   !if(rank < 2) then
+   CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
+   if(rank < 2) then

…

    print *, "Client : xios_finalize "

-   !else
+   else

-   !CALL xios_init_server
-   !print *, "Server : xios_finalize "
+   CALL xios_init_server
+   print *, "Server : xios_finalize "

-   !endif
+   endif
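This re-enables the rank-based role split: ranks below 2 run the client path, the rest initialize as servers. A minimal sketch of the same split in C++/MPI; init_client() and init_server() are hypothetical stand-ins for the xios_initialize and xios_init_server calls:

    #include <mpi.h>
    #include <cstdio>

    static void init_client(int rank) { std::printf("rank %d: client\n", rank); }
    static void init_server(int rank) { std::printf("rank %d: server\n", rank); }

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);

      int rank = 0;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      const int clientRanks = 2;           // cutoff used by the test
      if (rank < clientRanks) init_client(rank);
      else                    init_server(rank);

      MPI_Finalize();
      return 0;
    }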
XIOS/dev/branch_yushan/src/test/test_omp.f90
r1080 → r1081 (the whole block was also re-indented in r1081; indentation-only changes are not shown)

    !!! MPI Initialization

    CALL MPI_INIT_THREAD(3, provided, ierr)
-   print*, "provided = ", provided
+   if(provided .NE. 3) then
+     print*, "provided thread level = ", provided
+     call MPI_Abort()
+   endif

    CALL init_wait

    CALL MPI_COMM_RANK(MPI_COMM_WORLD,rank,ierr)
    if(rank < 2) then

    !$omp parallel default(private)

    CALL xios_initialize(id,return_comm=comm)

    CALL MPI_COMM_RANK(comm,rank,ierr)
    CALL MPI_COMM_SIZE(comm,size,ierr)

    size = size*omp_get_num_threads()
    rank = rank*omp_get_num_threads() + omp_get_thread_num()

    DO j=1,nj_glo
      DO i=1,ni_glo
        lon_glo(i,j)=(i-1)+(j-1)*ni_glo
        lat_glo(i,j)=1000+(i-1)+(j-1)*ni_glo
        DO l=1,llm
          field_A_glo(i,j,l)=(i-1)+(j-1)*ni_glo+10000*l
        ENDDO
      ENDDO
    ENDDO

    ni=ni_glo ; ibegin=0

    jbegin=0
    DO n=0,size-1
      nj=nj_glo/size
      IF (n<MOD(nj_glo,size)) nj=nj+1
      IF (n==rank) exit
      jbegin=jbegin+nj
    ENDDO

    iend=ibegin+ni-1 ; jend=jbegin+nj-1

    ALLOCATE(lon(ni,nj),lat(ni,nj),field_A(0:ni+1,-1:nj+2,llm),lonvalue(ni,nj))
    lon(:,:)=lon_glo(ibegin+1:iend+1,jbegin+1:jend+1)
    lat(:,:)=lat_glo(ibegin+1:iend+1,jbegin+1:jend+1)
    field_A(1:ni,1:nj,:)=field_A_glo(ibegin+1:iend+1,jbegin+1:jend+1,:)

    print*, "xios init OK", rank, size

    CALL xios_context_initialize("test",comm)
    print*, "xios_context init OK", rank, size

+   ! CALL xios_context_finalize()
+   ! print*, "xios_context finalize OK", rank, size
+
+   CALL xios_finalize()
+   print*, "xios finalize OK", rank, size
+
+   !$omp barrier
+   !call MPI_Barrier(MPI_COMM_WORLD)
+   !$omp barrier
+
+   call MPI_Abort()

    CALL xios_get_handle("test",ctx_hdl)
…
    CALL xios_get_calendar_type(calendar_type)
    print*, "xios_get_calendar_type OK", rank, size

-   !CALL xios_context_finalize()
-   !print*, "xios_context finalize OK", rank, size
+   CALL xios_set_axis_attr("axis_A",n_glo=llm ,value=lval) ;
+   CALL xios_set_domain_attr("domain_A",ni_glo=ni_glo, nj_glo=nj_glo, ibegin=ibegin, ni=ni,jbegin=jbegin,nj=nj,type='curvilinear')
+   CALL xios_set_domain_attr("domain_A",data_dim=2, data_ibegin=-1, data_ni=ni+2, data_jbegin=-2, data_nj=nj+4)
+   CALL xios_set_domain_attr("domain_A",lonvalue_2D=lon,latvalue_2D=lat)
+   CALL xios_set_fieldgroup_attr("field_definition",enabled=.TRUE.)
+
+   CALL xios_get_handle("field_definition",fieldgroup_hdl)
+   CALL xios_add_child(fieldgroup_hdl,field_hdl,"field_B")
+   CALL xios_set_attr(field_hdl,field_ref="field_A",name="field_B")
+
+   CALL xios_get_handle("output",file_hdl)
+   CALL xios_add_child(file_hdl,field_hdl)
+   CALL xios_set_attr(field_hdl,field_ref="field_A_zoom",name="field_C")
+
+   dtime%second = 3600
+   CALL xios_set_timestep(dtime)
+   print*, "xios_set_timestep OK", rank, size
+
+   CALL xios_get_time_origin(date)
+
+   PRINT *, "--> year length = ", xios_get_year_length_in_seconds(date%year)
+   PRINT *, "--> day length = ", xios_get_day_length_in_seconds()
+   CALL xios_date_convert_to_string(date, date_str)
+   PRINT *, "time_origin = ", date_str
+   PRINT *, "xios_date_get_second_of_year(time_origin) = ", xios_date_get_second_of_year(date)
+   PRINT *, "xios_date_get_day_of_year(time_origin) = ", xios_date_get_day_of_year(date)
+   PRINT *, "xios_date_get_fraction_of_year(time_origin) = ", xios_date_get_fraction_of_year(date)
+   PRINT *, "xios_date_get_second_of_day(time_origin) = ", xios_date_get_second_of_day(date)
+   PRINT *, "xios_date_get_fraction_of_day(time_origin) = ", xios_date_get_fraction_of_day(date)
+   dtime%timestep = 1
+   dtime = 0.5 * dtime
+   CALL xios_duration_convert_to_string(dtime, dtime_str)
+   PRINT *, "duration = ", dtime_str
+   date = date + 3 * (dtime + dtime)
+   CALL xios_date_convert_to_string(date, date_str)
+   PRINT *, "date = time_origin + 3 * (duration + duration) = ", date_str
+   PRINT *, "xios_date_convert_to_seconds(date) = ", xios_date_convert_to_seconds(date)
+   PRINT *, "xios_date_convert_to_seconds(date - 2.5h) = ", xios_date_convert_to_seconds(date - 2.5 * xios_hour)
+
+   ni=0 ; lonvalue(:,:)=0;
+   CALL xios_get_domain_attr("domain_A",ni=ni,lonvalue_2D=lonvalue)
+   print *,"ni",ni
+
+   CALL xios_is_defined_field_attr("field_A",enabled=ok)
+   PRINT *,"field_A : attribute enabled is defined ? ",ok
+
+   ! CALL xios_close_context_definition()
+   ! print*, "xios_close_context_definition OK", rank, size
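The test derives a global endpoint numbering from the hybrid MPI+OpenMP layout: with P MPI processes each spawning T OpenMP threads, thread t of process p becomes endpoint p*T + t out of P*T. A minimal C++ sketch of that remapping, with the MPI rank/size passed in as plain integers:

    #include <omp.h>
    #include <cstdio>

    // Compute this thread's endpoint rank and the total endpoint count from
    // the MPI rank/size of the enclosing process (the same arithmetic as the
    // Fortran lines "size = size*omp_get_num_threads()" etc. above).
    void endpoint_rank(int mpiRank, int mpiSize, int* epRank, int* epSize)
    {
      int T = omp_get_num_threads();                 // threads in current team
      *epSize = mpiSize * T;                         // total endpoints
      *epRank = mpiRank * T + omp_get_thread_num();  // this thread's endpoint
    }

    int main()
    {
      int mpiRank = 0, mpiSize = 2;  // stand-ins for MPI_Comm_rank/size results
      #pragma omp parallel num_threads(3)
      {
        int r, s;
        endpoint_rank(mpiRank, mpiSize, &r, &s);
        #pragma omp critical
        std::printf("endpoint %d of %d\n", r, s);
      }
      return 0;
    }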