Changeset 1642 for XIOS/dev/branch_openmp/src/cxios.cpp
- Timestamp: 01/23/19 10:31:44
- File: 1 edited (XIOS/dev/branch_openmp/src/cxios.cpp)
Legend:
- Unmodified: context lines (no prefix)
- Added: lines prefixed with "+" (new in r1642)
- Removed: lines prefixed with "-" (only in r1556)
XIOS/dev/branch_openmp/src/cxios.cpp
--- XIOS/dev/branch_openmp/src/cxios.cpp (r1556)
+++ XIOS/dev/branch_openmp/src/cxios.cpp (r1642)

@@ -11,18 +11,20 @@
 #include "memtrack.hpp"
 #include "registry.hpp"
-using namespace ep_lib;
 
 namespace xios
 {
-  const string CXios::rootFile="./iodef.xml" ;
-  const string CXios::xiosCodeId="xios.x" ;
-  const string CXios::clientFile="./xios_client";
-  const string CXios::serverFile="./xios_server";
-  const string CXios::serverPrmFile="./xios_server1";
-  const string CXios::serverSndFile="./xios_server2";
+  string CXios::rootFile="./iodef.xml" ;
+  string CXios::xiosCodeId="xios.x" ;
+  string CXios::clientFile="./xios_client";
+  string CXios::serverFile="./xios_server";
+  string CXios::serverPrmFile="./xios_server1";
+  string CXios::serverSndFile="./xios_server2";
+
+  bool CXios::xiosStack = true;
+  bool CXios::systemStack = false;
 
   bool CXios::isClient ;
   bool CXios::isServer ;
-  MPI_Comm CXios::globalComm ;
+  ep_lib::MPI_Comm CXios::globalComm ;
   bool CXios::usingOasis ;
   bool CXios::usingServer = false;

@@ -44,13 +46,5 @@
   {
     set_new_handler(noMemory);
-    int tmp_rank;
-    MPI_Comm_rank(MPI_COMM_WORLD, &tmp_rank);
-    #pragma omp critical
-    {
-      std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl;
-      parseFile(rootFile);
-      std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsed rootfile"<<std::endl;
-    }
-    #pragma omp barrier
+    parseFile(rootFile);
     parseXiosConfig();
   }

@@ -71,4 +65,11 @@
     printLogs2Files=getin<bool>("print_file",false);
 
+    xiosStack=getin<bool>("xios_stack",true) ;
+    systemStack=getin<bool>("system_stack",false) ;
+    if (xiosStack && systemStack)
+    {
+      xiosStack = false;
+    }
+
     StdString bufMemory("memory");
     StdString bufPerformance("performance");

@@ -89,29 +90,13 @@
 
     checkEventSync = getin<bool>("check_event_sync", checkEventSync);
-
-    //globalComm=MPI_COMM_WORLD ;
-    int num_ep;
-    if(isClient)
-    {
-      num_ep = omp_get_num_threads();
-    }
-
-    if(isServer)
-    {
-      num_ep = 1;
-    }
-
-    MPI_Info info;
-    #pragma omp master
-    {
-      MPI_Comm *ep_comm;
-      MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); // servers should reach here too.
-      passage = ep_comm;
-    }
-
-    #pragma omp barrier
-
-
-    CXios::globalComm = passage[omp_get_thread_num()];
+    #ifdef _usingMPI
+    globalComm=MPI_COMM_WORLD ;
+    #elif _usingEP
+    ep_lib::MPI_Comm *ep_comm;
+    ep_lib::MPI_Info info;
+    ep_lib::MPI_Comm_create_endpoints(EP_COMM_WORLD->mpi_comm, 1, info, ep_comm);
+    ep_lib::passage = ep_comm;
+    globalComm=ep_lib::passage[0] ;
+    #endif
   }
 

@@ -122,5 +107,6 @@
   \param [in/out] returnComm communicator corresponding to group of client with same codeId
   */
-  void CXios::initClientSide(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
+  void CXios::initClientSide(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm)
+  TRY
   {
     isClient = true;

@@ -129,6 +115,8 @@
     initialize() ;
 
+
     CClient::initialize(codeId,localComm,returnComm) ;
     if (CClient::getRank()==0) globalRegistry = new CRegistry(returnComm) ;
+
 
     // If there are no server processes then we are in attached mode

@@ -138,5 +126,4 @@
     if (printLogs2Files)
     {
-      #pragma omp critical
       CClient::openInfoStream(clientFile);
       CClient::openErrorStream(clientFile);

@@ -148,4 +135,5 @@
     }
   }
+  CATCH
 
   void CXios::clientFinalize(void)

@@ -154,5 +142,4 @@
     if (CClient::getRank()==0)
     {
-      #pragma omp critical (_output)
       info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ;
       globalRegistry->toFile("xios_registry.bin") ;

@@ -190,10 +177,10 @@
   void CXios::initServerSide(void)
   {
-
     isClient = false;
     isServer = true;
 
     initServer();
     isClient = false;
+    isServer = true;
     // Initialize all aspects MPI
     CServer::initialize();

@@ -248,5 +235,5 @@
     int firstPoolGlobalRank = secondaryServerGlobalRanks[0];
     int rankGlobal;
-    MPI_Comm_rank(globalComm, &rankGlobal);
+    ep_lib::MPI_Comm_rank(globalComm, &rankGlobal);
 
     // Merge registries defined on each pools

@@ -260,8 +247,8 @@
         globalRegistrySndServers.mergeRegistry(*globalRegistry) ;
         int registrySize = globalRegistrySndServers.size();
-        MPI_Send(&registrySize,1,MPI_LONG,firstPoolGlobalRank,15,CXios::globalComm) ;
+        ep_lib::MPI_Send(&registrySize,1,EP_LONG,firstPoolGlobalRank,15,CXios::globalComm) ;
         CBufferOut buffer(registrySize) ;
         globalRegistrySndServers.toBuffer(buffer) ;
-        MPI_Send(buffer.start(),registrySize,MPI_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ;
+        ep_lib::MPI_Send(buffer.start(),registrySize,EP_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ;
       }
     }

@@ -270,5 +257,5 @@
     if (rankGlobal == firstPoolGlobalRank)
     {
-      MPI_Status status;
+      ep_lib::MPI_Status status;
       char* recvBuff;
 

@@ -279,7 +266,7 @@
         int rank = secondaryServerGlobalRanks[i];
         int registrySize = 0;
-        MPI_Recv(&registrySize, 1, MPI_LONG, rank, 15, CXios::globalComm, &status);
+        ep_lib::MPI_Recv(&registrySize, 1, EP_LONG, rank, 15, CXios::globalComm, &status);
         recvBuff = new char[registrySize];
-        MPI_Recv(recvBuff, registrySize, MPI_CHAR, rank, 15, CXios::globalComm, &status);
+        ep_lib::MPI_Recv(recvBuff, registrySize, EP_CHAR, rank, 15, CXios::globalComm, &status);
         CBufferIn buffer(recvBuff, registrySize) ;
         CRegistry recvRegistry;
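The largest hunk above replaces the per-thread endpoint (EP) setup in CXios::initialize() with a compile-time choice between a plain MPI build (_usingMPI) and an endpoint build (_usingEP). The toy program below is a hedged sketch of that selection pattern using standard MPI only; EP_COMM_WORLD, ep_lib::MPI_Comm_create_endpoints and ep_lib::passage are XIOS-internal names and are not reproduced, and the _usingMPI macro is defined locally purely for illustration.

// Hedged sketch of the compile-time communicator selection in the r1642 hunk above.
// Standard MPI only; the EP (endpoint) branch of the real code is reduced to a stub.
#include <mpi.h>
#include <cstdio>

#define _usingMPI  // assumption: a classic MPI build, mirroring the #ifdef in CXios::initialize()

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  MPI_Comm globalComm;
#ifdef _usingMPI
  globalComm = MPI_COMM_WORLD;   // same choice as the new "globalComm=MPI_COMM_WORLD ;" line
#else
  // _usingEP: the real code builds endpoint communicators through ep_lib here
  globalComm = MPI_COMM_NULL;
#endif

  int rank = -1;
  MPI_Comm_rank(globalComm, &rank);
  std::printf("process %d uses the selected global communicator\n", rank);

  MPI_Finalize();
  return 0;
}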
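Another hunk wraps CXios::initClientSide() between TRY and CATCH. The sketch below is only a generic illustration of that function-try-block wrapping with locally defined macros; XIOS supplies its own TRY/CATCH macros in its exception-handling code, so the definitions used here are an assumption, not the library's.

// Illustrative stand-ins; NOT the XIOS macros.
#include <iostream>
#include <stdexcept>

#define TRY   try {
#define CATCH } catch (const std::exception& e) { \
                  std::cerr << "error in wrapped function: " << e.what() << '\n'; \
                  throw; }

void initClientSideDemo()   // hypothetical function standing in for the wrapped XIOS entry point
TRY
{
  throw std::runtime_error("example failure");
}
CATCH

int main()
{
  try { initClientSideDemo(); }
  catch (const std::exception&) { /* already reported by CATCH, swallowed here */ }
  return 0;
}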