Changeset 490 for XIOS/trunk/src/client.cpp
- Timestamp:
- 09/26/14 14:52:04 (10 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/trunk/src/client.cpp
r400 r490 1 #include "globalScopeData.hpp" 1 2 #include "xmlioserver_spl.hpp" 2 3 #include "cxios.hpp" … … 12 13 13 14 namespace xios 14 { 15 { 15 16 16 17 MPI_Comm CClient::intraComm ; … … 18 19 int CClient::serverLeader ; 19 20 bool CClient::is_MPI_Initialized ; 20 21 21 int CClient::rank = INVALID_RANK; 22 StdOFStream CClient::m_infoStream; 23 22 24 void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm) 23 25 { … … 26 28 if (initialized) is_MPI_Initialized=true ; 27 29 else is_MPI_Initialized=false ; 28 30 29 31 // don't use OASIS 30 32 if (!CXios::usingOasis) … … 33 35 if (localComm == MPI_COMM_NULL) 34 36 { 35 if (!is_MPI_Initialized) 37 if (!is_MPI_Initialized) 36 38 { 37 39 int argc=0; … … 41 43 CTimer::get("XIOS").resume() ; 42 44 CTimer::get("XIOS init").resume() ; 43 boost::hash<string> hashString ; 44 45 boost::hash<string> hashString ; 46 45 47 unsigned long hashClient=hashString(codeId) ; 46 48 unsigned long hashServer=hashString(CXios::xiosCodeId) ; 47 49 unsigned long* hashAll ; 48 int rank ;49 50 int size ; 50 51 int myColor ; 51 52 int i,c ; 52 53 MPI_Comm newComm ; 53 54 54 55 MPI_Comm_size(CXios::globalComm,&size) ; 55 56 MPI_Comm_rank(CXios::globalComm,&rank); 57 56 58 hashAll=new unsigned long[size] ; 57 59 58 60 MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ; 59 61 60 62 map<unsigned long, int> colors ; 61 63 map<unsigned long, int> leaders ; 62 64 63 65 for(i=0,c=0;i<size;i++) 64 66 { … … 70 72 } 71 73 } 72 74 73 75 myColor=colors[hashClient] ; 74 76 75 77 MPI_Comm_split(CXios::globalComm,myColor,rank,&intraComm) ; 76 78 77 79 if (CXios::usingServer) 78 { 80 { 79 81 int clientLeader=leaders[hashClient] ; 80 82 serverLeader=leaders[hashServer] ; … … 88 90 } 89 91 // localComm argument is given 90 else 92 else 91 93 { 92 94 if (CXios::usingServer) 93 { 95 { 94 96 //ERROR("void CClient::initialize(const string& codeId,MPI_Comm& localComm,MPI_Comm& returnComm)", << " giving a local 
communicator is not compatible with using server mode") ; 95 97 } … …
192 192 } 193 193 194 194 CTimer::get("XIOS finalize").suspend() ; 195 195 CTimer::get("XIOS").suspend() ; … … 201 201 } 202 202 info(20) << "Client side context is finalized"<<endl ; 203 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 203 report(0) <<" Performance report : total time spent for XIOS : "<< CTimer::get("XIOS").getCumulatedTime()<<" s"<<endl ; 204 204 report(0)<< " Performance report : time spent for waiting free buffer : "<< CTimer::get("Blocking time").getCumulatedTime()<<" s"<<endl ; 205 205 report(0)<< " Performance report : Ratio : "<< CTimer::get("Blocking time").getCumulatedTime()/CTimer::get("XIOS").getCumulatedTime()*100.<<" %"<<endl ; … … 209 209 report(0)<< " Memory report : increasing it by a factor will increase performance, depending of the volume of data wrote in file at each time step of the file"<<endl ; 210 210 } 211 212 int CClient::getRank() 213 { 214 return rank; 215 } 216 217 /*! 218 * \brief Open file stream to write in 219 * Opening a file stream with a specific file name suffix-client+rank 220 * \param [in] protype file name 221 */ 222 void CClient::openInfoStream(const StdString& fileName) 223 { 224 std::filebuf* fb = m_infoStream.rdbuf(); 225 StdStringStream fileNameClient; 226 fileNameClient << fileName <<"_client_" << getRank() << ".txt"; 227 fb->open(fileNameClient.str().c_str(), std::ios::out); 228 if (!fb->is_open()) 229 ERROR("void CClient::openInfoStream(const StdString& fileName)", 230 <<endl<< "Can not open <"<<fileNameClient<<"> file to write" ); 231 232 info.write2File(fb); 233 } 234 235 //! Write out to standard output 236 void CClient::openInfoStream() 237 { 238 info.write2StdOut(); 239 } 240 241 //! Close file if it opens 242 void CClient::closeInfoStream() 243 { 244 if (m_infoStream.is_open()) m_infoStream.close(); 245 } 246 247 211 248 }
Note: See TracChangeset
for help on using the changeset viewer.