Changeset 1009 for XIOS/dev/dev_olga/src/server.cpp
- Timestamp: 12/05/16 17:47:54 (7 years ago)
- File: 1 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/dev/dev_olga/src/server.cpp
r992 r1009 22 22 list<MPI_Comm> CServer::interComm ; 23 23 std::list<MPI_Comm> CServer::contextInterComms; 24 int CServer::nbSndSrvPools = (CXios::serverLevel == 0) ? 0 : 1; 25 int CServer::poolNb = 0; 24 26 bool CServer::isRoot = false ; 25 27 int CServer::rank = INVALID_RANK; 28 int CServer::rankSndServers = 0; 26 29 StdOFStream CServer::m_infoStream; 27 30 StdOFStream CServer::m_errorStream; … … 43 46 else is_MPI_Initialized=false ; 44 47 48 45 49 // Not using OASIS 46 50 if (!CXios::usingOasis) … … 52 56 } 53 57 CTimer::get("XIOS").resume() ; 54 55 int nbSrvLevels = 2;56 58 57 59 boost::hash<string> hashString ; … … 61 63 62 64 unsigned long* hashAll ; 65 unsigned long* hashAllServers ; 63 66 64 67 // int rank ; … … 66 69 int myColor ; 67 70 int i,c ; 68 MPI_Comm newComm 71 MPI_Comm newComm, serversInterComm; 69 72 70 73 MPI_Comm_size(CXios::globalComm, &size) ; 71 74 MPI_Comm_rank(CXios::globalComm, &rank); 75 72 76 hashAll=new unsigned long[size] ; 73 77 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ; 74 78 75 map<unsigned long, int> colors ;79 map<unsigned long, int> colors, colorsServers ; 76 80 map<unsigned long, int> leaders ; 77 81 map<unsigned long, int>::iterator it ; 82 // map<unsigned long, int> leadersServers ; 83 vector<int> leadersServers; 78 84 79 85 for(i=0,c=0;i<size;i++) … … 87 93 } 88 94 89 myColor=colors[hashServer] ; 90 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &intraComm) ; 95 nbSndSrvPools = size - leaders[hashServer2]; // one proc per secondary-server pool 96 97 // Taking into account multiple pools on secondary server 98 if (nbSndSrvPools > 1) 99 { 100 if (CXios::serverLevel > 1) 101 { 102 int nbProcs = size - leaders[hashServer2]; 103 int remain = nbProcs % nbSndSrvPools; 104 int procsPerPool = nbProcs / nbSndSrvPools; 105 rankSndServers = rank - leaders[hashServer2]; 106 StdString strTmp = CXios::xiosCodeIdSnd; 107 108 if (remain == 0) 109 { 110 poolNb = rankSndServers/procsPerPool; 111 } 112 
else 113 { 114 if (rankSndServers <= (procsPerPool + 1) * remain) 115 poolNb = rankSndServers/(procsPerPool+1); 116 else 117 { 118 poolNb = remain + 1; 119 rankSndServers -= (procsPerPool + 1) * remain; 120 rankSndServers -= procsPerPool; 121 poolNb += rankSndServers/procsPerPool; 122 } 123 } 124 strTmp += boost::lexical_cast<string>(poolNb+1); // add 1 to avoid hashing zero 125 hashServer = hashString(strTmp); 126 hashServer2 = hashString(strTmp); 127 } 128 } 129 130 if (nbSndSrvPools > 1) 131 { 132 myColor = size; 133 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &serversInterComm) ; 134 } 135 else 136 { 137 myColor=colors[hashServer] ; 138 MPI_Comm_split(MPI_COMM_WORLD, myColor, rank, &intraComm) ; 139 } 140 141 if (nbSndSrvPools > 1) 142 { 143 int sizeServers; 144 // int rankServers; 145 // MPI_Comm_rank(serversInterComm, &rankServers) ; 146 MPI_Comm_size(serversInterComm, &sizeServers) ; 147 hashAllServers=new unsigned long[sizeServers] ; 148 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAllServers, 1, MPI_LONG, serversInterComm) ; 149 150 for(i=0, c=0; i<sizeServers; i++) 151 { 152 if (colorsServers.find(hashAllServers[i])==colorsServers.end()) 153 { 154 colorsServers[hashAllServers[i]]=c ; 155 // leadersServers[hashAllServers[i]]= leaders[hashServer1] + i ; 156 leadersServers.push_back( leaders[hashServer1] + i ); 157 c++ ; 158 } 159 } 160 myColor=colorsServers[hashServer] ; 161 MPI_Comm_split(serversInterComm, myColor, rank, &intraComm) ; 162 } 163 91 164 92 165 if (CXios::serverLevel == 0) … … 104 177 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 105 178 106 179 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 107 180 interCommLeft.push_back(newComm) ; 108 181 interComm.push_back(newComm) ; … … 110 183 } 111 184 } 112 113 else if ((CXios::serverLevel == 1)) 114 { 115 int clientLeader; 116 int srvSndLeader; 117 for(it=leaders.begin();it!=leaders.end();it++) 185 else 186 { 187 if 
((CXios::serverLevel == 1)) 118 188 { 119 if (it->first != hashServer2) 120 { 121 if (it->first != hashServer1) 189 // Creating interComm with client (interCommLeft) 190 int clientLeader; 191 int srvSndLeader; 192 for(it=leaders.begin();it!=leaders.end();it++) 193 { 194 if (it->first != hashServer2) 122 195 { 123 clientLeader=it->second ; 124 int intraCommSize, intraCommRank ; 125 MPI_Comm_size(intraComm,&intraCommSize) ; 126 MPI_Comm_rank(intraComm,&intraCommRank) ; 127 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 128 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 129 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 130 interCommLeft.push_back(newComm) ; 131 interComm.push_back(newComm) ; 196 if (it->first != hashServer1) 197 { 198 clientLeader=it->second ; 199 int intraCommSize, intraCommRank ; 200 MPI_Comm_size(intraComm, &intraCommSize) ; 201 MPI_Comm_rank(intraComm, &intraCommRank) ; 202 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 203 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 204 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 205 interCommLeft.push_back(newComm) ; 206 interComm.push_back(newComm) ; 207 } 132 208 } 209 else 210 { 211 srvSndLeader = it->second; 212 } 213 } 214 215 // Creating interComm with secondary server pool(s) (interCommRight) 216 // if (nbSndSrvPools < 1) 217 if (nbSndSrvPools < 2) 218 { 219 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 220 interCommRight.push_back(CClient::getInterComm()); 221 interComm.push_back(CClient::getInterComm()); 133 222 } 134 223 else 135 224 { 136 srvSndLeader = it->second; 137 } 138 } 139 140 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 141 interCommRight.push_back(CClient::getInterComm()); 142 interComm.push_back(CClient::getInterComm()); 143 144 } 145 146 else // 
secondary server pool 147 { 148 int clientLeader; 149 for(it=leaders.begin();it!=leaders.end();it++) 225 // for(it=leadersServers.begin();it!=leadersServers.end();it++) 226 // { 227 // if (it->first != hashServer) 228 // { 229 // srvSndLeader = it->second; 230 // CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 231 // interCommRight.push_back(CClient::getInterComm()); 232 // interComm.push_back(CClient::getInterComm()); 233 // } 234 // } 235 236 for(int i = 1; i < leadersServers.size(); ++i) 237 { 238 srvSndLeader = leadersServers[i]; 239 CClient::initializeClientOnServer(rank, intraComm, srvSndLeader); 240 interCommRight.push_back(CClient::getInterComm()); 241 interComm.push_back(CClient::getInterComm()); 242 } 243 } 244 } // primary server 245 246 else // secondary server pool(s) 150 247 { 151 if (it->first == hashServer1) 152 { 153 clientLeader=it->second ; 154 int intraCommSize, intraCommRank ; 155 MPI_Comm_size(intraComm,&intraCommSize) ; 156 MPI_Comm_rank(intraComm,&intraCommRank) ; 157 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 158 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 159 160 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 161 interCommLeft.push_back(newComm) ; 162 interComm.push_back(newComm) ; 163 164 break; 165 } 166 } 167 } 168 248 int clientLeader; 249 if (nbSndSrvPools < 2) 250 // if (nbSndSrvPools < 1) 251 { 252 for(it=leaders.begin();it!=leaders.end();it++) 253 { 254 if (it->first == hashServer1) 255 { 256 clientLeader=it->second ; 257 int intraCommSize, intraCommRank ; 258 MPI_Comm_size(intraComm, &intraCommSize) ; 259 MPI_Comm_rank(intraComm, &intraCommRank) ; 260 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 261 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 262 263 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 264 
interCommLeft.push_back(newComm) ; 265 interComm.push_back(newComm) ; 266 267 break; 268 } 269 } 270 } 271 else 272 { 273 // for(it=leadersServers.begin();it!=leadersServers.end();it++) 274 { 275 // if (it->first == hashServer1) 276 { 277 // clientLeader=it->second ; 278 clientLeader = leadersServers[0]; 279 int intraCommSize, intraCommRank ; 280 MPI_Comm_size(intraComm, &intraCommSize) ; 281 MPI_Comm_rank(intraComm, &intraCommRank) ; 282 info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize 283 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 284 285 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 286 interCommLeft.push_back(newComm) ; 287 interComm.push_back(newComm) ; 288 } 289 } 290 } 291 } // secondary server 292 } // CXios::serverLevel != 0 293 294 if (nbSndSrvPools > 1) delete [] hashAllServers; 169 295 delete [] hashAll ; 170 296 … … 204 330 } 205 331 206 // int rank; 207 MPI_Comm_rank(intraComm,&rank) ; 332 MPI_Comm_rank(intraComm, &rank) ; 208 333 if (rank==0) isRoot=true; 209 334 else isRoot=false; … … 489 614 if (CXios::serverLevel == 1) 490 615 { 491 CClient::registerContext(contextId, intraComm); 616 // CClient::registerContext(contextId, intraComm); 617 CClient::registerContextOnSrvPools(contextId, intraComm); 492 618 } 493 619 … … 531 657 int numDigit = 0; 532 658 int size = 0; 659 int mpiRank; 533 660 MPI_Comm_size(CXios::globalComm, &size); 534 661 while (size) … … 538 665 } 539 666 540 fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << getRank() << ext; 667 if (nbSndSrvPools < 2) 668 mpiRank = getRank(); 669 else 670 mpiRank = rankSndServers; 671 fileNameClient << fileName << "_" << std::setfill('0') << std::setw(numDigit) << mpiRank << ext; 541 672 fb->open(fileNameClient.str().c_str(), std::ios::out); 542 673 if (!fb->is_open())
Note: See TracChangeset for help on using the changeset viewer.