source: XIOS/trunk/src/server_distribution_description.cpp @ 871

Last change on this file since 871 was 865, checked in by mhnguyen, 8 years ago

Optimizing codes: Change the way to compute distribution of grid

+) Instead of using DHT on grid, we make use of it only for elements of grid
+) Make change to calculation of server distribution.

Test
+) On Curie
+) Two times faster than the precedent commit.

File size: 12.4 KB
Line 
1/*!
   \file server_distribution_description.cpp
3   \author Ha NGUYEN
4   \since 04 Jan 2015
5   \date 11 Jan 2016
6
7   \brief Description of index distribution on server(s).
8 */
9
10#include "server_distribution_description.hpp"
11#include "exception.hpp"
12
13namespace xios
14{
15  /*!
16  \param [in] globalDimensionSize global dimension of grid
17  \param [in] nServer number of server
18  \param [in] serType type of server distribution. For now, we can distribute server by band or plan
19  */
20CServerDistributionDescription::CServerDistributionDescription(const std::vector<int>& globalDimensionSize,
21                                                               int nServer,
22                                                               ServerDistributionType serType)
23  : nGlobal_(globalDimensionSize), indexBegin_(), dimensionSizes_(), globalIndex_(),
24    vecGlobalIndex_(), serverType_(serType), nServer_(nServer), positionDimensionDistributed_(1)
25{
26}
27
28CServerDistributionDescription::~CServerDistributionDescription()
29{ /* Nothing to do */ }
30
/*!
  Compute pre-defined global index distribution of server(s).
  The grid is first cut according to serverType_ (band distribution for now);
  optionally, the complete list of global indices owned by each server is then
  materialized into vecGlobalIndex_.
  \param [in] doComputeGlobalIndex flag to compute global index on each server. By default, false
  \param [in] positionDimensionDistributed dimension of the grid on which the cut is made

*/
void CServerDistributionDescription::computeServerDistribution(bool doComputeGlobalIndex,
                                                               int positionDimensionDistributed)
{
  // Fill indexBegin_ and dimensionSizes_ for every server.
  switch (serverType_) {
    case BAND_DISTRIBUTION:
      computeBandDistribution(nServer_, positionDimensionDistributed);
      break;
    default:
      break;
  }

  if (doComputeGlobalIndex)
  {
    vecGlobalIndex_.resize(nServer_);
    int dim = nGlobal_.size();
    std::vector<int> currentIndex(dim);

    for (int idxServer = 0; idxServer < nServer_; ++idxServer)
    {
      // ssize = total number of global indices held by this server
      size_t ssize = 1, idx = 0;
      for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j];
      vecGlobalIndex_[idxServer].resize(ssize);

      // idxLoop acts as an odometer over the server's local multi-dimensional box
      std::vector<int> idxLoop(dim,0);

      // Dimension 0 is the fastest-varying one; it is traversed in a tight inner loop
      int innerLoopSize = dimensionSizes_[idxServer][0];

      while (idx<ssize)
      {
        // Propagate odometer carries: a dimension that overflowed is reset
        // and the next dimension is advanced.
        for (int idxDim = 0; idxDim < dim-1; ++idxDim)
        {
          if (idxLoop[idxDim] == dimensionSizes_[idxServer][idxDim])
          {
            idxLoop[idxDim] = 0;
            ++idxLoop[idxDim+1];
          }
        }

        // Translate the local odometer position into global coordinates (dims >= 1)
        for (int idxDim = 1; idxDim < dim; ++idxDim)  currentIndex[idxDim] = idxLoop[idxDim] + indexBegin_[idxServer][idxDim];

        size_t mulDim, globalIndex;
        for (int j = 0; j < innerLoopSize; ++j)
        {
          mulDim = 1;
          globalIndex = j + indexBegin_[idxServer][0];

          // Linearize the global coordinates: stride of dimension k is the
          // product of the global sizes of dimensions 0..k-1.
          for (int k = 1; k < dim; ++k)
          {
            mulDim *= nGlobal_[k-1];
            globalIndex += currentIndex[k] * mulDim;
          }
          vecGlobalIndex_[idxServer](idx) = globalIndex;
          ++idx;
        }
        idxLoop[0] += innerLoopSize;
      }
    }
  }
}
95
/*!
  Compute global index assigned to a server with a range. E.g: if a grid has 100 points and
  there are 2 servers, the first one takes index from 0 to 49, the second has index from 50 to 99
  \param [in] indexBeginEnd beginning and ending index of range (inclusive on both ends)
  \param [in] positionDimensionDistributed dimension of server on which we make the cut.
*/
void CServerDistributionDescription::computeServerGlobalIndexInRange(const std::pair<size_t, size_t>& indexBeginEnd,
                                                                     int positionDimensionDistributed)
{
  // Fill indexBegin_ and dimensionSizes_ for every server.
  switch (serverType_) {
    case BAND_DISTRIBUTION:
      computeBandDistribution(nServer_, positionDimensionDistributed);
      break;
    default:
      break;
  }

  size_t indexBegin = indexBeginEnd.first;
  size_t indexEnd   = indexBeginEnd.second;
  if (indexBegin > indexEnd)
     ERROR("CServerDistributionDescription::computeServerGlobalIndexInRange",
           << "Index begin is larger than index end");

  // Pre-size the hash map for the whole range to avoid repeated rehashing on insert
  globalIndex_.rehash(std::ceil((indexEnd-indexBegin+1)/globalIndex_.max_load_factor()));

  int dim = nGlobal_.size();
  std::vector<int> currentIndex(dim);

  // Enumerate every global index of every server; keep only those falling
  // inside [indexBegin, indexEnd] and record the owning server rank.
  for (int idxServer = 0; idxServer < nServer_; ++idxServer)
  {
    // ssize = total number of global indices held by this server
    size_t ssize = 1, idx = 0;
    for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j];

    // idxLoop acts as an odometer over the server's local multi-dimensional box;
    // dimension 0 is the fastest-varying one.
    std::vector<int> idxLoop(dim,0);
    int innerLoopSize = dimensionSizes_[idxServer][0];

    while (idx<ssize)
    {
      // Propagate odometer carries to the next dimension
      for (int idxDim = 0; idxDim < dim-1; ++idxDim)
      {
        if (idxLoop[idxDim] == dimensionSizes_[idxServer][idxDim])
        {
          idxLoop[idxDim] = 0;
          ++idxLoop[idxDim+1];
        }
      }

      // Translate the local odometer position into global coordinates (dims >= 1)
      for (int idxDim = 1; idxDim < dim; ++idxDim)  currentIndex[idxDim] = idxLoop[idxDim] + indexBegin_[idxServer][idxDim];

      size_t mulDim, globalIndex;
      for (int j = 0; j < innerLoopSize; ++j)
      {
        mulDim = 1;
        globalIndex = j + indexBegin_[idxServer][0];

        // Linearize the global coordinates: stride of dimension k is the
        // product of the global sizes of dimensions 0..k-1.
        for (int k = 1; k < dim; ++k)
        {
          mulDim *= nGlobal_[k-1];
          globalIndex += (currentIndex[k])*mulDim;
        }
        if ((indexBegin <= globalIndex) && (globalIndex <= indexEnd))
          globalIndex_[globalIndex] = idxServer;
        ++idx;
      }
      idxLoop[0] += innerLoopSize;
    }
  }
}
164
/*!
  Compute the global index of grid elements (domain, axis) and their associated server rank.
  Each client knows the general distribution of servers and from which they can compute the pieces of information to hold
  \param [out] indexServerOnElement global index of each element as well as the corresponding server which contains these indices
  \param [in] clientRank rank of client
  \param [in] clientSize number of client
  \param [in] axisDomainOrder the order of element in grid (true for domain, false for axis)
  \param [in] positionDimensionDistributed dimension of server on which we make the cut.
*/
void CServerDistributionDescription::computeServerGlobalByElement(std::vector<boost::unordered_map<size_t,std::vector<int> > >& indexServerOnElement,
                                                                  int clientRank,
                                                                  int clientSize,
                                                                  const CArray<bool,1>& axisDomainOrder,
                                                                  int positionDimensionDistributed)
{
  // Fill indexBegin_ and dimensionSizes_ for every server.
  switch (serverType_) {
    case BAND_DISTRIBUTION:
      computeBandDistribution(nServer_, positionDimensionDistributed);
      break;
    default:
      break;
  }

  int nbElement = axisDomainOrder.numElements();
  indexServerOnElement.resize(nbElement);
  int idx = 0;
  // idxMap[i] = position of element i's first dimension inside the flat
  // per-server dimension arrays (a domain occupies 2 slots, an axis 1).
  std::vector<int> idxMap(nbElement);
  for (int i = 0; i < nbElement; ++i)
  {
    idxMap[i] = idx;
    if (true == axisDomainOrder(i)) idx += 2;
    else ++idx;
  }

  for (int idxServer = 0; idxServer < nServer_; ++idxServer)
  {
    // elementDimension = {begin of dim 0, begin of dim 1, size of dim 0, size of dim 1}
    // (an axis is treated as a degenerate 2D box: begin 0, size 1 on dim 1)
    std::vector<int> elementDimension(4);
    for (int i = 0; i < nbElement; ++i)
    {
      int elementSize = 1;
      if (axisDomainOrder(i))
      {
        elementSize *= dimensionSizes_[idxServer][idxMap[i]] * dimensionSizes_[idxServer][idxMap[i]+1];
        elementDimension[0] = indexBegin_[idxServer][idxMap[i]];
        elementDimension[1] = indexBegin_[idxServer][idxMap[i]+1];
        elementDimension[2] = dimensionSizes_[idxServer][idxMap[i]];
        elementDimension[3] = dimensionSizes_[idxServer][idxMap[i]+1];
      }

      else
      {
        elementSize *= dimensionSizes_[idxServer][idxMap[i]];
        elementDimension[0] = indexBegin_[idxServer][idxMap[i]];
        elementDimension[1] = 0;
        elementDimension[2] = dimensionSizes_[idxServer][idxMap[i]];
        elementDimension[3] = 1;
      }

      // The clients share the work of recording this element's indices:
      // this client only records indices [rangeBegin, rangeBegin+rangeSize).
      int rangeBegin, rangeSize;
      computeRangeProcIndex(clientRank, clientSize, elementSize, rangeBegin, rangeSize);

      size_t globalIndexElement;
      idx = 0; int idxRange = 0;
      for (int k = 0; k < elementDimension[3]; ++k)
        for (int l = 0; l < elementDimension[2]; ++l)
        {
          // NOTE(review): the row stride used here is the server-local size
          // elementDimension[2], not the element's global first-dimension size.
          // These agree only when the element's first dimension is not the
          // distributed one -- TODO confirm this invariant holds for all grids.
          globalIndexElement = (l+elementDimension[0]) + (k+elementDimension[1])*elementDimension[2];
          if ((rangeBegin <= idx) && (idxRange < rangeSize))
          {
            indexServerOnElement[i][globalIndexElement].push_back(idxServer);
            ++idxRange;
          }
          ++idx;
        }
    }
  }
}
242
243/*!
244  Compute a range of index on server which a client holds
245  For a range of index on a specific server, each client can hold a piece of the index range
246  If the range size is smaller than the number of client, there are some clients holding the same index
247  \param [in] clientRank rank of client
248  \param [in] clientSize number of client
249  \param [in] rangeProcSize index range size
250  \param [out] rangeBegin begin of range index a client holds
251  \param [out] rangeSize size of range index a client holds
252*/
253void CServerDistributionDescription::computeRangeProcIndex(int clientRank,
254                                                           int clientSize,
255                                                           int rangeProcSize,
256                                                           int& rangeBegin,
257                                                           int& rangeSize)
258{
259  if (rangeProcSize < clientSize)
260  {
261    int rangeIndex = 0;
262    for (int idx = 0; idx < clientSize; ++idx)
263    {
264      if (idx == clientRank)
265      {
266        rangeBegin = rangeIndex;
267        rangeSize = 1;
268      }
269      ++rangeIndex;
270      if (rangeIndex == rangeProcSize) rangeIndex = 0;
271    }
272    return;
273  }
274
275  int range, indexBegin = 0;
276  for (int i = 0; i < clientSize; ++i)
277  {
278    range = rangeProcSize / clientSize;
279    if (i < (rangeProcSize%clientSize)) ++range;
280    if (i == clientRank) break;
281    indexBegin += range;
282  }
283  rangeBegin = indexBegin;
284  rangeSize = range;
285}
286
287/*!
288  Compute global index of servers with band distribution
289  \param [in] nServer number of server
290*/
291void CServerDistributionDescription::computeBandDistribution(int nServer, int positionDimensionDistributed)
292{
293  int dim = nGlobal_.size();
294  positionDimensionDistributed_ = positionDimensionDistributed;
295  if (1 == dim) positionDimensionDistributed_ = 0;
296  if (positionDimensionDistributed_ > dim)
297    ERROR("CServerDistributionDescription::computeBandDistribution(int nServer, int positionDimensionDistributed)",
298          << "Position of distributed dimension is invalid" << std::endl
299          << "Position of distributed dimension is " << positionDimensionDistributed_
300          << "Dimension " << dim)
301
302  indexBegin_.resize(nServer);
303  dimensionSizes_.resize(nServer);
304
305  for (int i = 0; i< nServer; ++i)
306  {
307    indexBegin_[i].resize(dim);
308    dimensionSizes_[i].resize(dim);
309  }
310
311  int njRangeSize;
312  int nGlobTemp = 0;
313  std::vector<int> njRangeBegin(nServer,0);
314  std::vector<int> njRangeEnd(nServer,0);
315
316  int positionDistributed = (1<dim) ? positionDimensionDistributed_ : 0;
317  nGlobTemp = nGlobal_[positionDistributed];
318
319  for (int i = 0; i < nServer; ++i)
320  {
321    if (0 < i) njRangeBegin[i] = njRangeEnd[i-1];
322    njRangeSize = nGlobTemp / nServer;
323    if (i < nGlobTemp%nServer) ++njRangeSize;
324    njRangeEnd[i] = njRangeSize + njRangeBegin[i];
325  }
326  njRangeEnd[nServer-1] = nGlobTemp;
327
328  for (int i = 0; i < nServer; ++i)
329  {
330    for (int j = 0; j < dim; ++j)
331    {
332      if (positionDistributed != j)
333      {
334        if (1 == dim)
335        {
336          indexBegin_[i][j] = njRangeBegin[i];
337          dimensionSizes_[i][j] = njRangeEnd[i] - njRangeBegin[i];
338        }
339        else
340        {
341          indexBegin_[i][j] = 0;
342          dimensionSizes_[i][j] = nGlobal_[j];
343        }
344      }
345      else
346      {
347        indexBegin_[i][j] = njRangeBegin[i];
348        dimensionSizes_[i][j] = njRangeEnd[i] - njRangeBegin[i];
349      }
350    }
351  }
352}
353
354/*!
355  Get size of each dimension on distributed server
356  \return size of dimensions on server(s)
357*/
358std::vector<std::vector<int> > CServerDistributionDescription::getServerDimensionSizes() const
359{
360  return dimensionSizes_;
361}
362
363/*!
364  Get index begin of each dimension on distributed server
365  \return index begin of dimensions on server(s)
366*/
367std::vector<std::vector<int> > CServerDistributionDescription::getServerIndexBegin() const
368{
369  return indexBegin_;
370}
371
372/*!
373  Get global index on distributed server
374  \return global index on server(s)
375*/
376const std::vector<CArray<size_t,1> >& CServerDistributionDescription::getGlobalIndex() const
377{
378  return vecGlobalIndex_;
379}
380
381/*!
382  Get global index calculated by computeServerGlobalIndexInRange
383*/
384const boost::unordered_map<size_t,int>& CServerDistributionDescription::getGlobalIndexRange() const
385{
386  return globalIndex_;
387}
388
389int CServerDistributionDescription::getDimensionDistributed()
390{
391  return ((1<nGlobal_.size()) ? positionDimensionDistributed_ : 0);
392}
393
394} // namespace xios
Note: See TracBrowser for help on using the repository browser.