/*!
   \file server_distribution_description.cpp
   \author Ha NGUYEN
   \since 04 Jan 2015
   \date 09 March 2015

   \brief Description of index distribution on server(s).
 */

---|
| 10 | #include "server_distribution_description.hpp" |
---|
| 11 | |
---|
| 12 | namespace xios |
---|
| 13 | { |
---|
| 14 | CServerDistributionDescription::CServerDistributionDescription(const std::vector<int>& globalDimensionSize) |
---|
[568] | 15 | : nGlobal_(globalDimensionSize), indexBegin_(), dimensionSizes_(), globalIndex_(), vecGlobalIndex_() |
---|
[553] | 16 | { |
---|
| 17 | } |
---|
| 18 | |
---|
| 19 | CServerDistributionDescription::~CServerDistributionDescription() |
---|
| 20 | { |
---|
| 21 | if (!vecGlobalIndex_.empty()) |
---|
| 22 | for (int i = 0; i < vecGlobalIndex_.size(); ++i) delete vecGlobalIndex_[i]; |
---|
| 23 | } |
---|
| 24 | |
---|
| 25 | /*! |
---|
| 26 | Compute pre-defined global index distribution of server(s). |
---|
| 27 | \param [in] nServer number of server |
---|
| 28 | \param [in] doComputeGlobalIndex flag to compute global index on each server. By default, false |
---|
| 29 | \param [in] serType type of server distribution. For now, we can distribute server by band or plan |
---|
| 30 | */ |
---|
| 31 | void CServerDistributionDescription::computeServerDistribution(int nServer, |
---|
| 32 | bool doComputeGlobalIndex, |
---|
| 33 | ServerDistributionType serType) |
---|
| 34 | { |
---|
| 35 | switch (serType) { |
---|
| 36 | case BAND_DISTRIBUTION: |
---|
| 37 | computeBandDistribution(nServer); |
---|
| 38 | break; |
---|
| 39 | default: |
---|
| 40 | break; |
---|
| 41 | } |
---|
| 42 | |
---|
| 43 | if (doComputeGlobalIndex) |
---|
| 44 | { |
---|
| 45 | vecGlobalIndex_.resize(nServer); |
---|
| 46 | int dim = nGlobal_.size(); |
---|
| 47 | std::vector<int> currentIndex(dim); |
---|
| 48 | |
---|
| 49 | for (int idxServer = 0; idxServer < nServer; ++idxServer) |
---|
| 50 | { |
---|
| 51 | size_t ssize = 1, idx = 0; |
---|
| 52 | for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j]; |
---|
| 53 | vecGlobalIndex_[idxServer] = new CArray<size_t,1>(ssize); |
---|
| 54 | |
---|
| 55 | std::vector<int> idxLoop(dim,0); |
---|
| 56 | |
---|
| 57 | int innerLoopSize = dimensionSizes_[idxServer][0]; |
---|
| 58 | |
---|
| 59 | while (idx<ssize) |
---|
| 60 | { |
---|
| 61 | for (int idxDim = 0; idxDim < dim-1; ++idxDim) |
---|
| 62 | { |
---|
| 63 | if (idxLoop[idxDim] == dimensionSizes_[idxServer][idxDim]) |
---|
| 64 | { |
---|
| 65 | idxLoop[idxDim] = 0; |
---|
| 66 | ++idxLoop[idxDim+1]; |
---|
| 67 | } |
---|
| 68 | } |
---|
| 69 | |
---|
| 70 | for (int idxDim = 1; idxDim < dim; ++idxDim) currentIndex[idxDim] = idxLoop[idxDim] + indexBegin_[idxServer][idxDim]; |
---|
| 71 | |
---|
| 72 | size_t mulDim, globalIndex; |
---|
| 73 | for (int j = 0; j < innerLoopSize; ++j) |
---|
| 74 | { |
---|
| 75 | mulDim = 1; |
---|
| 76 | globalIndex = j + indexBegin_[idxServer][0]; |
---|
| 77 | |
---|
| 78 | for (int k = 1; k < dim; ++k) |
---|
| 79 | { |
---|
| 80 | mulDim *= nGlobal_[k-1]; |
---|
| 81 | globalIndex += (currentIndex[k])*mulDim; |
---|
| 82 | } |
---|
| 83 | (*vecGlobalIndex_[idxServer])(idx) = globalIndex; |
---|
| 84 | ++idx; |
---|
| 85 | } |
---|
| 86 | idxLoop[0] += innerLoopSize; |
---|
| 87 | } |
---|
| 88 | } |
---|
| 89 | } |
---|
| 90 | } |
---|
| 91 | |
---|
[569] | 92 | /*! |
---|
| 93 | Compute global index assigned to a server with a range.E.g: if a grid has 100 points and |
---|
| 94 | there are 2 servers, the first one takes index from 0 to 49, the second has index from 50 to 99 |
---|
| 95 | \param [in] nServer number of server |
---|
| 96 | \param [in] indexBeginEnd begining and ending index of range |
---|
| 97 | \param [in] serType type of server distribution. For now, we can distribute server by band or plan |
---|
| 98 | */ |
---|
[568] | 99 | void CServerDistributionDescription::computeServerGlobalIndexInRange(int nServer, |
---|
| 100 | const std::pair<size_t, size_t>& indexBeginEnd, |
---|
| 101 | ServerDistributionType distributionType) |
---|
| 102 | { |
---|
| 103 | switch (distributionType) { |
---|
| 104 | case BAND_DISTRIBUTION: |
---|
| 105 | computeBandDistribution(nServer); |
---|
| 106 | break; |
---|
| 107 | default: |
---|
| 108 | break; |
---|
| 109 | } |
---|
| 110 | |
---|
| 111 | size_t indexBegin = indexBeginEnd.first; |
---|
| 112 | size_t indexEnd = indexBeginEnd.second; |
---|
| 113 | if (indexBegin > indexEnd) |
---|
| 114 | ERROR("CServerDistributionDescription::computeServerGlobalIndexInRange", |
---|
| 115 | << "Index begin is larger than index end"); |
---|
| 116 | |
---|
| 117 | int dim = nGlobal_.size(); |
---|
| 118 | std::vector<int> currentIndex(dim); |
---|
| 119 | |
---|
| 120 | for (int idxServer = 0; idxServer < nServer; ++idxServer) |
---|
| 121 | { |
---|
| 122 | size_t ssize = 1, idx = 0; |
---|
| 123 | for (int j = 0; j < dim; ++j) ssize *= dimensionSizes_[idxServer][j]; |
---|
| 124 | |
---|
| 125 | std::vector<int> idxLoop(dim,0); |
---|
| 126 | int innerLoopSize = dimensionSizes_[idxServer][0]; |
---|
| 127 | |
---|
| 128 | while (idx<ssize) |
---|
| 129 | { |
---|
| 130 | for (int idxDim = 0; idxDim < dim-1; ++idxDim) |
---|
| 131 | { |
---|
| 132 | if (idxLoop[idxDim] == dimensionSizes_[idxServer][idxDim]) |
---|
| 133 | { |
---|
| 134 | idxLoop[idxDim] = 0; |
---|
| 135 | ++idxLoop[idxDim+1]; |
---|
| 136 | } |
---|
| 137 | } |
---|
| 138 | |
---|
| 139 | for (int idxDim = 1; idxDim < dim; ++idxDim) currentIndex[idxDim] = idxLoop[idxDim] + indexBegin_[idxServer][idxDim]; |
---|
| 140 | |
---|
| 141 | size_t mulDim, globalIndex; |
---|
| 142 | for (int j = 0; j < innerLoopSize; ++j) |
---|
| 143 | { |
---|
| 144 | mulDim = 1; |
---|
| 145 | globalIndex = j + indexBegin_[idxServer][0]; |
---|
| 146 | |
---|
| 147 | for (int k = 1; k < dim; ++k) |
---|
| 148 | { |
---|
| 149 | mulDim *= nGlobal_[k-1]; |
---|
| 150 | globalIndex += (currentIndex[k])*mulDim; |
---|
| 151 | } |
---|
| 152 | if ((indexBegin <= globalIndex) && (globalIndex <= indexEnd)) |
---|
| 153 | globalIndex_.insert(std::make_pair<size_t,int>(globalIndex, idxServer)); |
---|
| 154 | ++idx; |
---|
| 155 | } |
---|
| 156 | idxLoop[0] += innerLoopSize; |
---|
| 157 | } |
---|
| 158 | } |
---|
| 159 | |
---|
| 160 | } |
---|
| 161 | |
---|
[553] | 162 | /*! |
---|
| 163 | Compute global index of servers with band distribution |
---|
| 164 | \param [in] nServer number of server |
---|
| 165 | */ |
---|
| 166 | void CServerDistributionDescription::computeBandDistribution(int nServer) |
---|
| 167 | { |
---|
| 168 | int dim = nGlobal_.size(); |
---|
[557] | 169 | indexBegin_.resize(nServer); |
---|
| 170 | dimensionSizes_.resize(nServer); |
---|
[553] | 171 | |
---|
| 172 | for (int i = 0; i< nServer; ++i) |
---|
| 173 | { |
---|
[557] | 174 | indexBegin_[i].resize(dim); |
---|
| 175 | dimensionSizes_[i].resize(dim); |
---|
[553] | 176 | } |
---|
| 177 | |
---|
| 178 | int njRangeSize; |
---|
| 179 | int nGlobTemp = 0; |
---|
| 180 | std::vector<int> njRangeBegin(nServer,0); |
---|
| 181 | std::vector<int> njRangeEnd(nServer,0); |
---|
| 182 | |
---|
| 183 | if (1<dim) nGlobTemp = nGlobal_[1]; |
---|
| 184 | else nGlobTemp = nGlobal_[0]; |
---|
| 185 | |
---|
| 186 | for (int i = 0; i < nServer; ++i) |
---|
| 187 | { |
---|
| 188 | if (0 < i) njRangeBegin[i] = njRangeEnd[i-1]; |
---|
| 189 | njRangeSize = nGlobTemp / nServer; |
---|
| 190 | if (i < nGlobTemp%nServer) ++njRangeSize; |
---|
| 191 | njRangeEnd[i] = njRangeSize + njRangeBegin[i]; |
---|
| 192 | } |
---|
| 193 | njRangeEnd[nServer-1] = nGlobTemp; |
---|
| 194 | |
---|
| 195 | for (int i = 0; i < nServer; ++i) |
---|
| 196 | { |
---|
| 197 | for (int j = 0; j < dim; ++j) |
---|
| 198 | { |
---|
| 199 | if (1 != j) |
---|
| 200 | { |
---|
| 201 | if (1 == dim) |
---|
| 202 | { |
---|
| 203 | indexBegin_[i][j] = njRangeBegin[i]; |
---|
| 204 | dimensionSizes_[i][j] = njRangeEnd[i] - njRangeBegin[i]; |
---|
| 205 | } |
---|
| 206 | else |
---|
| 207 | { |
---|
| 208 | indexBegin_[i][j] = 0; |
---|
| 209 | dimensionSizes_[i][j] = nGlobal_[j]; |
---|
| 210 | } |
---|
| 211 | } |
---|
| 212 | else |
---|
| 213 | { |
---|
| 214 | indexBegin_[i][j] = njRangeBegin[i]; |
---|
| 215 | dimensionSizes_[i][j] = njRangeEnd[i] - njRangeBegin[i]; |
---|
| 216 | } |
---|
| 217 | } |
---|
| 218 | } |
---|
| 219 | } |
---|
| 220 | |
---|
| 221 | /*! |
---|
| 222 | Get size of each dimension on distributed server |
---|
| 223 | \return size of dimensions on server(s) |
---|
| 224 | */ |
---|
| 225 | std::vector<std::vector<int> > CServerDistributionDescription::getServerDimensionSizes() const |
---|
| 226 | { |
---|
| 227 | return dimensionSizes_; |
---|
| 228 | } |
---|
| 229 | |
---|
| 230 | /*! |
---|
| 231 | Get index begin of each dimension on distributed server |
---|
| 232 | \return index begin of dimensions on server(s) |
---|
| 233 | */ |
---|
| 234 | std::vector<std::vector<int> > CServerDistributionDescription::getServerIndexBegin() const |
---|
| 235 | { |
---|
| 236 | return indexBegin_; |
---|
| 237 | } |
---|
| 238 | |
---|
| 239 | /*! |
---|
| 240 | Get global index on distributed server |
---|
| 241 | \return global index on server(s) |
---|
| 242 | */ |
---|
| 243 | const std::vector<CArray<size_t,1>* >& CServerDistributionDescription::getGlobalIndex() const |
---|
| 244 | { |
---|
| 245 | return vecGlobalIndex_; |
---|
| 246 | } |
---|
| 247 | |
---|
[569] | 248 | /*! |
---|
| 249 | Get global index calculated by computeServerGlobalIndexInRange |
---|
| 250 | */ |
---|
[568] | 251 | const boost::unordered_map<size_t,int>& CServerDistributionDescription::getGlobalIndexRange() const |
---|
| 252 | { |
---|
| 253 | return globalIndex_; |
---|
| 254 | } |
---|
[553] | 255 | } // namespace xios |
---|