source: XIOS/trunk/src/transformation/grid_transformation.cpp @ 888

Last change on this file since 888 was 888, checked in by mhnguyen, 8 years ago

Adding new transformation for scalar: Reducing an axis to a scalar

+) Add new xml node for new transformation
+) Add new algorithms for axis reduction
+) Make changes in some places to make sure everything works fine

Test
+) On Curie
+) Tests pass and are correct

  • Property svn:executable set to *
File size: 27.2 KB
1/*!
2   \file grid_transformation.cpp
3   \author Ha NGUYEN
4   \since 14 May 2015
5   \date 02 Jul 2015
6
7   \brief Interface for all transformations.
8 */
9#include "grid_transformation.hpp"
10#include "reduce_axis_to_scalar.hpp"
11#include "scalar_algorithm_reduce_axis.hpp"
12#include "axis_algorithm_inverse.hpp"
13#include "axis_algorithm_zoom.hpp"
14#include "axis_algorithm_interpolate.hpp"
15#include "domain_algorithm_zoom.hpp"
16#include "domain_algorithm_interpolate.hpp"
17#include "context.hpp"
18#include "context_client.hpp"
19#include "axis_algorithm_transformation.hpp"
20#include "distribution_client.hpp"
21#include "mpi_tag.hpp"
22#include "grid.hpp"
23#include <boost/unordered_map.hpp>
24
25namespace xios {
26CGridTransformation::CGridTransformation(CGrid* destination, CGrid* source)
27 : CGridTransformationSelector(destination, source),
28  tmpGridDestination_(destination), originalGridSource_(source),
29  tempGridSrcs_(), tempGridDests_(),
30  dynamicalTransformation_(false), timeStamp_()
31{
32}
33
34CGridTransformation::~CGridTransformation()
35{
36}
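/*!
  A minimal usage sketch (illustrative only, not part of this file's original code):
  assuming gridDst and gridSrc are hypothetical, fully defined CGrid pointers and an
  active context client, the transformation is computed and the resulting exchange
  maps are queried as follows:

    CGridTransformation trans(gridDst, gridSrc);
    trans.computeAll(std::vector<CArray<double,1>* >(), 0);
    const std::list<CGridTransformation::SendingIndexGridSourceMap>& toSend =
      trans.getLocalIndexToSendFromGridSource();
    const std::list<CGridTransformation::RecvIndexGridDestinationMap>& toRecv =
      trans.getLocalIndexToReceiveOnGridDest();
*/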
37
38/*!
39  Select the algorithm of a scalar corresponding to its transformation type and its position in each element
40  \param [in] elementPositionInGrid position of the element in the grid, e.g. for a grid with 1 domain and 1 axis, the position of the domain is 0 and the position of the axis is 1
41  \param [in] transType transformation type, for now we have reduce_axis_to_scalar
42  \param [in] transformationOrder position of the transformation in an element (an element can have several transformations)
43*/
44void CGridTransformation::selectScalarAlgo(int elementPositionInGrid, ETranformationType transType, int transformationOrder)
45{
46  int scalarSrcIndex = -1, axisSrcIndex = -1, domainSrcIndex = -1;
47  std::vector<CScalar*> scaListDestP = gridDestination_->getScalars();
48  std::vector<CScalar*> scaListSrcP  = gridSource_->getScalars();
49  std::vector<CAxis*> axisListSrcP   = gridSource_->getAxis();
50  std::vector<CDomain*> domainListSrcP = gridSource_->getDomains();
51
52  int scalarDstIndex =  elementPositionInGridDst2ScalarPosition_[elementPositionInGrid];
53  CScalar::TransMapTypes trans = scaListDestP[scalarDstIndex]->getAllTransformations();
54  CScalar::TransMapTypes::const_iterator it = trans.begin();
55
56  for (int i = 0; i < transformationOrder; ++i, ++it) {}  // Find the correct transformation
57
58  CReduceAxisToScalar* reduceAxis = 0;
59  CGenericAlgorithmTransformation* algo = 0;
60  switch (transType)
61  {
62    case TRANS_REDUCE_AXIS_TO_SCALAR:
63      reduceAxis = dynamic_cast<CReduceAxisToScalar*> (it->second);
64      axisSrcIndex = elementPositionInGridSrc2AxisPosition_[elementPositionInGrid];
65      algo = new CScalarAlgorithmReduceScalar(scaListDestP[scalarDstIndex], axisListSrcP[axisSrcIndex], reduceAxis);
66      break;
67    default:
68      break;
69  }
70  algoTransformation_.push_back(algo);
71}
72
73/*!
74  Select the algorithm of an axis corresponding to its transformation type and its position in each element
75  \param [in] elementPositionInGrid position of the element in the grid, e.g. for a grid with 1 domain and 1 axis, the position of the domain is 0 and the position of the axis is 1
76  \param [in] transType transformation type, for now we have zoom_axis, inverse_axis, interpolate_axis
77  \param [in] transformationOrder position of the transformation in an element (an element can have several transformations)
78*/
79void CGridTransformation::selectAxisAlgo(int elementPositionInGrid, ETranformationType transType, int transformationOrder)
80{
81  int axisSrcIndex = -1, domainSrcIndex = -1;
82  std::vector<CAxis*> axisListDestP = gridDestination_->getAxis();
83  std::vector<CAxis*> axisListSrcP = gridSource_->getAxis();
84  std::vector<CDomain*> domainListSrcP = gridSource_->getDomains();
85
86  int axisDstIndex =  elementPositionInGridDst2AxisPosition_[elementPositionInGrid];
87  CAxis::TransMapTypes trans = axisListDestP[axisDstIndex]->getAllTransformations();
88  CAxis::TransMapTypes::const_iterator it = trans.begin();
89
90  for (int i = 0; i < transformationOrder; ++i, ++it) {}  // Find the correct transformation
91
92  CZoomAxis* zoomAxis = 0;
93  CInterpolateAxis* interpAxis = 0;
94  CGenericAlgorithmTransformation* algo = 0;
95  switch (transType)
96  {
97    case TRANS_INTERPOLATE_AXIS:
98      interpAxis = dynamic_cast<CInterpolateAxis*> (it->second);
99      axisSrcIndex = elementPositionInGridSrc2AxisPosition_[elementPositionInGrid];
100      algo = new CAxisAlgorithmInterpolate(axisListDestP[axisDstIndex], axisListSrcP[axisSrcIndex], interpAxis);
101      break;
102    case TRANS_ZOOM_AXIS:
103      zoomAxis = dynamic_cast<CZoomAxis*> (it->second);
104      axisSrcIndex = elementPositionInGridSrc2AxisPosition_[elementPositionInGrid];
105      algo = new CAxisAlgorithmZoom(axisListDestP[axisDstIndex], axisListSrcP[axisSrcIndex], zoomAxis);
106      break;
107    case TRANS_INVERSE_AXIS:
108      axisSrcIndex = elementPositionInGridSrc2AxisPosition_[elementPositionInGrid];
109      algo = new CAxisAlgorithmInverse(axisListDestP[axisDstIndex], axisListSrcP[axisSrcIndex]);
110      break;
111    default:
112      break;
113  }
114  algoTransformation_.push_back(algo);
115}
116
117/*!
118  Select the algorithm of a domain corresponding to its transformation type and its position in each element
119  \param [in] elementPositionInGrid position of the element in the grid, e.g. for a grid with 1 domain and 1 axis, the position of the domain is 0 and the position of the axis is 1
120  \param [in] transType transformation type, for now we have zoom_domain, interpolate_domain
121  \param [in] transformationOrder position of the transformation in an element (an element can have several transformations)
122*/
123void CGridTransformation::selectDomainAlgo(int elementPositionInGrid, ETranformationType transType, int transformationOrder)
124{
125  std::vector<CDomain*> domainListDestP = gridDestination_->getDomains();
126  std::vector<CDomain*> domainListSrcP = gridSource_->getDomains();
127
128  int domainIndex =  elementPositionInGridDst2DomainPosition_[elementPositionInGrid];
129  CDomain::TransMapTypes trans = domainListDestP[domainIndex]->getAllTransformations();
130  CDomain::TransMapTypes::const_iterator it = trans.begin();
131
132  for (int i = 0; i < transformationOrder; ++i, ++it) {}  // Find the correct transformation
133
134  CZoomDomain* zoomDomain = 0;
135  CInterpolateDomain* interpFileDomain = 0;
136  CGenericAlgorithmTransformation* algo = 0;
137  switch (transType)
138  {
139    case TRANS_INTERPOLATE_DOMAIN:
140      interpFileDomain = dynamic_cast<CInterpolateDomain*> (it->second);
141      algo = new CDomainAlgorithmInterpolate(domainListDestP[domainIndex], domainListSrcP[domainIndex],interpFileDomain);
142      break;
143    case TRANS_ZOOM_DOMAIN:
144      zoomDomain = dynamic_cast<CZoomDomain*> (it->second);
145      algo = new CDomainAlgorithmZoom(domainListDestP[domainIndex], domainListSrcP[domainIndex], zoomDomain);
146      break;
147    default:
148      break;
149  }
150  algoTransformation_.push_back(algo);
151}
152
153/*!
154  If there is more than one transformation, a new temporary grid is created to play the role of the grid destination.
155This newly created grid keeps a pointer to the truly transformed element of the grid destination and generates new copies of the other elements from the grid source.
156  \param [in] elementPositionInGrid position of element in grid
157  \param [in] transType transformation type
158*/
159void CGridTransformation::setUpGridDestination(int elementPositionInGrid, ETranformationType transType, int nbTransformation)
160{
161  if (isSpecialTransformation(transType)) return;
162
163  if (!tempGridDests_.empty() && (getNbAlgo() == tempGridDests_.size()))
164  {
165    tempGridDests_.resize(0);
166  }
167
168  std::vector<CScalar*> scalarListDestP = gridDestination_->getScalars();
169  std::vector<CScalar*> scalarListSrcP = gridSource_->getScalars(), scalarDst;
170
171  std::vector<CAxis*> axisListDestP = gridDestination_->getAxis();
172  std::vector<CAxis*> axisListSrcP = gridSource_->getAxis(), axisDst;
173
174  std::vector<CDomain*> domListDestP = gridDestination_->getDomains();
175  std::vector<CDomain*> domListSrcP = gridSource_->getDomains(), domainDst;
176
177  CArray<int,1> axisDomainOrderSrc = gridSource_->axis_domain_order;
178  CArray<int,1> axisDomainOrderDst = gridDestination_->axis_domain_order;
179
180  int scalarIndex = -1, axisIndex = -1, domainIndex = -1;
181  switch (transType)
182  {
183    case TRANS_INTERPOLATE_DOMAIN:
184    case TRANS_ZOOM_DOMAIN:
185      domainIndex = elementPositionInGridDst2DomainPosition_[elementPositionInGrid];
186      break;
187
188    case TRANS_INTERPOLATE_AXIS:
189    case TRANS_ZOOM_AXIS:
190    case TRANS_INVERSE_AXIS:
191      axisIndex =  elementPositionInGridDst2AxisPosition_[elementPositionInGrid];
192      break;
193
194    case TRANS_REDUCE_AXIS_TO_SCALAR:
195      scalarIndex = elementPositionInGridDst2ScalarPosition_[elementPositionInGrid];
196      break;
197    default:
198      break;
199  }
200
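  // Rebuild the element list of the temporary destination grid: the element being
  // transformed is taken from the true grid destination, all other elements are
  // reused from the grid source.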
201  for (int idx = 0; idx < axisDomainOrderDst.numElements(); ++idx)
202  {
203    int dimElementDst = axisDomainOrderDst(idx);
204    if (2 == dimElementDst)
205    {
206      if (elementPositionInGrid == idx)
207        domainDst.push_back(domListDestP[domainIndex]);
208      else
209        domainDst.push_back(domListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]);
210    }
211    else if (1 == dimElementDst)
212    {
213      if (elementPositionInGrid == idx)
214        axisDst.push_back(axisListDestP[axisIndex]);
215      else
216        axisDst.push_back(axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]);
217    }
218    else
219    {
220      if (elementPositionInGrid == idx)
221        scalarDst.push_back(scalarListDestP[scalarIndex]);
222      else
223        scalarDst.push_back(scalarListSrcP[elementPositionInGridSrc2ScalarPosition_[elementPositionInGrid]]);
224    }
225  }
226
227  tmpGridDestination_ = CGrid::createGrid(domainDst, axisDst, scalarDst, gridDestination_->axis_domain_order);
228  tmpGridDestination_->computeGridGlobalDimension(domainDst, axisDst, scalarDst, gridDestination_->axis_domain_order);
229  tempGridDests_.push_back(tmpGridDestination_);
230}
231
232/*!
233  Assign the current grid destination to the grid source of the new transformation.
234The current grid destination plays the role of the grid source in the next transformation (if any).
235Only the element on which the transformation is performed is modified.
236  \param [in] elementPositionInGrid position of element in grid
237  \param [in] transType transformation type
238*/
239void CGridTransformation::setUpGridSource(int elementPositionInGrid, ETranformationType transType, int nbTransformation)
240{
241  if (!tempGridSrcs_.empty() && (getNbAlgo()-1) == tempGridSrcs_.size())
242  {
243    tempGridSrcs_.resize(0);
244  }
245
246  std::vector<CScalar*> scalarListDestP = tmpGridDestination_->getScalars();
247  std::vector<CScalar*> scalarListSrcP = gridSource_->getScalars(), scalarSrc;
248
249  std::vector<CAxis*> axisListDestP = tmpGridDestination_->getAxis();
250  std::vector<CAxis*> axisListSrcP = gridSource_->getAxis(), axisSrc;
251
252  std::vector<CDomain*> domListDestP = tmpGridDestination_->getDomains();
253  std::vector<CDomain*> domListSrcP = gridSource_->getDomains(), domainSrc;
254
255  CArray<int,1> axisDomainOrderDst = gridDestination_->axis_domain_order;
256
257  int axisIndex = -1, domainIndex = -1, scalarIndex = -1;
258  int axisListIndex = 0, domainListIndex = 0, scalarListIndex = 0;
259  switch (transType)
260  {
261    case TRANS_INTERPOLATE_DOMAIN:
262    case TRANS_ZOOM_DOMAIN:
263      domainIndex = elementPositionInGridDst2DomainPosition_[elementPositionInGrid];
264      break;
265
266    case TRANS_INTERPOLATE_AXIS:
267    case TRANS_ZOOM_AXIS:
268    case TRANS_INVERSE_AXIS:
269      axisIndex =  elementPositionInGridDst2AxisPosition_[elementPositionInGrid];
270      break;
271
272    case TRANS_REDUCE_AXIS_TO_SCALAR:
273      scalarIndex = elementPositionInGridDst2ScalarPosition_[elementPositionInGrid];
274      break;
275    default:
276      break;
277  }
278
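  // Build the element list of the new grid source: the transformed element is shared
  // with the temporary grid destination, while each remaining element is a fresh copy
  // that references the corresponding element of the temporary grid destination.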
279  for (int idx = 0; idx < axisDomainOrderDst.numElements(); ++idx)
280  {
281    int dimElementDst = axisDomainOrderDst(idx);
282    if (2 == dimElementDst)
283    {
284      if (elementPositionInGrid == idx)
285        domainSrc.push_back(domListDestP[domainIndex]);
286      else
287      {
288        CDomain* domain = CDomain::createDomain();
289        domain->domain_ref.setValue(domListDestP[domainListIndex]->getId());
290        domain->solveRefInheritance(true);
291        domain->checkAttributesOnClient();
292        domainSrc.push_back(domain);
293      }
294      ++domainListIndex;
295    }
296    else if (1 == dimElementDst)
297    {
298      if (elementPositionInGrid == idx)
299        axisSrc.push_back(axisListDestP[axisIndex]);
300      else
301      {
302        CAxis* axis = CAxis::createAxis();
303        axis->axis_ref.setValue(axisListDestP[axisListIndex]->getId());
304        axis->solveRefInheritance(true);
305        axis->checkAttributesOnClient();
306        axisSrc.push_back(axis);
307      }
308      ++axisListIndex;
309    }
310    else
311    {
312      if (elementPositionInGrid == idx)
313        scalarSrc.push_back(scalarListDestP[scalarIndex]);
314      else
315      {
316        CScalar* scalar = CScalar::createScalar();
317        scalar->scalar_ref.setValue(scalarListDestP[scalarListIndex]->getId());
318        scalar->solveRefInheritance(true);
319        scalar->checkAttributesOnClient();
320        scalarSrc.push_back(scalar);
321      }
322      ++scalarListIndex;
323    }
324  }
325
326  gridSource_ = CGrid::createGrid(domainSrc, axisSrc, scalarSrc, tmpGridDestination_->axis_domain_order);
327  gridSource_->computeGridGlobalDimension(domainSrc, axisSrc, scalarSrc, tmpGridDestination_->axis_domain_order);
328
329  tempGridSrcs_.push_back(gridSource_);
330}
331
332/*!
333  Perform all transformations
334  For each transformation, there are several steps:
335  -) Choose the correct algorithm from the transformation type and the position of the element
336  -) Calculate the mapping of global indexes between the current grid source and grid destination
337  -) Calculate the mapping of global indexes between the current grid DESTINATION and grid SOURCE
338  -) Make the current grid destination become the grid source of the next transformation
339*/
340void CGridTransformation::computeAll(const std::vector<CArray<double,1>* >& dataAuxInputs, Time timeStamp)
341{
342  if (nbNormalAlgos_ < 1) return;
343  if (!auxInputs_.empty() && !dynamicalTransformation_) { dynamicalTransformation_ = true; return; }
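  // For a dynamical transformation (one driven by auxiliary inputs), the exchange maps are
  // recomputed only when a new time stamp is seen; otherwise the previous result is reused.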
344  if (dynamicalTransformation_)
345  {
346    if (timeStamp_.insert(timeStamp).second)   //Reset map
347    {
348      std::list<SendingIndexGridSourceMap>().swap(localIndexToSendFromGridSource_);
349      std::list<RecvIndexGridDestinationMap>().swap(localIndexToReceiveOnGridDest_);
350      std::list<size_t>().swap(nbLocalIndexOnGridDest_);
351      std::list<std::vector<bool> >().swap(localMaskOnGridDest_);
352    }
353    else
354      return;
355  }
356
357  CContext* context = CContext::getCurrent();
358  CContextClient* client = context->client;
359
360  ListAlgoType::const_iterator itb = listAlgos_.begin(),
361                               ite = listAlgos_.end(), it;
362
363  CGenericAlgorithmTransformation* algo = 0;
364  int nbAgloTransformation = 0; // Count only executed transformations; generating a domain is a special case, not executed in this list
365  for (it = itb; it != ite; ++it)
366  {
367    int elementPositionInGrid = it->first;
368    ETranformationType transType = (it->second).first;
369    int transformationOrder = (it->second).second;
370    SourceDestinationIndexMap globaIndexWeightFromSrcToDst;
371
372    // Create a temporary grid destination which contains the transformed element of the grid destination and
373    // the non-transformed elements of the grid source
374    setUpGridDestination(elementPositionInGrid, transType, nbAgloTransformation);
375
376    // First of all, select an algorithm
377    if (!dynamicalTransformation_ || (algoTransformation_.size() < listAlgos_.size()))
378    {
379      selectAlgo(elementPositionInGrid, transType, transformationOrder, algoTypes_[std::distance(itb, it)]);
380      algo = algoTransformation_.back();
381    }
382    else
383      algo = algoTransformation_[std::distance(itb, it)];
384
385    if (0 != algo) // Only registered transformation can be executed
386    {
387      algo->computeIndexSourceMapping(dataAuxInputs);
388
389      // ComputeTransformation of global index of each element
390      int elementPosition = it->first;
391      algo->computeGlobalSourceIndex(elementPosition,
392                                     gridSource_,
393                                     tmpGridDestination_,
394                                     globaIndexWeightFromSrcToDst);
395
396      // Compute transformation of global indexes among grids
397      computeTransformationMapping(globaIndexWeightFromSrcToDst);
398
399      if (1 < nbNormalAlgos_)
400      {
401        // Now grid destination becomes grid source in a new transformation
402        if (nbAgloTransformation != (nbNormalAlgos_-1)) setUpGridSource(elementPositionInGrid, transType, nbAgloTransformation);
403      }
404      ++nbAgloTransformation;
405    }
406  }
407}
408
409/*!
410  Compute the exchange of indexes between the grid source and the grid destination
411  \param [in] globaIndexWeightFromSrcToDst global index mapping, with weights, from the grid source to the grid destination
412*/
413void CGridTransformation::computeTransformationMapping(const SourceDestinationIndexMap& globaIndexWeightFromSrcToDst)
414{
415  CContext* context = CContext::getCurrent();
416  CContextClient* client = context->client;
417  int nbClient = client->clientSize;
418  int clientRank = client->clientRank;
419
420  // Recalculate the distribution of grid destination
421  CDistributionClient distributionClientDest(client->clientRank, tmpGridDestination_);
422  CDistributionClient::GlobalLocalDataMap& globalLocalIndexGridDestSendToServer = distributionClientDest.getGlobalLocalDataSendToServer();
423
424  // Update number of local index on each transformation
425  size_t nbLocalIndex = globalLocalIndexGridDestSendToServer.size();
426  nbLocalIndexOnGridDest_.push_back(nbLocalIndex);
427  localMaskOnGridDest_.push_back(std::vector<bool>());
428  std::vector<bool>& tmpMask = localMaskOnGridDest_.back();
429  tmpMask.resize(nbLocalIndex,false);
430
431  // Find out number of index sent from grid source and number of index received on grid destination
432  SourceDestinationIndexMap::const_iterator itbIndex = globaIndexWeightFromSrcToDst.begin(),
433                                            iteIndex = globaIndexWeightFromSrcToDst.end(), itIndex;
434  typedef boost::unordered_map<size_t, std::vector<std::pair<size_t,double> > > SendIndexMap;
435  std::map<int,int> sendRankSizeMap,recvRankSizeMap;
436  int connectedClient = globaIndexWeightFromSrcToDst.size();
437  int* recvCount=new int[nbClient];
438  int* displ=new int[nbClient];
439  int* sendRankBuff=new int[connectedClient];
440  int* sendSizeBuff=new int[connectedClient];
441  int n = 0;
442  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex, ++n)
443  {
444    sendRankBuff[n] = itIndex->first;
445    const SendIndexMap& sendIndexMap = itIndex->second;
446    SendIndexMap::const_iterator itbSend = sendIndexMap.begin(), iteSend = sendIndexMap.end(), itSend;
447    int sendSize = 0;
448    for (itSend = itbSend; itSend != iteSend; ++itSend)
449    {
450      sendSize += itSend->second.size();
451    }
452    sendSizeBuff[n] = sendSize;
453    sendRankSizeMap[itIndex->first] = sendSize;
454  }
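  // Exchange, among all clients, the number of connected ranks, then the destination ranks and
  // message sizes, so that each process can work out which ranks will send data to it and how much.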
455  MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm);
456
457  displ[0]=0 ;
458  for(int n=1;n<nbClient;n++) displ[n]=displ[n-1]+recvCount[n-1];
459  int recvSize=displ[nbClient-1]+recvCount[nbClient-1];
460  int* recvRankBuff=new int[recvSize];
461  int* recvSizeBuff=new int[recvSize];
462  MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm);
463  MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm);
464  for (int i = 0; i < nbClient; ++i)
465  {
466    int currentPos = displ[i];
467    for (int j = 0; j < recvCount[i]; ++j)
468      if (recvRankBuff[currentPos+j] == clientRank)
469      {
470        recvRankSizeMap[i] = recvSizeBuff[currentPos+j];
471      }
472  }
473
474  // Sending global index of grid source to corresponding process as well as the corresponding mask
475  std::vector<MPI_Request> requests;
476  std::vector<MPI_Status> status;
477  boost::unordered_map<int, unsigned char* > recvMaskDst;
478  boost::unordered_map<int, unsigned long* > recvGlobalIndexSrc;
479  for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
480  {
481    int recvRank = itRecv->first;
482    int recvSize = itRecv->second;
483    recvMaskDst[recvRank] = new unsigned char [recvSize];
484    recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize];
485
486    requests.push_back(MPI_Request());
487    MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());
488    requests.push_back(MPI_Request());
489    MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back());
490  }
491
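  // For every rank this process sends to, pack the destination global indexes, their weights,
  // a mask flagging which destination indexes are locally held, and the matching source global
  // indexes, then post the corresponding non-blocking sends of the source indexes and the mask.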
492  boost::unordered_map<int, CArray<size_t,1> > globalIndexDst;
493  boost::unordered_map<int, CArray<double,1> > weightDst;
494  boost::unordered_map<int, unsigned char* > sendMaskDst;
495  boost::unordered_map<int, unsigned long* > sendGlobalIndexSrc;
496  for (itIndex = itbIndex; itIndex != iteIndex; ++itIndex)
497  {
498    int sendRank = itIndex->first;
499    int sendSize = sendRankSizeMap[sendRank];
500    const SendIndexMap& sendIndexMap = itIndex->second;
501    SendIndexMap::const_iterator itbSend = sendIndexMap.begin(), iteSend = sendIndexMap.end(), itSend;
502    globalIndexDst[sendRank].resize(sendSize);
503    weightDst[sendRank].resize(sendSize);
504    sendMaskDst[sendRank] = new unsigned char [sendSize];
505    sendGlobalIndexSrc[sendRank] = new unsigned long [sendSize];
506    int countIndex = 0;
507    for (itSend = itbSend; itSend != iteSend; ++itSend)
508    {
509      const std::vector<std::pair<size_t,double> >& dstWeight = itSend->second;
510      for (int idx = 0; idx < dstWeight.size(); ++idx)
511      {
512        globalIndexDst[sendRank](countIndex) = dstWeight[idx].first;
513        weightDst[sendRank](countIndex) = dstWeight[idx].second;
514        if (0 < globalLocalIndexGridDestSendToServer.count(dstWeight[idx].first))
515          sendMaskDst[sendRank][countIndex] = 1;
516        else
517          sendMaskDst[sendRank][countIndex] = 0;
518        sendGlobalIndexSrc[sendRank][countIndex] = itSend->first;
519        ++countIndex;
520      }
521    }
522
523    // Send global index source and mask
524    requests.push_back(MPI_Request());
525    MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());
526    requests.push_back(MPI_Request());
527    MPI_Isend(sendMaskDst[sendRank], sendSize, MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back());
528  }
529
530  status.resize(requests.size());
531  MPI_Waitall(requests.size(), &requests[0], &status[0]);
532
533  // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return
534  std::vector<MPI_Request>().swap(requests);
535  std::vector<MPI_Status>().swap(status);
536  // Okie, on destination side, we will wait for information of masked index of source
537  for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
538  {
539    int recvRank = itSend->first;
540    int recvSize = itSend->second;
541
542    requests.push_back(MPI_Request());
543    MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
544  }
545
546  // Ok, now we fill in local index of grid source (we even count for masked index)
547  CDistributionClient distributionClientSrc(client->clientRank, gridSource_);
548  CDistributionClient::GlobalLocalDataMap& globalLocalIndexGridSrcSendToServer = distributionClientSrc.getGlobalLocalDataSendToServer();
549  localIndexToSendFromGridSource_.push_back(SendingIndexGridSourceMap());
550  SendingIndexGridSourceMap& tmpSend = localIndexToSendFromGridSource_.back();
551  for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv)
552  {
553    int recvRank = itRecv->first;
554    int recvSize = itRecv->second;
555    unsigned char* recvMask = recvMaskDst[recvRank];
556    unsigned long* recvIndexSrc = recvGlobalIndexSrc[recvRank];
557    int realSendSize = 0;
558    for (int idx = 0; idx < recvSize; ++idx)
559    {
560      if (0 != (*(recvMask+idx))) // OKie, now we have a demand from non-masked index destination
561        if (0 < globalLocalIndexGridSrcSendToServer.count(*(recvIndexSrc+idx))) // check whether index source is masked
562         ++realSendSize;
563        else // inform the destination that this index is masked
564         *(recvMask+idx) = 0;
565    }
566
567    tmpSend[recvRank].resize(realSendSize);
568    realSendSize = 0;
569    for (int idx = 0; idx < recvSize; ++idx)
570    {
571      if (0 != (*(recvMask+idx))) // OKie, now we have a demand from non-masked index destination
572      {
573        tmpSend[recvRank](realSendSize) = globalLocalIndexGridSrcSendToServer[*(recvIndexSrc+idx)];
574         ++realSendSize;
575      }
576    }
577
578    // Okie, now inform the destination which source index are masked
579    requests.push_back(MPI_Request());
580    MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back());
581  }
582  status.resize(requests.size());
583  MPI_Waitall(requests.size(), &requests[0], &status[0]);
584
585  // Cool, now we can fill in local index of grid destination (counted for masked index)
586  localIndexToReceiveOnGridDest_.push_back(RecvIndexGridDestinationMap());
587  RecvIndexGridDestinationMap& recvTmp = localIndexToReceiveOnGridDest_.back();
588  for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend)
589  {
590    int recvRank = itSend->first;
591    int recvSize = itSend->second;
592    unsigned char* recvMask = sendMaskDst[recvRank];
593
594    CArray<size_t,1>& recvIndexDst = globalIndexDst[recvRank];
595    CArray<double,1>& recvWeightDst = weightDst[recvRank];
596    int realRecvSize = 0;
597    for (int idx = 0; idx < recvSize; ++idx)
598    {
599      if (0 != *(recvMask+idx)) // OKie, now we have a non-masked index destination
600         ++realRecvSize;
601    }
602
603    int localIndexDst;
604    recvTmp[recvRank].resize(realRecvSize);
605    realRecvSize = 0;
606    for (int idx = 0; idx < recvSize; ++idx)
607    {
608      if (0 != *(recvMask+idx)) // OKie, now we have a demand from non-masked index destination
609      {
610        recvTmp[recvRank][realRecvSize].first = globalLocalIndexGridDestSendToServer[recvIndexDst(idx)];
611        recvTmp[recvRank][realRecvSize].second = recvWeightDst(idx);
612        tmpMask[globalLocalIndexGridDestSendToServer[recvIndexDst(idx)]] = true;
613         ++realRecvSize;
614      }
615    }
616  }
617
618  delete [] recvCount;
619  delete [] displ;
620  delete [] sendRankBuff;
621  delete [] recvRankBuff;
622  delete [] sendSizeBuff;
623  delete [] recvSizeBuff;
624
625  boost::unordered_map<int, unsigned char* >::const_iterator itChar;
626  for (itChar = sendMaskDst.begin(); itChar != sendMaskDst.end(); ++itChar)
627    delete [] itChar->second;
628  for (itChar = recvMaskDst.begin(); itChar != recvMaskDst.end(); ++itChar)
629    delete [] itChar->second;
630  boost::unordered_map<int, unsigned long* >::const_iterator itLong;
631  for (itLong = sendGlobalIndexSrc.begin(); itLong != sendGlobalIndexSrc.end(); ++itLong)
632    delete [] itLong->second;
633  for (itLong = recvGlobalIndexSrc.begin(); itLong != recvGlobalIndexSrc.end(); ++itLong)
634    delete [] itLong->second;
635
636}
637
638/*!
639  Local indexes of data which need to be sent from the grid source
640  \return local index of data
641*/
642const std::list<CGridTransformation::SendingIndexGridSourceMap>& CGridTransformation::getLocalIndexToSendFromGridSource() const
643{
644  return localIndexToSendFromGridSource_;
645}
646
647/*!
648  Local index of data which will be received on the grid destination
649  \return local index of data
650*/
651const std::list<CGridTransformation::RecvIndexGridDestinationMap>& CGridTransformation::getLocalIndexToReceiveOnGridDest() const
652{
653  return localIndexToReceiveOnGridDest_;
654}
655
656/*!
657 Number of indexes that will be received on the grid destination
658  \return number of indexes of data
659*/
660const std::list<size_t>& CGridTransformation::getNbLocalIndexToReceiveOnGridDest() const
661{
662  return nbLocalIndexOnGridDest_;
663}
664
665/*!
666  Local mask of data which will be received on the grid destination
667  \return local mask of data
668*/
669const std::list<std::vector<bool> >& CGridTransformation::getLocalMaskIndexOnGridDest() const
670{
671  return localMaskOnGridDest_;
672}
673
674}