source: XIOS/trunk/src/transformation/transformation_mapping.cpp @ 843

Last change on this file since 843 was 843, checked in by mhnguyen, 6 years ago

Several improvements

+) Replace some time-consuming operations by simpler ones

Test
+) On Curie
+) All tests pass

File size: 9.9 KB
Line 
1/*!
2   \file transformation_mapping.cpp
3   \author Ha NGUYEN
4   \since 14 May 2015
5   \date 18 June 2015
6
7   \brief Take charge of communication among clients to exchange transformed data.
8 */
9
10#include "transformation_mapping.hpp"
11#include <boost/unordered_map.hpp>
12#include "context.hpp"
13#include "context_client.hpp"
14#include "distribution_client.hpp"
15#include "client_client_dht_template.hpp"
16#include "dht_data_types.hpp"
17#include "mpi_tag.hpp"
18
19namespace xios {
20
21CTransformationMapping::CTransformationMapping(CGrid* destination, CGrid* source)
22  : gridSource_(source), gridDestination_(destination)
23{
24  CContext* context = CContext::getCurrent();
25  CContextClient* client=context->client;
26  int clientRank = client->clientRank;
27
28  CDistributionClient distributionClientSrc(client->clientRank, gridSource_);
29  const CDistributionClient::GlobalLocalDataMap& globalLocalIndexGridSrc = distributionClientSrc.getGlobalLocalDataSendToServer();
30  CDistributionClient::GlobalLocalDataMap::const_iterator itIndex = globalLocalIndexGridSrc.begin(), iteIndex = globalLocalIndexGridSrc.end();
31
32  // Mapping of global index and pair containing rank and local index
33  CClientClientDHTPairIntInt::Index2InfoTypeMap globalIndexOfClientSrc;
34  globalIndexOfClientSrc.rehash(std::ceil(globalLocalIndexGridSrc.size()/globalIndexOfClientSrc.max_load_factor()));
35
36  PairIntInt pairIntInt;
37  for (; itIndex != iteIndex; ++itIndex)
38  {
39    pairIntInt.first  = clientRank;
40    pairIntInt.second = itIndex->second;
41    globalIndexOfClientSrc[itIndex->first] = pairIntInt;
42  }
43
44  gridIndexClientClientMapping_ = new CClientClientDHTPairIntInt(globalIndexOfClientSrc,
45                                                                 client->intraComm);
46}
47
48CTransformationMapping::CTransformationMapping(CAxis* destination, CAxis* source)
49  : gridSource_(0), gridDestination_(0)
50{
51  CContext* context = CContext::getCurrent();
52  CContextClient* client=context->client;
53  int clientRank = client->clientRank;
54
55  int niSrc     = source->n.getValue();
56  int ibeginSrc = source->begin.getValue();
57
58  CClientClientDHTPairIntInt::Index2InfoTypeMap globalIndexOfAxisSource;
59  PairIntInt pii;
60  for (int idx = 0; idx < niSrc; ++idx)
61  {
62    pii.first  = clientRank;
63    pii.second = idx;
64    globalIndexOfAxisSource[idx+ibeginSrc] = pii; //std::make_pair(clientRank,idx);
65  }
66
67  gridIndexClientClientMapping_ = new CClientClientDHTPairIntInt(globalIndexOfAxisSource,
68                                                                 client->intraComm);
69}
70
71CTransformationMapping::~CTransformationMapping()
72{
73  if (0 != gridIndexClientClientMapping_) delete gridIndexClientClientMapping_;
74}
75
/*!
  Suppose that we have transformations between two grids, which are represented in form of mapping between global indexes of these two grids,
this function tries to find out which clients a client needs to send and receive these global indexes to accomplish the transformations.
  The grid destination is the grid whose global indexes demand global indexes from the other grid.
  Grid destination and grid source are also distributed among clients but in different manners.
  On return, this populates:
    - globalIndexReceivedOnGridDestMapping_ : per source-client rank, indexes this client will receive;
    - globalIndexSendToGridDestMapping_     : per destination-client rank, (local, global) index pairs this client must send.
  \param [in] globaIndexWeightFromDestToSource mapping representing the transformations
*/
void CTransformationMapping::computeTransformationMapping(const DestinationIndexMap& globaIndexWeightFromDestToSource)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;

  DestinationIndexMap::const_iterator itbMap = globaIndexWeightFromDestToSource.begin(), itMap,
                                      iteMap = globaIndexWeightFromDestToSource.end();

  // Not only can one index on grid destination demand two indexes from grid source,
  // but an index on grid source may have to be sent to two indexes of grid destination.
  // First pass: count the total number of demanded source indexes (duplicates included).
  size_t nbGlobalIndexSrc = 0;
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    nbGlobalIndexSrc += (itMap->second).size();
  }

  // Flatten every demanded source global index into one array, in map order.
  // Each map value is a vector of (int, (source global index, weight)) —
  // NOTE(review): the first int is presumably a local index on the destination; confirm against ReceivedIndex.
  CArray<size_t,1> globalIndexMap(nbGlobalIndexSrc);
  std::vector<std::pair<int, std::pair<size_t,double> > >::const_iterator itbVecPair, itVecPair, iteVecPair;
  size_t idxSrc = 0;
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    itbVecPair = (itMap->second).begin();
    iteVecPair = (itMap->second).end();
    for (itVecPair = itbVecPair; itVecPair != iteVecPair; ++itVecPair)
    {
      globalIndexMap(idxSrc) = itVecPair->second.first;
      ++idxSrc;
    }
  }

  // Find out on which clients the necessary indexes of grid source are.
  // The DHT lookup returns, for each demanded global index, (owner rank, owner-local index).
  gridIndexClientClientMapping_->computeIndexInfoMapping(globalIndexMap);
  const CClientClientDHTPairIntInt::Index2InfoTypeMap& globalIndexSentFromGridSource = gridIndexClientClientMapping_->getInfoIndexMap();
  CClientClientDHTPairIntInt::Index2InfoTypeMap::const_iterator itbMapSrc = globalIndexSentFromGridSource.begin(), itMapSrc,
                                                                iteMapSrc = globalIndexSentFromGridSource.end();
  size_t currentIndexSrc;
  int nbClient = client->clientSize;
  // nbIndexEachClient[r] = number of demanded source indexes owned by client r
  // sendNbClientBuff[r]  = 1 if this client will send a request message to client r
  std::vector<int> nbIndexEachClient(nbClient,0);
  std::vector<int> sendNbClientBuff(nbClient,0);
  for (idxSrc = 0; idxSrc < nbGlobalIndexSrc; ++idxSrc)
  {
    currentIndexSrc = globalIndexMap(idxSrc);
    itMapSrc = globalIndexSentFromGridSource.find(currentIndexSrc);
    if (iteMapSrc != itMapSrc)  // index not found in the DHT is silently skipped
    {
      ++nbIndexEachClient[itMapSrc->second.first];
    }
  }

  // Allocate one send buffer per owner client: 2 entries (local index, global index) per demanded index.
  // NOTE(review): buffers are allocated as unsigned long[] but stored as size_t* — this only
  // compiles/works where size_t is unsigned long; confirm on all target platforms.
  boost::unordered_map<int,size_t* > sendIndexMap;
  for (int idx = 0; idx < nbClient; ++idx)
  {
    if (0 != nbIndexEachClient[idx])
    {
      globalIndexReceivedOnGridDestMapping_[idx].resize(nbIndexEachClient[idx]);
      sendIndexMap[idx] = new unsigned long [2*nbIndexEachClient[idx]];
      nbIndexEachClient[idx] = 0;   // reset so the next pass can reuse it as a fill cursor
      sendNbClientBuff[idx] = 1;
    }
  }

  // Second pass over the transformation map: fill, for each owner rank,
  //  - the local bookkeeping of what will be received (globalIndexReceivedOnGridDestMapping_), and
  //  - the request buffer sent to that owner: [owner-local index, global index] pairs.
  int srcRank;
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    itbVecPair = (itMap->second).begin();
    iteVecPair = (itMap->second).end();
    for (itVecPair = itbVecPair; itVecPair != iteVecPair; ++itVecPair)
    {
      currentIndexSrc = itVecPair->second.first;
      itMapSrc = globalIndexSentFromGridSource.find(currentIndexSrc);
      if (iteMapSrc != itMapSrc)
      {
        srcRank = (itMapSrc->second).first;
        int& ind = nbIndexEachClient[srcRank];  // per-rank fill cursor (was reset above)
        globalIndexReceivedOnGridDestMapping_[srcRank][ind] = ReceivedIndex(itVecPair->first, itMap->first, itVecPair->second.second);
        sendIndexMap[srcRank][2*ind] = (itMapSrc->second).second;   // owner-local index
        sendIndexMap[srcRank][2*ind+1] = itMapSrc->first;           // global index
        ++ind;
      }
    }
  }

  // Element-wise SUM of the 0/1 flags tells each rank how many clients will message it.
  std::vector<int> recvNbClientBuff(nbClient,0);
  MPI_Allreduce(&sendNbClientBuff[0], &recvNbClientBuff[0], nbClient, MPI_INT, MPI_SUM, client->intraComm);
  int numClientToReceive = recvNbClientBuff[client->clientRank];

  // Then specify the size of the receiving buffer; since one buffer is reused for all
  // incoming messages, only the maximum incoming size matters (element-wise MAX).
  std::vector<int> recvIndexBuff(nbClient,0);
  MPI_Allreduce(&nbIndexEachClient[0], &recvIndexBuff[0], nbClient, MPI_INT, MPI_MAX, client->intraComm);
  std::vector<size_t>::const_iterator itbVec, itVec, iteVec;

  int buffSize = 2*recvIndexBuff[client->clientRank]; // we send global as well as local index
  // NOTE(review): left uninitialized when buffSize == 0; harmless only because the receive
  // loop cannot execute in that case (numClientToReceive > 0 implies buffSize > 0) — confirm.
  unsigned long* recvBuffGlobalIndex;
  if (0 != buffSize) recvBuffGlobalIndex = new unsigned long [buffSize];

  std::map<int, MPI_Request> requests;

  // Inform all "source clients" about the indexes they need to send (non-blocking sends;
  // the buffers in sendIndexMap must stay alive until the MPI_Wait loop below).
  boost::unordered_map<int,size_t* >::const_iterator itSendIndex = sendIndexMap.begin(),
                                                     iteSendIndex= sendIndexMap.end();
  for (; itSendIndex != iteSendIndex; ++itSendIndex)
  {
    MPI_Isend((itSendIndex->second),
              2*nbIndexEachClient[itSendIndex->first],
              MPI_UNSIGNED_LONG,
              (itSendIndex->first),
              MPI_TRANSFORMATION_MAPPING_INDEX,
              client->intraComm,
              &requests[(itSendIndex->first)]);
  }

  // Now all the "source clients" listen for messages from the "destination clients".
  int numClientReceived = 0;  // number of clients from which data has already been received
  int countBuff;
  while (numClientReceived < numClientToReceive)
  {
    MPI_Status status;
    MPI_Recv(recvBuffGlobalIndex,
             buffSize,
             MPI_UNSIGNED_LONG,
             MPI_ANY_SOURCE,
             MPI_TRANSFORMATION_MAPPING_INDEX,
             client->intraComm,
             &status);

    // Actual message length may be smaller than buffSize; query it from the status.
    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &countBuff);
    int clientDestRank = status.MPI_SOURCE;
    int sizeMap = countBuff/2;  // message is a flat list of (local index, global index) pairs
    globalIndexSendToGridDestMapping_[clientDestRank].resize(sizeMap);
    for (int idx = 0, i = 0; idx < countBuff; idx += 2, ++i)
    {
      globalIndexSendToGridDestMapping_[clientDestRank][i].first = recvBuffGlobalIndex[idx];
      globalIndexSendToGridDestMapping_[clientDestRank][i].second = recvBuffGlobalIndex[idx+1];
    }
    ++numClientReceived;
  }

  // Complete all outstanding sends before freeing their buffers.
  std::map<int, MPI_Request>::iterator itRequest;
  for (itRequest = requests.begin(); itRequest != requests.end(); ++itRequest)
    MPI_Wait(&itRequest->second, MPI_STATUS_IGNORE);

  if (0 != buffSize) delete [] recvBuffGlobalIndex;
  for (itSendIndex = sendIndexMap.begin(); itSendIndex != iteSendIndex; ++itSendIndex)
    delete [] itSendIndex->second;
}
228
229/*!
230  Return (grid) global index on grid destination. This mapping contains the rank of client source (that sends info to grid destination)
231and the corresponding global index to write on grid destination.
232  \return global index mapping to receive on grid destination
233*/
234const CTransformationMapping::ReceivedIndexMap& CTransformationMapping::getGlobalIndexReceivedOnGridDestMapping() const
235{
236  return globalIndexReceivedOnGridDestMapping_;
237}
238
239/*!
240  Return (grid) global index on grid source. This mapping contains the rank of client destination (which receives transformation info) and
241the corresponding global index to send
242  \return global index mapping to send on grid source
243*/
244const CTransformationMapping::SentIndexMap& CTransformationMapping::getGlobalIndexSendToGridDestMapping() const
245{
246  return globalIndexSendToGridDestMapping_;
247}
248
249}
Note: See TracBrowser for help on using the repository browser.