source: XIOS/trunk/src/transformation/transformation_mapping.cpp @ 829

Last change on this file since 829 was 829, checked in by mhnguyen, 8 years ago

Refactoring transformation code

+) On exchanging information during transformation, not only global indices are sent but also local indices
+) Correct a bug in distributed hash table (dht)
+) Add new type for dht
+) Clean up some redundant code

Test
+) On Curie
+) Every test passes
+) Code runs faster in some cases (up to 30%)

File size: 10.0 KB
/*!
   \file transformation_mapping.cpp
   \author Ha NGUYEN
   \since 14 May 2015
   \date 18 June 2015

   \brief Take charge of communication among clients to exchange transformed data.
 */

#include "transformation_mapping.hpp"
#include <boost/unordered_map.hpp>
#include "context.hpp"
#include "context_client.hpp"
#include "distribution_client.hpp"
#include "client_client_dht_template.hpp"
#include "dht_data_types.hpp"
#include "mpi_tag.hpp"

namespace xios {

CTransformationMapping::CTransformationMapping(CGrid* destination, CGrid* source)
  : gridSource_(source), gridDestination_(destination)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;
  int clientRank = client->clientRank;

  CDistributionClient distributionClientSrc(client->clientRank, gridSource_);

  const std::vector<size_t>& globalIndexGridSrc = distributionClientSrc.getGlobalDataIndexSendToServer(); //gridSource_->getDistributionClient()->getGlobalDataIndexSendToServer();
  const std::vector<int>& localIndexGridSrc = distributionClientSrc.getLocalDataIndexSendToServer();

  // Mapping between a global index and a pair containing (client rank, local index)
  CClientClientDHTPairIntInt::Index2InfoTypeMap globalIndexOfServer;
  int globalIndexSize = globalIndexGridSrc.size();
  PairIntInt pii;
  for (int idx = 0; idx < globalIndexSize; ++idx)
  {
    pii.first  = clientRank;
    pii.second = localIndexGridSrc[idx];
    globalIndexOfServer[globalIndexGridSrc[idx]] = pii; //std::make_pair(clientRank, localIndexGridSrc[idx]);
  }

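  // Store this mapping in a client-to-client distributed hash table (DHT) so that any client can later
  // look up which rank holds a given global index of the grid source, and under which local index.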
  gridIndexClientClientMapping_ = new CClientClientDHTPairIntInt(globalIndexOfServer,
                                                                 client->intraComm,
                                                                 true);
}

CTransformationMapping::CTransformationMapping(CAxis* destination, CAxis* source)
  : gridSource_(0), gridDestination_(0)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;
  int clientRank = client->clientRank;

  int niSrc     = source->n.getValue();
  int ibeginSrc = source->begin.getValue();

  CClientClientDHTPairIntInt::Index2InfoTypeMap globalIndexOfAxisSource;
  PairIntInt pii;
  for (int idx = 0; idx < niSrc; ++idx)
  {
    pii.first  = clientRank;
    pii.second = idx;
    globalIndexOfAxisSource[idx+ibeginSrc] = pii; //std::make_pair(clientRank,idx);
  }

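  // Same kind of DHT as in the grid constructor, but keyed by the axis global index; the local index is
  // simply the offset within this client's segment [ibeginSrc, ibeginSrc+niSrc).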
  gridIndexClientClientMapping_ = new CClientClientDHTPairIntInt(globalIndexOfAxisSource,
                                                                 client->intraComm,
                                                                 true);
}

CTransformationMapping::~CTransformationMapping()
{
  if (0 != gridIndexClientClientMapping_) delete gridIndexClientClientMapping_;
}

/*!
  A transformation between two grids is represented as a mapping between the global indexes of these two grids.
Given such a mapping, this function determines to and from which clients a client needs to send and receive these global indexes in order to accomplish the transformations.
  The grid destination is the grid whose global indexes demand global indexes from the other grid (the grid source).
  Grid destination and grid source are both distributed among clients, but in different manners.
  \param [in] globaIndexWeightFromDestToSource mapping representing the transformations
*/
void CTransformationMapping::computeTransformationMapping(const DestinationIndexMap& globaIndexWeightFromDestToSource)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client=context->client;

  DestinationIndexMap::const_iterator itbMap = globaIndexWeightFromDestToSource.begin(), itMap,
                                      iteMap = globaIndexWeightFromDestToSource.end();

  // Not only can one index on the grid destination demand several indexes from the grid source,
  // but one index on the grid source may also have to be sent to several indexes of the grid destination.
  // The mapping is therefore inverted here: from each source global index to the destination entries that need it.
  DestinationIndexMap globalIndexMapFromSrcToDest;
  boost::unordered_map<size_t, int> nbGlobalIndexMapFromSrcToDest;
  std::vector<std::pair<int, std::pair<size_t,double> > >::const_iterator itbVecPair, itVecPair, iteVecPair;
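  // First pass: count how many destination entries refer to each global index of the grid source.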
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    itbVecPair = (itMap->second).begin();
    iteVecPair = (itMap->second).end();
    for (itVecPair = itbVecPair; itVecPair != iteVecPair; ++itVecPair)
    {
      ++nbGlobalIndexMapFromSrcToDest[(itVecPair->second).first];
    }
  }

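  // Reserve the vectors of the inverted mapping so that the second pass can fill them without reallocation.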
  for (boost::unordered_map<size_t, int>::const_iterator it = nbGlobalIndexMapFromSrcToDest.begin();
                                                         it != nbGlobalIndexMapFromSrcToDest.end(); ++it)
  {
    globalIndexMapFromSrcToDest[it->first].reserve(it->second);
  }

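  // Second pass: for each global index of the grid source, record the destination entries
  // (destination global index and associated weight) that demand it.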
  for (itMap = itbMap; itMap != iteMap; ++itMap)
  {
    itbVecPair = (itMap->second).begin();
    iteVecPair = (itMap->second).end();
    for (itVecPair = itbVecPair; itVecPair != iteVecPair; ++itVecPair)
    {
      globalIndexMapFromSrcToDest[(itVecPair->second).first].push_back(std::make_pair(itVecPair->first, std::make_pair(itMap->first, (itVecPair->second).second)));
    }
  }

  // All global indexes of the grid source demanded by this client on the grid destination
  CArray<size_t,1> globalIndexMap(globalIndexMapFromSrcToDest.size());
  DestinationIndexMap::const_iterator itbMapBoost = globalIndexMapFromSrcToDest.begin(), itMapBoost;
  DestinationIndexMap::const_iterator iteMapBoost = globalIndexMapFromSrcToDest.end();
  int idx = 0;
  for (itMapBoost = itbMapBoost; itMapBoost != iteMapBoost; ++itMapBoost)
  {
    globalIndexMap(idx) = itMapBoost->first;
    ++idx;
  }

  // Find out on which clients the necessary indexes of the grid source are located.
  gridIndexClientClientMapping_->computeIndexInfoMapping(globalIndexMap);
  const CClientClientDHTPairIntInt::Index2InfoTypeMap& globalIndexSentFromGridSource = gridIndexClientClientMapping_->getInfoIndexMap();
  CClientClientDHTPairIntInt::Index2InfoTypeMap::const_iterator itbMapSrc = globalIndexSentFromGridSource.begin(), itMapSrc,
                                                                iteMapSrc = globalIndexSentFromGridSource.end();
  std::vector<size_t>::const_iterator itbVec, itVec, iteVec;
  // Inform each source client of the global indexes it needs to send to this client
  int nbClient = client->clientSize;
  std::vector<int> sendNbClientBuff(nbClient,0);
  std::vector<int> recvNbClientBuff(nbClient,0);
  std::vector<int> sendIndexBuff(nbClient,0);
  std::vector<int> recvIndexBuff(nbClient,0);
  boost::unordered_map<int,std::vector<size_t> > sendIndexMap;
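  // For each source rank, sendIndexMap packs flat (local index on source, global index) pairs requested by
  // this client, while globalIndexReceivedOnGridDestMapping_ records which destination entries each received
  // value will feed; sendNbClientBuff and sendIndexBuff are used below to size the MPI exchange.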
  for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc)
  {
    int sourceRank = (itMapSrc->second).first;
    (globalIndexReceivedOnGridDestMapping_[sourceRank]).push_back(globalIndexMapFromSrcToDest[itMapSrc->first]);
    sendIndexMap[sourceRank].push_back((itMapSrc->second).second);
    sendIndexMap[sourceRank].push_back(itMapSrc->first);
    sendNbClientBuff[sourceRank] = 1;
    ++sendIndexBuff[sourceRank];
  }

  MPI_Allreduce(&sendNbClientBuff[0], &recvNbClientBuff[0], nbClient, MPI_INT, MPI_SUM, client->intraComm);
  int numClientToReceive = recvNbClientBuff[client->clientRank];

  // Then determine the size of the receiving buffer; messages are received one at a time into the same buffer,
  // so only the maximum message size needs to be known
  MPI_Allreduce(&sendIndexBuff[0], &recvIndexBuff[0], nbClient, MPI_INT, MPI_MAX, client->intraComm);
  int buffSize = 2*recvIndexBuff[client->clientRank]; // we send the global as well as the local index
  unsigned long* recvBuffGlobalIndex = 0;
  if (0 != buffSize) recvBuffGlobalIndex = new unsigned long [buffSize];

  std::map<int, MPI_Request> requests;

  // Inform all "source clients" about the indexes that they need to send
  boost::unordered_map<int,std::vector<size_t> >::const_iterator itSendIndex = sendIndexMap.begin(),
                                                                 iteSendIndex = sendIndexMap.end();
  for (; itSendIndex != iteSendIndex; ++itSendIndex)
  {
    unsigned long* sendPtr = const_cast<unsigned long*>(&(itSendIndex->second)[0]);
    MPI_Isend(sendPtr,
              (itSendIndex->second).size(),
              MPI_UNSIGNED_LONG,
              (itSendIndex->first),
              MPI_TRANSFORMATION_MAPPING_INDEX,
              client->intraComm,
              &requests[(itSendIndex->first)]);
  }

  // Now all the "source clients" listen for messages from the "destination clients"
  int numClientReceived = 0;  // number of clients from which a message has already been received
  int countBuff;
  while (numClientReceived < numClientToReceive)
  {
    MPI_Status status;
    MPI_Recv(recvBuffGlobalIndex,
             buffSize,
             MPI_UNSIGNED_LONG,
             MPI_ANY_SOURCE,
             MPI_TRANSFORMATION_MAPPING_INDEX,
             client->intraComm,
             &status);

    MPI_Get_count(&status, MPI_UNSIGNED_LONG, &countBuff);
    int clientDestRank = status.MPI_SOURCE;
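    // Each received message is a flat sequence of (local index, global index) pairs, hence the stride of 2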
    for (int idx = 0; idx < countBuff; idx += 2)
    {
      globalIndexSendToGridDestMapping_[clientDestRank].push_back(std::make_pair<int,size_t>(recvBuffGlobalIndex[idx], recvBuffGlobalIndex[idx+1]));
    }
    ++numClientReceived;
  }

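  // Make sure all the non-blocking sends have completed before the send buffers go out of scope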
  std::map<int, MPI_Request>::iterator itRequest;
  for (itRequest = requests.begin(); itRequest != requests.end(); ++itRequest)
    MPI_Wait(&itRequest->second, MPI_STATUS_IGNORE);

  if (0 != buffSize) delete [] recvBuffGlobalIndex;
}

/*!
  Return the (grid) global indexes on the grid destination. This mapping contains the rank of each source client
(that sends info to the grid destination) and the corresponding global indexes to write on the grid destination.
  \return global index mapping to receive on the grid destination
*/
const CTransformationMapping::ReceivedIndexMap& CTransformationMapping::getGlobalIndexReceivedOnGridDestMapping() const
{
  return globalIndexReceivedOnGridDestMapping_;
}

/*!
  Return the (grid) global indexes on the grid source. This mapping contains the rank of each destination client
(which receives the transformation info) and the corresponding global indexes to send.
  \return global index mapping to send from the grid source
*/
const CTransformationMapping::SentIndexMap& CTransformationMapping::getGlobalIndexSendToGridDestMapping() const
{
  return globalIndexSendToGridDestMapping_;
}

}
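
For orientation, here is a minimal usage sketch of the class implemented above (illustrative only: gridDest, gridSrc and destToSrc are assumed to be supplied by the calling transformation algorithm; only the CTransformationMapping calls come from this file):

  CTransformationMapping mapping(gridDest, gridSrc);  // builds the DHT over the source distribution
  mapping.computeTransformationMapping(destToSrc);    // destToSrc is a DestinationIndexMap filled by the caller
  const CTransformationMapping::ReceivedIndexMap& toReceive = mapping.getGlobalIndexReceivedOnGridDestMapping();
  const CTransformationMapping::SentIndexMap&     toSend    = mapping.getGlobalIndexSendToGridDestMapping();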