1 | #include "transformation_mapping.hpp" |
---|
2 | #include <boost/unordered_map.hpp> |
---|
3 | #include "context.hpp" |
---|
4 | #include "context_client.hpp" |
---|
5 | |
---|
6 | namespace xios { |
---|
7 | |
---|
/*!
  Build the transformation mapping between two grids.
  Registers every global index that this client holds on the source grid as being
  owned by this client's rank, then constructs the distributed client-client
  mapping object used later to locate source indexes on other clients.
  \param [in] destination grid whose indexes demand values (grid destination)
  \param [in] source grid whose indexes supply values (grid source)
*/
CTransformationMapping::CTransformationMapping(CGrid* destination, CGrid* source)
 : gridSource_(source), gridDestination_(destination)
{
  CContext* context = CContext::getCurrent();
  CContextClient* client = context->client;
  int rank = client->clientRank;

  // Every global index present locally on the source grid is owned by this rank
  const CArray<size_t,1>& srcGlobalIndexes = gridSource_->getDistributionClient()->getGlobalDataIndexSendToServer();
  boost::unordered_map<size_t,int> indexOwner;
  int nbIndex = srcGlobalIndexes.numElements();
  for (int i = 0; i < nbIndex; ++i)
    indexOwner[srcGlobalIndexes(i)] = rank;

  // Distributed mapping used by computeTransformationMapping() to find which
  // client owns each needed source index (deleted in the destructor)
  gridIndexClientClientMapping_ = new CClientServerMappingDistributed(indexOwner,
                                                                      client->intraComm,
                                                                      true);
}
---|
27 | |
---|
28 | CTransformationMapping::~CTransformationMapping() |
---|
29 | { |
---|
30 | if (0 != gridIndexClientClientMapping_) delete gridIndexClientClientMapping_; |
---|
31 | } |
---|
32 | |
---|
33 | /*! |
---|
34 | Suppose that we have transformations between two grids, which are represented in form of mapping between global indexes of these two grids, |
---|
35 | this function tries to find out which clients a client need to send and receive these global indexes to accomplish the transformations. |
---|
36 | The grid destination is the grid whose global indexes demande global indexes from the grid source |
---|
37 | Grid destination and grid source are also distributed among clients but in a different manner. |
---|
38 | \param [in] globaIndexMapFromDestToSource mapping representing the transformations |
---|
39 | */ |
---|
40 | void CTransformationMapping::computeTransformationMapping(const std::map<size_t, std::set<size_t> >& globaIndexMapFromDestToSource) |
---|
41 | { |
---|
42 | CContext* context = CContext::getCurrent(); |
---|
43 | CContextClient* client=context->client; |
---|
44 | |
---|
45 | int numMappingPoints = 0; |
---|
46 | std::map<size_t, std::set<size_t> >::const_iterator itbMap = globaIndexMapFromDestToSource.begin(), itMap, |
---|
47 | iteMap = globaIndexMapFromDestToSource.end(); |
---|
48 | for (itMap = itbMap; itMap != iteMap; ++itMap) |
---|
49 | { |
---|
50 | numMappingPoints += (itMap->second).size(); |
---|
51 | } |
---|
52 | |
---|
53 | // All global indexes of a client on grid destination |
---|
54 | CArray<size_t,1> globalIndexMap(numMappingPoints); |
---|
55 | // Not only one index on grid destination can demande two indexes from grid source |
---|
56 | // but an index on grid destination have to be sent to two indexes of grid destination |
---|
57 | std::map<size_t, std::vector<size_t> > globalIndexMapFromSrcToDest; |
---|
58 | std::set<size_t>::const_iterator itbSet, itSet, iteSet; |
---|
59 | int idx = 0; |
---|
60 | for (itMap = itbMap; itMap != iteMap; ++itMap) |
---|
61 | { |
---|
62 | itbSet = (itMap->second).begin(); |
---|
63 | iteSet = (itMap->second).end(); |
---|
64 | for (itSet = itbSet; itSet != iteSet; ++itSet) |
---|
65 | { |
---|
66 | globalIndexMap(idx) = *itSet; |
---|
67 | globalIndexMapFromSrcToDest[*itSet].push_back(itMap->first); |
---|
68 | ++idx; |
---|
69 | } |
---|
70 | } |
---|
71 | |
---|
72 | // Find out on which clients the necessary indexes of grid source are. |
---|
73 | gridIndexClientClientMapping_->computeServerIndexMapping(globalIndexMap); |
---|
74 | const std::map<int, std::vector<size_t> >& globalIndexSentFromGridSource = gridIndexClientClientMapping_->getGlobalIndexOnServer(); |
---|
75 | std::map<int, std::vector<size_t> >::const_iterator itbMapSrc = globalIndexSentFromGridSource.begin(), itMapSrc, |
---|
76 | iteMapSrc = globalIndexSentFromGridSource.end(); |
---|
77 | std::vector<size_t>::const_iterator itbVec, itVec, iteVec; |
---|
78 | for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) |
---|
79 | { |
---|
80 | int sourceRank = itMapSrc->first; |
---|
81 | itbVec = (itMapSrc->second).begin(); |
---|
82 | iteVec = (itMapSrc->second).end(); |
---|
83 | for (itVec = itbVec; itVec != iteVec; ++itVec) |
---|
84 | { |
---|
85 | (globalIndexReceivedOnGridDestMapping_[sourceRank]).push_back(globalIndexMapFromSrcToDest[*itVec]); |
---|
86 | } |
---|
87 | } |
---|
88 | |
---|
89 | // Inform client about the destination to which it needs to send global indexes |
---|
90 | int nbClient = client->clientSize; |
---|
91 | int* sendBuff = new int[nbClient]; |
---|
92 | int* recvBuff = new int[nbClient]; |
---|
93 | for (int i = 0; i < nbClient; ++i) sendBuff[i] = 0; |
---|
94 | |
---|
95 | // First of all, inform the number of destination a client needs to send global index |
---|
96 | for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) sendBuff[itMapSrc->first] = 1; |
---|
97 | MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); |
---|
98 | int numClientToReceive = recvBuff[client->clientRank]; |
---|
99 | |
---|
100 | // Then specify the size of receiving buffer, because we use synch send/receive so only necessary to know maximum size |
---|
101 | for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) sendBuff[itMapSrc->first] = (itMapSrc->second).size(); |
---|
102 | MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_MAX, client->intraComm); |
---|
103 | |
---|
104 | int buffSize = recvBuff[client->clientRank]; |
---|
105 | unsigned long* recvBuffGlobalIndex; |
---|
106 | if (0 != buffSize) recvBuffGlobalIndex = new unsigned long [buffSize]; |
---|
107 | |
---|
108 | // Inform all "source clients" about index that they need to send |
---|
109 | for (itMapSrc = itbMapSrc; itMapSrc != iteMapSrc; ++itMapSrc) |
---|
110 | { |
---|
111 | MPI_Request request; |
---|
112 | unsigned long* sendPtr = const_cast<unsigned long*>(&(itMapSrc->second)[0]); |
---|
113 | MPI_Isend(sendPtr, |
---|
114 | (itMapSrc->second).size(), |
---|
115 | MPI_UNSIGNED_LONG, |
---|
116 | itMapSrc->first, |
---|
117 | 11, |
---|
118 | client->intraComm, |
---|
119 | &request); |
---|
120 | } |
---|
121 | |
---|
122 | // Now all the "source clients" try listening messages from other "destination clients" |
---|
123 | int numClientReceived = 0; // number of client to which data has been already sent |
---|
124 | int countBuff; |
---|
125 | while (numClientReceived < numClientToReceive) |
---|
126 | { |
---|
127 | MPI_Status status; |
---|
128 | MPI_Recv(recvBuffGlobalIndex, |
---|
129 | buffSize, |
---|
130 | MPI_UNSIGNED_LONG, |
---|
131 | MPI_ANY_SOURCE, |
---|
132 | 11, |
---|
133 | client->intraComm, |
---|
134 | &status); |
---|
135 | |
---|
136 | MPI_Get_count(&status, MPI_UNSIGNED_LONG, &countBuff); |
---|
137 | int clientDestRank = status.MPI_SOURCE; |
---|
138 | for (int idx = 0; idx < countBuff; ++idx) |
---|
139 | { |
---|
140 | globalIndexSendToGridDestMapping_[clientDestRank].push_back(recvBuffGlobalIndex[idx]); |
---|
141 | } |
---|
142 | ++numClientReceived; |
---|
143 | } |
---|
144 | |
---|
145 | delete [] sendBuff; |
---|
146 | delete [] recvBuff; |
---|
147 | if (0 != buffSize) delete [] recvBuffGlobalIndex; |
---|
148 | } |
---|
149 | |
---|
/*!
  Accessor for the receiving side of the mapping: for each rank of a "source client",
  the groups of destination global indexes fed by the indexes received from that rank
  (one inner vector per received source index).
  Populated by computeTransformationMapping(); empty before that call.
  \return map from source-client rank to destination index groups
*/
const std::map<int,std::vector<std::vector<size_t> > >& CTransformationMapping::getGlobalIndexReceivedOnGridDestMapping() const
{
  return globalIndexReceivedOnGridDestMapping_;
}
---|
154 | |
---|
/*!
  Accessor for the sending side of the mapping: for each rank of a "destination client",
  the source global indexes that this client must send to that rank.
  Populated by computeTransformationMapping(); empty before that call.
  \return map from destination-client rank to the list of source global indexes to send
*/
const std::map<int,std::vector<size_t> >& CTransformationMapping::getGlobalIndexSendToGridDestMapping() const
{
  return globalIndexSendToGridDestMapping_;
}
---|
159 | |
---|
160 | } |
---|