#include "mpi_routing.hpp"
#include "mpi.hpp"
#include "node.hpp"
#include "elt.hpp"
#include "timerRemap.hpp"
#include <iostream>

namespace sphereRemap {

// assumed here so that the unqualified vector/cout/endl used below compile;
// the original presumably pulls this in through one of the headers above
using namespace std;

const int verbose = 0;

CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
{
  ep_lib::MPI_Comm_rank(comm, &mpiRank);
  ep_lib::MPI_Comm_size(comm, &mpiSize);
}

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *known* to the receiver */
template <typename T>
void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
{
  vector<ep_lib::MPI_Request> request(ranks.size() * 2);
  vector<ep_lib::MPI_Status> status(ranks.size() * 2);

  // communicate data
  int nbRequest = 0;
  for (int i = 0; i < ranks.size(); i++)
    if (recv[i].size())
      ep_lib::MPI_Irecv(&recv[i][0], recv[i].size() * sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    if (send[i].size())
      ep_lib::MPI_Isend((void *) &send[i][0], send[i].size() * sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
}
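
/* Usage note, inferred from the implementation rather than stated at the call sites:
   recv[i] must already be resized to the message length agreed with ranks[i], because
   recv[i].size() is used as the receive count. A minimal sketch (agreedLength is a
   hypothetical helper):

     vector<int> peers = ...;                         // ranks we exchange with
     vector<vector<double> > send(peers.size()), recv(peers.size());
     // both sides already know the lengths, e.g. from an earlier size exchange
     for (int i = 0; i < peers.size(); i++) recv[i].resize(agreedLength(peers[i]));
     alltoalls_known(send, recv, peers, comm);
*/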

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *unknown* to the receiver */
template <typename T>
void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
{
  vector<ep_lib::MPI_Request> request(ranks.size() * 2);
  vector<ep_lib::MPI_Status> status(ranks.size() * 2);

  // communicate sizes
  int nbRequest = 0;
  vector<int> sendSizes(ranks.size());
  vector<int> recvSizes(ranks.size());
  for (int i = 0; i < ranks.size(); i++)
    sendSizes[i] = send[i].size();
  for (int i = 0; i < ranks.size(); i++)
    ep_lib::MPI_Irecv(&recvSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    ep_lib::MPI_Isend(&sendSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);

  // allocate
  for (int i = 0; i < ranks.size(); i++)
    if (recvSizes[i]) recv[i].resize(recvSizes[i]);
  // communicate data
  alltoalls_known(send, recv, ranks, communicator);
}
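
/* A hedged sketch of how the two helpers compose (the NES payload type is the one
   instantiated at the bottom of this file; fillOutbox is hypothetical): the size
   exchange happens inside alltoalls_unknown, which then delegates the payload
   exchange to alltoalls_known.

     vector<int> peers = ...;                        // ranks we actually talk to
     vector<vector<NES> > outbox(peers.size()), inbox(peers.size());
     fillOutbox(outbox);                             // hypothetical: stage per-peer data
     alltoalls_unknown(outbox, inbox, peers, comm);  // sizes first, then data
*/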

/* overloads used by init(): an element routed to a single rank, or to several ranks */
void setElementsSendCnt(int route, vector<int>& sendCnt)
{
  sendCnt[route]++;
}

void setElementsSendCnt(const vector<int>& route, vector<int>& sendCnt)
{
  for (int i = 0; i < route.size(); i++)
    sendCnt[route[i]]++;
}

void setTargetElementIndex(int route, vector<int>& indices, const int *rankToIndex)
{
  int index = rankToIndex[route];
  indices.push_back(index);
}

void setTargetElementIndex(const vector<int>& route, vector<int>& indices, const int *rankToIndex)
{
  for (int i = 0; i < route.size(); i++)
  {
    int index = rankToIndex[route[i]];
    indices.push_back(index);
  }
}

/* set up the communication pattern: route[i] gives the destination rank(s) of local
   element i; after this call the class knows its source/target ranks and element counts */
template<typename T>
void CMPIRouting::init(const vector<T>& route, CMPICascade *cascade)
{
  vector<int> nbElementSend(mpiSize);
  int *toSend = new int[mpiSize];
  int *recvCount = new int[mpiSize];
  int *targetRankToIndex;

  for (int i = 0; i < route.size(); i++)
    setElementsSendCnt(route[i], nbElementSend);

  nbTarget = 0;
  vector<int> destRanks;
  for (int i = 0; i < mpiSize; i++)
  {
    if (nbElementSend[i] == 0)
      toSend[i] = 0;
    else
    {
      destRanks.push_back(i);
      toSend[i] = 1;
      nbTarget++;
    }
    recvCount[i] = 1;
  }
  //int recvCntDeb = (stree) ? stree->scatter_reduce(destRanks) : -1;

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
  CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
  // each rank contributes toSend[i] (0 or 1) per destination i; the element-wise sums are
  // scattered back one per rank, so nbSource is the number of ranks that will send to us
  ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
  CTimer::get("CMPIRouting::init(reduce_scatter)").print();

  ep_lib::MPI_Alloc_mem(nbTarget * sizeof(int), EP_INFO_NULL, &targetRank);
  ep_lib::MPI_Alloc_mem(nbSource * sizeof(int), EP_INFO_NULL, &sourceRank);

  targetRankToIndex = new int[mpiSize];
  int index = 0;
  for (int i = 0; i < mpiSize; i++)
  {
    if (toSend[i] == 1)
    {
      targetRank[index] = i;
      targetRankToIndex[i] = index;
      index++;
    }
  }

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];

  int indexRequest = 0;
  if (verbose) cout << "CMPIRouting::init nbSource : " << nbSource << " nbTarget : " << nbTarget << endl;
  // assert(recvCntDeb == -1 || recvCntDeb == nbSource);
  //cout << mpiRank << "DEB : " << recvCntDeb << " nbSource " << nbSource << " nbTarget : " << nbTarget << endl;
  for (int i = 0; i < nbSource; i++)
  {
#ifdef _usingMPI
    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
#elif defined(_usingEP)
    // -2 stands in for MPI_ANY_SOURCE in the EP case
    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
#endif
    indexRequest++;
  }
  ep_lib::MPI_Barrier(communicator);
  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  ep_lib::MPI_Waitall(indexRequest, request, status);
  ep_lib::MPI_Barrier(communicator); //needed
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  // the same rank exchange is performed a second time, under a fresh run of the same timer
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
#ifdef _usingMPI
    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
#elif defined(_usingEP)
    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
#endif
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  ep_lib::MPI_Waitall(indexRequest, request, status);
  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  CTimer::get("CMPIRouting::init(send_element)").reset();
  CTimer::get("CMPIRouting::init(send_element)").resume();

  nbTargetElement.resize(nbTarget);
  nbSourceElement.resize(nbSource);

  for (int i = 0; i < route.size(); i++)
    setTargetElementIndex(route[i], targetElementIndex, targetRankToIndex);

  for (int i = 0; i < targetElementIndex.size(); i++)
    nbTargetElement[targetElementIndex[i]]++;

  // exchange per-link element counts
  indexRequest = 0;
  totalSourceElement = 0;
  totalTargetElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Irecv(&nbSourceElement[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    totalTargetElement += nbTargetElement[i];
    ep_lib::MPI_Isend(&nbTargetElement[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  ep_lib::MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::init(send_element)").suspend();
  CTimer::get("CMPIRouting::init(send_element)").print();

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
    totalSourceElement += nbSourceElement[i];

  sourceElementIndex.resize(totalSourceElement);

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    for (int j = 0; j < nbSourceElement[i]; j++)
    {
      sourceElementIndex[totalSourceElement] = i;
      totalSourceElement++;
    }
  }

  delete[] targetRankToIndex; // was leaked in the original
  delete[] toSend;
  delete[] recvCount;
  delete[] request;
  delete[] status;
}


int CMPIRouting::getTotalSourceElement(void)
{
  return totalSourceElement;
}

template<typename T>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T) * nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T) * nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    *((T*) &(targetBuffer[index][indexTargetBuffer[index]])) = targetElements[i];
    indexTargetBuffer[index] += sizeof(T);
  }

  ep_lib::MPI_Request* request = new ep_lib::MPI_Request[nbSource + nbTarget];
  ep_lib::MPI_Status* status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget").reset();
  CTimer::get("CMPIRouting::transferToTarget").resume();

  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Irecv(sourceBuffer[i], nbSourceElement[i] * sizeof(T), EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Isend(targetBuffer[i], nbTargetElement[i] * sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  ep_lib::MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::transferToTarget").suspend();
  CTimer::get("CMPIRouting::transferToTarget").print();
  ep_lib::MPI_Barrier(communicator);

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    sourceElements[i] = *((T*) &(sourceBuffer[index][indexSourceBuffer[index]]));
    indexSourceBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}

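/* A minimal end-to-end sketch (hypothetical caller, not taken from this file): route[i]
   names the destination rank of local element i. After init(), transferToTarget() ships
   each element to its target, and transferFromSource() ships data the opposite way.

     CMPIRouting router(comm);
     vector<int> route = computeRoute();   // hypothetical: one destination rank per element
     router.init(route, NULL);             // the cascade pointer is not used by init()
     vector<int> sent(route.size()), received(router.getTotalSourceElement());
     router.transferToTarget(&sent[0], &received[0]);
*/
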
template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements, t_pack pack, t_unpack unpack)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];
  int* targetMessageSize = new int[nbTarget];
  int* sourceMessageSize = new int[nbSource];
  int index;

  // compute target buffer sizes: packing into a NULL buffer only advances the size counter
  for (int i = 0; i < nbTarget; i++)
    targetMessageSize[i] = 0;

  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], NULL, targetMessageSize[index]);
  }

  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();

  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Irecv(&sourceMessageSize[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Isend(&targetMessageSize[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  ep_lib::MPI_Waitall(indexRequest, request, status);

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  indexRequest = 0;

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").reset();
  CTimer::get("CMPIRouting::transferToTarget(data)").resume();
  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Irecv(sourceBuffer[i], sourceMessageSize[i], EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Isend(targetBuffer[i], targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  ep_lib::MPI_Waitall(indexRequest, request, status);

  ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(data)").print();

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    unpack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}

template<typename T>
void CMPIRouting::transferFromSource(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T) * nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T) * nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    *((T*) &(sourceBuffer[index][indexSourceBuffer[index]])) = sourceElements[i];
    indexSourceBuffer[index] += sizeof(T);
  }

  ep_lib::MPI_Request* request = new ep_lib::MPI_Request[nbSource + nbTarget];
  ep_lib::MPI_Status* status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Isend(sourceBuffer[i], nbSourceElement[i] * sizeof(T), EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Irecv(targetBuffer[i], nbTargetElement[i] * sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  ep_lib::MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    targetElements[i] = *((T*) &(targetBuffer[index][indexTargetBuffer[index]]));
    indexTargetBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}

/* number of source and target elements is known from previous call to init() */
template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferFromSource(T *targetElements, T *sourceElements, t_pack pack, t_unpack unpack)
{
  char **targetBuffer = new char*[nbTarget];
  int *indexTargetBuffer = new int[nbTarget];
  int *targetMessageSize = new int[nbTarget];
  int *sourceMessageSize = new int[nbSource];
  int index;

  // compute source message sizes: packing into a NULL buffer only advances the size counter
  for (int i = 0; i < nbSource; i++) sourceMessageSize[i] = 0;

  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], NULL, sourceMessageSize[index]);
  }

  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Isend(&sourceMessageSize[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Irecv(&targetMessageSize[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  ep_lib::MPI_Waitall(indexRequest, request, status);

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char **sourceBuffer = new char*[nbSource];
  int *indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    ep_lib::MPI_Isend(sourceBuffer[i], sourceMessageSize[i], EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    ep_lib::MPI_Irecv(targetBuffer[i], targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  ep_lib::MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    unpack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}
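
/* A hedged sketch of the pack/unpack contract, inferred from the size pass above (the
   callback names are illustrative): pack() must append its element at `pos` in `buffer`
   and advance `pos`; when called with buffer == NULL it must only advance `pos`. The
   signatures match the std::vector<int> instantiation below; memcpy needs <cstring>.

     void packVec(const std::vector<int>& v, char* buffer, int& pos)
     {
       int n = v.size();
       if (buffer)
       {
         *(int*) &buffer[pos] = n;
         if (n) memcpy(&buffer[pos + sizeof(int)], &v[0], n * sizeof(int));
       }
       pos += sizeof(int) + n * sizeof(int);
     }

     void unpackVec(std::vector<int>& v, const char* buffer, int& pos)
     {
       int n = *(const int*) &buffer[pos];
       const int* first = (const int*) &buffer[pos + sizeof(int)];
       v.assign(first, first + n);
       pos += sizeof(int) + n * sizeof(int);
     }
*/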

CMPIRouting::~CMPIRouting()
{
}

template void CMPIRouting::init(const std::vector<int>& route, CMPICascade *cascade);
template void CMPIRouting::init(const std::vector<std::vector<int> >& route, CMPICascade *cascade);

template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements);
template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (*unpack)(int&, char*, int&));
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements);
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (*unpack)(int&, char*, int&));

template void CMPIRouting::transferToTarget(Node* targetElements, Node* sourceElements, void (*pack)(Node&, char*, int&), void (*unpack)(Node&, char*, int&));
template void CMPIRouting::transferToTarget(Elt **targetElements, Elt **sourceElements, void (*pack)(Elt *, char*, int&), void (*unpack)(Elt *, char*, int&));
template void CMPIRouting::transferFromSource(std::vector<int> *targetElements, std::vector<int> *sourceElements, void (*pack)(const std::vector<int>&, char*, int&), void (*unpack)(std::vector<int>&, const char *, int&));

struct NES { int cnt; int risc; int rank; };

template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
                                const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
                              const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

} // namespace sphereRemap