#include "mpi_routing.hpp"
#include "mpi.hpp"
#include "node.hpp"
#include "elt.hpp"
#include "timerRemap.hpp"
#include <iostream>
#include <vector>

// The unqualified vector/cout/endl used below assume these names are visible;
// make that explicit here in case the included headers do not already provide them.
using std::vector;
using std::cout;
using std::endl;

namespace sphereRemap {

const int verbose = 0;

CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
{
  MPI_Comm_rank(comm, &mpiRank);
  MPI_Comm_size(comm, &mpiSize);
}

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *known* to receiver */
template <typename T>
void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
{
  vector<MPI_Request> request(ranks.size() * 2);
  vector<MPI_Status> status(ranks.size() * 2);

  // communicate data
  int nbRequest = 0;
  for (int i = 0; i < ranks.size(); i++)
    if (recv[i].size())
      MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    if (send[i].size())
      MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);
}
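
// Illustrative usage sketch for alltoalls_known (not part of the library; `comm`,
// `partnerRank` and the payload below are hypothetical). Because the receiver already
// knows the message lengths, each recv[i] must be sized before the call:
//
//   std::vector<int> peers(1, partnerRank);                  // ranks we exchange with
//   std::vector<std::vector<double> > send(1), recv(1);
//   send[0].assign(8, 1.0);                                  // payload for the partner
//   recv[0].resize(8);                                       // size known a priori
//   alltoalls_known(send, recv, peers, comm);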

/* sparse alltoallv when it is known that only a subset of ranks will communicate,
   but message lengths are *unknown* to receiver */
template <typename T>
void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
{
  vector<MPI_Request> request(ranks.size() * 2);
  vector<MPI_Status> status(ranks.size() * 2);

  // communicate sizes
  int nbRequest = 0;
  vector<int> sendSizes(ranks.size());
  vector<int> recvSizes(ranks.size());
  for (int i = 0; i < ranks.size(); i++)
    sendSizes[i] = send[i].size();
  for (int i = 0; i < ranks.size(); i++)
    MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
    MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);

  // allocate
  for (int i = 0; i < ranks.size(); i++)
    if (recvSizes[i]) recv[i].resize(recvSizes[i]);
  // communicate data
  alltoalls_known(send, recv, ranks, communicator);
}
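
// Illustrative usage sketch for alltoalls_unknown (hypothetical `comm`, `peers` and
// `nLocal`). The receive sizes are exchanged first, so recv only needs one entry per
// peer; the payload vectors are resized inside the call:
//
//   std::vector<std::vector<double> > send(peers.size()), recv(peers.size());
//   send[0].assign(nLocal, 0.0);                 // whatever this rank has to ship
//   alltoalls_unknown(send, recv, peers, comm);  // recv[i] sized from the exchanged counts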

/* count, for each destination rank, how many local elements are routed to it
   (scalar route: one destination per element) */
void setElementsSendCnt(int route, vector<int>& sendCnt)
{
  sendCnt[route]++;
}

/* same as above for a multi-destination route (one element may go to several ranks) */
void setElementsSendCnt(const vector<int>& route, vector<int>& sendCnt)
{
  for (int i = 0; i < route.size(); i++)
    sendCnt[route[i]]++;
}

/* translate a destination rank into its index in the target-rank table and
   append it to the per-element index list */
void setTargetElementIndex(int route, vector<int>& indices, const int *rankToIndex)
{
  int index = rankToIndex[route];
  indices.push_back(index);
}

/* same as above for a multi-destination route */
void setTargetElementIndex(const vector<int>& route, vector<int>& indices, const int *rankToIndex)
{
  for (int i = 0; i < route.size(); i++)
  {
    int index = rankToIndex[route[i]];
    indices.push_back(index);
  }
}

/* Analyse the route (for each local element, the destination rank or list of
   destination ranks), determine which ranks this process sends to (targets) and
   receives from (sources), and exchange the per-rank element counts.
   The `cascade` argument is currently unused. */
template<typename T>
void CMPIRouting::init(const vector<T>& route, CMPICascade *cascade)
{
  vector<int> nbElementSend(mpiSize);
  int *toSend = new int[mpiSize];
  int *recvCount = new int[mpiSize];
  int *targetRankToIndex;

  for (int i = 0; i < route.size(); i++)
    setElementsSendCnt(route[i], nbElementSend);

  nbTarget = 0;
  vector<int> destRanks;
  for (int i = 0; i < mpiSize; i++)
  {
    if (nbElementSend[i] == 0)
      toSend[i] = 0;
    else
    {
      destRanks.push_back(i);
      toSend[i] = 1;
      nbTarget++;
    }
    recvCount[i] = 1;
  }
  //int recvCntDeb = (stree) ? stree->scatter_reduce(destRanks) : -1;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
  CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
  MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
  CTimer::get("CMPIRouting::init(reduce_scatter)").print();

  MPI_Alloc_mem(nbTarget * sizeof(int), MPI_INFO_NULL, &targetRank);
  MPI_Alloc_mem(nbSource * sizeof(int), MPI_INFO_NULL, &sourceRank);

  targetRankToIndex = new int[mpiSize];
  int index = 0;
  for (int i = 0; i < mpiSize; i++)
  {
    if (toSend[i] == 1)
    {
      targetRank[index] = i;
      targetRankToIndex[i] = index;
      index++;
    }
  }

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];

  int indexRequest = 0;
  if (verbose) cout << "CMPIRouting::init nbSource : " << nbSource << " nbTarget : " << nbTarget << endl;
  // assert(recvCntDeb == -1 || recvCntDeb == nbSource);
  //cout << mpiRank << "DEB : " << recvCntDeb << " nbSource " << nbSource << " nbTarget : " << nbTarget << endl;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Barrier(communicator);
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);
  MPI_Barrier(communicator);  //needed
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

  // the same rank exchange is performed a second time under a fresh timer
  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);
  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").suspend();
  CTimer::get("CMPIRouting::init(get_source)").print();

  CTimer::get("CMPIRouting::init(send_element)").reset();
  CTimer::get("CMPIRouting::init(send_element)").resume();

  nbTargetElement.resize(nbTarget);
  nbSourceElement.resize(nbSource);

  for (int i = 0; i < route.size(); i++)
    setTargetElementIndex(route[i], targetElementIndex, targetRankToIndex);

  for (int i = 0; i < targetElementIndex.size(); i++)
    nbTargetElement[targetElementIndex[i]]++;

  indexRequest = 0;
  totalSourceElement = 0;
  totalTargetElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&nbSourceElement[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    totalTargetElement += nbTargetElement[i];
    MPI_Isend(&nbTargetElement[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::init(send_element)").suspend();
  CTimer::get("CMPIRouting::init(send_element)").print();

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
    totalSourceElement += nbSourceElement[i];

  sourceElementIndex.resize(totalSourceElement);

  totalSourceElement = 0;
  for (int i = 0; i < nbSource; i++)
  {
    for (int j = 0; j < nbSourceElement[i]; j++)
    {
      sourceElementIndex[totalSourceElement] = i;
      totalSourceElement++;
    }
  }

  delete[] targetRankToIndex;
  delete[] toSend;
  delete[] recvCount;
  delete[] request;
  delete[] status;
}
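
// Minimal usage sketch of init (illustrative; `comm`, `route` and `nLocal` are
// hypothetical). Each entry of `route` gives the destination rank of the corresponding
// local element; after the call the object knows how many elements it will receive:
//
//   CMPIRouting router(comm);
//   std::vector<int> route(nLocal);        // route[i] = destination rank of element i
//   router.init(route, NULL);              // cascade argument is unused here
//   int nIncoming = router.getTotalSourceElement();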


int CMPIRouting::getTotalSourceElement(void)
{
  return totalSourceElement;
}

/* forward transfer with fixed-size elements: each process sends its routed elements
   (targetElements) to the corresponding target ranks and receives, into sourceElements,
   the elements that other processes route to it */
template<typename T>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T)*nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T)*nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    *((T*) &(targetBuffer[index][indexTargetBuffer[index]])) = targetElements[i];
    indexTargetBuffer[index] += sizeof(T);
  }

  MPI_Request* request = new MPI_Request[nbSource + nbTarget];
  MPI_Status* status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget").reset();
  CTimer::get("CMPIRouting::transferToTarget").resume();

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(sourceBuffer[i], nbSourceElement[i]*sizeof(T), MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(targetBuffer[i], nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  CTimer::get("CMPIRouting::transferToTarget").suspend();
  CTimer::get("CMPIRouting::transferToTarget").print();
  MPI_Barrier(communicator);

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    sourceElements[i] = *((T*) &(sourceBuffer[index][indexSourceBuffer[index]]));
    indexSourceBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}


/* forward transfer with variable-size elements: a first exchange communicates the
   per-rank message sizes (computed by calling pack with a NULL buffer), then the data
   itself is packed, sent to the target ranks and unpacked on the receiving side */
template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferToTarget(T* targetElements, T* sourceElements, t_pack pack, t_unpack unpack)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];
  int* targetMessageSize = new int[nbTarget];
  int* sourceMessageSize = new int[nbSource];
  int index;

  // compute target buffer size
  for (int i = 0; i < nbTarget; i++)
    targetMessageSize[i] = 0;

  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], NULL, targetMessageSize[index]);
  }

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    pack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  indexRequest = 0;

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").reset();
  CTimer::get("CMPIRouting::transferToTarget(data)").resume();
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Irecv(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Isend(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(data)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(data)").print();

  // unpack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    unpack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}
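
// Sketch of a pack/unpack pair for variable-size payloads (illustrative only; the
// element type std::vector<int> matches the explicit instantiation at the end of this
// file, but the helpers themselves are hypothetical and would need <cstring> for
// memcpy). When `buffer` is NULL the pack function must only advance `index`, which is
// how the message sizes are computed above:
//
//   void packVec(const std::vector<int>& e, char* buffer, int& index)
//   {
//     int n = e.size();
//     if (buffer)
//     {
//       *((int*) &buffer[index]) = n;
//       if (n) memcpy(&buffer[index + sizeof(int)], &e[0], n * sizeof(int));
//     }
//     index += sizeof(int) + n * sizeof(int);
//   }
//
//   void unpackVec(std::vector<int>& e, const char* buffer, int& index)
//   {
//     int n = *((const int*) &buffer[index]);
//     e.resize(n);
//     if (n) memcpy(&e[0], &buffer[index + sizeof(int)], n * sizeof(int));
//     index += sizeof(int) + n * sizeof(int);
//   }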

/* reverse transfer with fixed-size elements: each process returns its sourceElements
   to the ranks they were received from and collects, into targetElements, the data
   corresponding to the elements it originally routed out */
template<typename T>
void CMPIRouting::transferFromSource(T* targetElements, T* sourceElements)
{
  char** targetBuffer = new char*[nbTarget];
  int* indexTargetBuffer = new int[nbTarget];

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[sizeof(T)*nbTargetElement[i]];
    indexTargetBuffer[i] = 0;
  }

  char** sourceBuffer = new char*[nbSource];
  int* indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sizeof(T)*nbSourceElement[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  int index;
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    *((T*) &(sourceBuffer[index][indexSourceBuffer[index]])) = sourceElements[i];
    indexSourceBuffer[index] += sizeof(T);
  }

  MPI_Request* request = new MPI_Request[nbSource + nbTarget];
  MPI_Status* status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(sourceBuffer[i], nbSourceElement[i]*sizeof(T), MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(targetBuffer[i], nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }

  MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    targetElements[i] = *((T*) &(targetBuffer[index][indexTargetBuffer[index]]));
    indexTargetBuffer[index] += sizeof(T);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] request;
  delete[] status;
}
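
// Illustrative round trip with fixed-size elements (hypothetical buffers; assumes
// init() has been called as sketched above). transferToTarget pushes data along the
// route; transferFromSource brings one result back for every element that was sent:
//
//   std::vector<double> outgoing(nLocal), incoming(router.getTotalSourceElement());
//   router.transferToTarget(&outgoing[0], &incoming[0]);   // forward: push along the route
//   // ... process `incoming` in place ...
//   std::vector<double> results(nLocal);
//   router.transferFromSource(&results[0], &incoming[0]);  // reverse: one result per routed element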


/* reverse transfer with variable-size elements; the number of source and target
   elements is known from a previous call to init() */
template<typename T, typename t_pack, typename t_unpack>
void CMPIRouting::transferFromSource(T *targetElements, T *sourceElements, t_pack pack, t_unpack unpack)
{
  char **targetBuffer = new char*[nbTarget];
  int *indexTargetBuffer = new int[nbTarget];
  int *targetMessageSize = new int[nbTarget];
  int *sourceMessageSize = new int[nbSource];
  int index;

  // compute the size of each outgoing (source-side) message
  for (int i = 0; i < nbSource; i++) sourceMessageSize[i] = 0;

  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], NULL, sourceMessageSize[index]);
  }

  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);

  for (int i = 0; i < nbTarget; i++)
  {
    targetBuffer[i] = new char[targetMessageSize[i]];
    indexTargetBuffer[i] = 0;
  }

  char **sourceBuffer = new char*[nbSource];
  int *indexSourceBuffer = new int[nbSource];

  for (int i = 0; i < nbSource; i++)
  {
    sourceBuffer[i] = new char[sourceMessageSize[i]];
    indexSourceBuffer[i] = 0;
  }

  // pack the data
  for (int i = 0; i < totalSourceElement; i++)
  {
    index = sourceElementIndex[i];
    pack(sourceElements[i], sourceBuffer[index], indexSourceBuffer[index]);
  }

  indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
    MPI_Isend(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  for (int i = 0; i < nbTarget; i++)
  {
    MPI_Irecv(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
    indexRequest++;
  }
  MPI_Waitall(indexRequest, request, status);

  // unpack the data
  for (int i = 0; i < totalTargetElement; i++)
  {
    index = targetElementIndex[i];
    unpack(targetElements[i], targetBuffer[index], indexTargetBuffer[index]);
  }

  for (int i = 0; i < nbTarget; i++) delete[] targetBuffer[i];
  for (int i = 0; i < nbSource; i++) delete[] sourceBuffer[i];
  delete[] targetBuffer;
  delete[] indexTargetBuffer;
  delete[] targetMessageSize;
  delete[] sourceBuffer;
  delete[] indexSourceBuffer;
  delete[] sourceMessageSize;
  delete[] request;
  delete[] status;
}

CMPIRouting::~CMPIRouting()
{
}

template void CMPIRouting::init(const std::vector<int>& route, CMPICascade *cascade);
template void CMPIRouting::init(const std::vector<std::vector<int> >& route, CMPICascade *cascade);

template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements);
template void CMPIRouting::transferToTarget(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (* unpack)(int&, char *, int&));
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements);
template void CMPIRouting::transferFromSource(int *targetElements, int *sourceElements, void (*pack)(int&, char*, int&), void (* unpack)(int&, char *, int&));

template void CMPIRouting::transferToTarget(Node* targetElements, Node* sourceElements, void (*pack)(Node&, char*, int&), void (* unpack)(Node&, char *, int&));
template void CMPIRouting::transferToTarget(Elt **targetElements, Elt **sourceElements, void (*pack)(Elt *, char*, int&), void (* unpack)(Elt *, char *, int&));
template void CMPIRouting::transferFromSource(std::vector<int> *targetElements, std::vector<int> *sourceElements, void (*pack)(const std::vector<int>&, char*, int&), void (* unpack)(std::vector<int>&, const char *, int&));

struct NES { int cnt; int risc; int rank; };

template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
                                const std::vector<int>& ranks, MPI_Comm communicator);

template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
                              const std::vector<int>& ranks, MPI_Comm communicator);

} // namespace sphereRemap