#include "ep_lib.hpp"
#include "ep_declaration.hpp"

#include <assert.h>
#include <stdio.h>
#include <time.h>    /* time */

#include <chrono>
#include <cstdlib>   /* srand, rand */
#include <ctime>
#include <iostream>  /* std::cout — previously pulled in transitively */
#include <ratio>
#include <thread>    /* std::this_thread::sleep_for */
#include <vector>    /* std::vector — previously pulled in transitively */

#include <omp.h>

using namespace ep_lib;
using namespace std::chrono;

---|
15 | int main(int argc, char **argv) |
---|
16 | { |
---|
17 | srand (time(NULL)); |
---|
18 | |
---|
19 | printf("Testing ep_lib\n"); |
---|
20 | int required=3, provided; |
---|
21 | |
---|
22 | MPI_Init_thread(&argc, &argv, required, &provided); |
---|
23 | |
---|
24 | assert(required==provided); |
---|
25 | |
---|
26 | int mpi_rank; |
---|
27 | int mpi_size; |
---|
28 | |
---|
29 | MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); |
---|
30 | MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); |
---|
31 | |
---|
32 | int num_threads; |
---|
33 | if(mpi_rank < mpi_size-2) |
---|
34 | { |
---|
35 | printf("Proc %d is client\n", mpi_rank); |
---|
36 | num_threads = 2; |
---|
37 | } |
---|
38 | else |
---|
39 | { |
---|
40 | printf("Proc %d is server\n", mpi_rank); |
---|
41 | num_threads = 1; |
---|
42 | } |
---|
43 | |
---|
44 | omp_set_num_threads(num_threads); |
---|
45 | |
---|
46 | #pragma omp parallel default(shared) firstprivate(num_threads) |
---|
47 | { |
---|
48 | int num_ep = num_threads; |
---|
49 | MPI_Info info; |
---|
50 | |
---|
51 | //printf("omp_get_thread_num() = %d, omp_get_num_threads() = %d, num_threads = %d\n", omp_get_thread_num(), omp_get_num_threads(), num_threads); |
---|
52 | MPI_Comm *ep_comm; |
---|
53 | #pragma omp master |
---|
54 | { |
---|
55 | MPI_Comm *ep_comm; |
---|
56 | MPI_Comm_create_endpoints(MPI_COMM_WORLD.mpi_comm, num_ep, info, ep_comm); |
---|
57 | passage = ep_comm; |
---|
58 | } |
---|
59 | |
---|
60 | #pragma omp barrier |
---|
61 | |
---|
62 | |
---|
63 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
64 | comm = passage[omp_get_thread_num()]; |
---|
65 | |
---|
66 | int rank, size; |
---|
67 | MPI_Comm_rank(comm, &rank); |
---|
68 | MPI_Comm_size(comm, &size); |
---|
69 | |
---|
70 | |
---|
71 | |
---|
72 | bool isClient = false; |
---|
73 | bool isServer = false; |
---|
74 | |
---|
75 | if(omp_get_num_threads()>1) isClient = true; |
---|
76 | else isServer = true; |
---|
77 | |
---|
78 | printf("mpi_rank = %d, ep_rank = %d, isClient = %d\n", mpi_rank, rank, isClient); |
---|
79 | |
---|
80 | MPI_Win ep_win; |
---|
81 | MPI_Aint buf_size(1); |
---|
82 | int buf; |
---|
83 | MPI_Win_create(&buf, buf_size, sizeof(int), info, comm, &ep_win); |
---|
84 | MPI_Barrier(comm); |
---|
85 | |
---|
86 | // MPI_Win_fence(MPI_MODE_NOPRECEDE, ep_win); |
---|
87 | |
---|
88 | MPI_Barrier(comm); |
---|
89 | sleep(0.2); |
---|
90 | MPI_Barrier(comm); |
---|
91 | |
---|
92 | MPI_Win_fence(MPI_MODE_NOSUCCEED, ep_win); |
---|
93 | |
---|
94 | MPI_Barrier(comm); |
---|
95 | |
---|
96 | |
---|
97 | MPI_Win_free(&ep_win); |
---|
98 | printf("comm free\n"); |
---|
99 | |
---|
100 | MPI_Comm_free(&comm); |
---|
101 | |
---|
102 | } |
---|
103 | |
---|
104 | |
---|
105 | |
---|
106 | |
---|
107 | |
---|
108 | omp_set_num_threads(4); |
---|
109 | |
---|
110 | |
---|
111 | |
---|
112 | #pragma omp parallel default(shared) |
---|
113 | { |
---|
114 | MPI_Comm_rank(MPI_COMM_WORLD , &mpi_rank); |
---|
115 | |
---|
116 | int num_ep = omp_get_num_threads(); |
---|
117 | MPI_Info info; |
---|
118 | |
---|
119 | //printf("mpi_rank = %d, thread_num = %d\n", mpi_rank, omp_get_thread_num()); |
---|
120 | |
---|
121 | MPI_Comm *ep_comm; |
---|
122 | #pragma omp master |
---|
123 | { |
---|
124 | MPI_Comm *ep_comm; |
---|
125 | MPI_Comm_create_endpoints(MPI_COMM_WORLD.mpi_comm, num_ep, info, ep_comm); |
---|
126 | passage = ep_comm; |
---|
127 | } |
---|
128 | |
---|
129 | #pragma omp barrier |
---|
130 | |
---|
131 | |
---|
132 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
133 | comm = passage[omp_get_thread_num()]; |
---|
134 | |
---|
135 | int rank, size; |
---|
136 | MPI_Comm_rank(comm, &rank); |
---|
137 | MPI_Comm_size(comm, &size); |
---|
138 | |
---|
139 | // TIMING SYCHRONIZATION |
---|
140 | { |
---|
141 | int n=100000; |
---|
142 | |
---|
143 | high_resolution_clock::time_point t1 = high_resolution_clock::now(); |
---|
144 | |
---|
145 | for(int i=0; i<n; i++) |
---|
146 | MPI_Barrier_local(comm); |
---|
147 | |
---|
148 | high_resolution_clock::time_point t2 = high_resolution_clock::now(); |
---|
149 | duration<double> time_span = duration_cast<duration<double>>(t2 - t1); |
---|
150 | #pragma omp master |
---|
151 | std::cout << "proc "<< mpi_rank <<" ep_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
152 | |
---|
153 | t1 = high_resolution_clock::now(); |
---|
154 | |
---|
155 | for(int i=0; i<n; i++) |
---|
156 | { |
---|
157 | #pragma omp barrier |
---|
158 | } |
---|
159 | |
---|
160 | t2 = high_resolution_clock::now(); |
---|
161 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
162 | |
---|
163 | #pragma omp master |
---|
164 | std::cout << "proc "<< mpi_rank <<" omp_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
165 | |
---|
166 | t1 = high_resolution_clock::now(); |
---|
167 | |
---|
168 | for(int i=0; i<n; i++) |
---|
169 | { |
---|
170 | //#pragma omp barrier |
---|
171 | } |
---|
172 | |
---|
173 | t2 = high_resolution_clock::now(); |
---|
174 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
175 | |
---|
176 | MPI_Barrier(comm); |
---|
177 | |
---|
178 | #pragma omp master |
---|
179 | std::cout << "proc "<< mpi_rank <<" for_loop "<< time_span.count() << " seconds."<<std::endl; |
---|
180 | } |
---|
181 | |
---|
182 | |
---|
183 | // TEST OF BCAST FROM A RANDOM ROOT |
---|
184 | { |
---|
185 | int bcast_root; |
---|
186 | |
---|
187 | if(rank == 0) bcast_root = rand() % size; |
---|
188 | |
---|
189 | MPI_Bcast(&bcast_root, 1, MPI_INT, 0, comm); |
---|
190 | |
---|
191 | int sendbuf[2]; |
---|
192 | |
---|
193 | sendbuf[0] = rank; |
---|
194 | sendbuf[1] = size; |
---|
195 | |
---|
196 | MPI_Bcast(sendbuf, 2, MPI_INT, bcast_root, comm); |
---|
197 | |
---|
198 | int bcast_test = 0; |
---|
199 | if(sendbuf[0] == bcast_root && sendbuf[1] == size) bcast_test = 1; |
---|
200 | |
---|
201 | int bcast_result; |
---|
202 | |
---|
203 | MPI_Reduce(&bcast_test, &bcast_result, 1, MPI_INT, MPI_MIN, bcast_root, comm); |
---|
204 | |
---|
205 | if(bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t OK\n", bcast_root); |
---|
206 | if(!bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t FAILED %d\n", bcast_root, bcast_result); |
---|
207 | } |
---|
208 | |
---|
209 | MPI_Barrier(comm); |
---|
210 | |
---|
211 | // TEST OF GATHER FROM A RAMDOM ROOT |
---|
212 | { |
---|
213 | int gather_root; |
---|
214 | |
---|
215 | if(rank == 0) gather_root = rand() % size; |
---|
216 | |
---|
217 | MPI_Bcast(&gather_root, 1, MPI_INT, 0, comm); |
---|
218 | |
---|
219 | double sendbuf[2]; |
---|
220 | sendbuf[0] = rank * 1.0; |
---|
221 | sendbuf[1] = size * (-1.0); |
---|
222 | |
---|
223 | std::vector<double>recvbuf(2*size, 0); |
---|
224 | |
---|
225 | MPI_Gather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, gather_root, comm); |
---|
226 | |
---|
227 | bool gather_result = true; |
---|
228 | |
---|
229 | if(rank == gather_root) |
---|
230 | { |
---|
231 | for(int i=0; i<size; i++) |
---|
232 | { |
---|
233 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
234 | { |
---|
235 | gather_result = false; |
---|
236 | break; |
---|
237 | } |
---|
238 | } |
---|
239 | |
---|
240 | if(gather_result) printf("root = %d : \t test MPI_Gather \t OK \n", gather_root); |
---|
241 | else printf("root = %d : \t test MPI_Gather \t FAILED\n", gather_root); |
---|
242 | } |
---|
243 | } |
---|
244 | |
---|
245 | MPI_Barrier(comm); |
---|
246 | |
---|
247 | // TEST OF GATHERV FROM A RAMDOM ROOT |
---|
248 | { |
---|
249 | int gatherv_root; |
---|
250 | |
---|
251 | if(rank == 0) gatherv_root = rand() % size; |
---|
252 | |
---|
253 | MPI_Bcast(&gatherv_root, 1, MPI_INT, 0, comm); |
---|
254 | |
---|
255 | int sendbuf[2]; |
---|
256 | sendbuf[0] = rank; |
---|
257 | sendbuf[1] = -size; |
---|
258 | |
---|
259 | std::vector<int>recvbuf(2*size, 0); |
---|
260 | |
---|
261 | std::vector<int>recvcounts(size, 2); |
---|
262 | std::vector<int>displs(size, 0); |
---|
263 | |
---|
264 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
265 | |
---|
266 | MPI_Gatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, gatherv_root, comm); |
---|
267 | |
---|
268 | bool gatherv_result = true; |
---|
269 | |
---|
270 | if(rank == gatherv_root) |
---|
271 | { |
---|
272 | for(int i=0; i<size; i++) |
---|
273 | { |
---|
274 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
275 | { |
---|
276 | gatherv_result = false; printf("%lf %lf root = %d, i = %d\n", recvbuf[2*i], recvbuf[2*i+1], gatherv_root, i); |
---|
277 | break; |
---|
278 | } |
---|
279 | } |
---|
280 | |
---|
281 | //for(int i=0; i<size*2; i++) printf("%lf\t", recvbuf[i]); |
---|
282 | //printf("\n"); |
---|
283 | |
---|
284 | if(gatherv_result) printf("root = %d : \t test MPI_Gatherv \t OK\n", gatherv_root); |
---|
285 | else printf("root = %d : \t test MPI_Gatherv \t FAILED\n", gatherv_root); |
---|
286 | } |
---|
287 | } |
---|
288 | |
---|
289 | MPI_Barrier(comm); |
---|
290 | |
---|
291 | // TEST OF ALLGATHER |
---|
292 | { |
---|
293 | double sendbuf[2]; |
---|
294 | sendbuf[0] = rank * 1.0; |
---|
295 | sendbuf[1] = size * (-1.0); |
---|
296 | |
---|
297 | std::vector<double>recvbuf(2*size, 0); |
---|
298 | |
---|
299 | MPI_Allgather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, comm); |
---|
300 | |
---|
301 | int allgather_test = 1; |
---|
302 | |
---|
303 | for(int i=0; i<size; i++) |
---|
304 | { |
---|
305 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
306 | { |
---|
307 | allgather_test = 0; |
---|
308 | break; |
---|
309 | } |
---|
310 | } |
---|
311 | |
---|
312 | int allgather_result; |
---|
313 | MPI_Reduce(&allgather_test, &allgather_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
314 | |
---|
315 | if(rank == 0 && allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
316 | if(rank == 0 && !allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
317 | |
---|
318 | } |
---|
319 | |
---|
320 | MPI_Barrier(comm); |
---|
321 | |
---|
322 | // TEST OF ALLGATHERV |
---|
323 | { |
---|
324 | int sendbuf[2]; |
---|
325 | sendbuf[0] = rank; |
---|
326 | sendbuf[1] = -size; |
---|
327 | |
---|
328 | std::vector<int>recvbuf(2*size, 0); |
---|
329 | |
---|
330 | std::vector<int>recvcounts(size, 2); |
---|
331 | std::vector<int>displs(size, 0); |
---|
332 | |
---|
333 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
334 | |
---|
335 | MPI_Allgatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, comm); |
---|
336 | |
---|
337 | int allgatherv_test = 1; |
---|
338 | |
---|
339 | |
---|
340 | |
---|
341 | for(int i=0; i<size; i++) |
---|
342 | { |
---|
343 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
344 | { |
---|
345 | allgatherv_test = 0; printf("ID : %d %d %d %d %d\n", rank, recvbuf[2*i], recvbuf[2*i+1] , recvbuf[2*i] - (size-1-i), recvbuf[2*i+1] + size); |
---|
346 | break; |
---|
347 | } |
---|
348 | } |
---|
349 | |
---|
350 | |
---|
351 | int allgatherv_result; |
---|
352 | MPI_Reduce(&allgatherv_test, &allgatherv_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
353 | |
---|
354 | if(rank == 0 && allgatherv_result) printf(" \t test MPI_Allgatherv \t OK \n"); |
---|
355 | if(rank == 0 && !allgatherv_result) printf(" \t test MPI_Allgatherv \t FAILED %d\n", allgatherv_result); |
---|
356 | |
---|
357 | } |
---|
358 | |
---|
359 | MPI_Barrier(comm); |
---|
360 | |
---|
361 | // TEST OF REDUCE |
---|
362 | { |
---|
363 | int reduce_root; |
---|
364 | |
---|
365 | if(rank == 0) reduce_root = rand() % size; |
---|
366 | |
---|
367 | MPI_Bcast(&reduce_root, 1, MPI_INT, 0, comm); |
---|
368 | |
---|
369 | int sendbuf[2]; |
---|
370 | sendbuf[0] = rank; |
---|
371 | sendbuf[1] = -size; |
---|
372 | |
---|
373 | std::vector<int>recvbuf(2, 0); |
---|
374 | |
---|
375 | MPI_Op op = MPI_MIN; |
---|
376 | |
---|
377 | MPI_Reduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, reduce_root, comm); |
---|
378 | |
---|
379 | |
---|
380 | bool reduce_result = true; |
---|
381 | |
---|
382 | if(rank == reduce_root) |
---|
383 | { |
---|
384 | for(int i=0; i<2; i++) |
---|
385 | { |
---|
386 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
387 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
388 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
389 | { |
---|
390 | reduce_result = false; printf("%d %d root = %d, i = %d\n", recvbuf[0], recvbuf[1], reduce_root, i); |
---|
391 | break; |
---|
392 | } |
---|
393 | } |
---|
394 | } |
---|
395 | |
---|
396 | if(rank == reduce_root && reduce_result) printf("root = %d : \t test MPI_Reduce \t OK\n", reduce_root); |
---|
397 | if(rank == reduce_root && !reduce_result) printf("root = %d : \t test MPI_Reduce \t FAILED\n", reduce_root); |
---|
398 | } |
---|
399 | |
---|
400 | |
---|
401 | MPI_Barrier(comm); |
---|
402 | |
---|
403 | // TEST OF ALLREDUCE |
---|
404 | { |
---|
405 | |
---|
406 | int sendbuf[2]; |
---|
407 | sendbuf[0] = rank; |
---|
408 | sendbuf[1] = -size; |
---|
409 | |
---|
410 | std::vector<int>recvbuf(2, 0); |
---|
411 | |
---|
412 | MPI_Op op = MPI_MIN; |
---|
413 | |
---|
414 | MPI_Allreduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, comm); |
---|
415 | |
---|
416 | |
---|
417 | int allreduce_test = 1; |
---|
418 | |
---|
419 | |
---|
420 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
421 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
422 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
423 | { |
---|
424 | allreduce_test = 0; printf("%d %d\n", recvbuf[0], recvbuf[1]); |
---|
425 | } |
---|
426 | |
---|
427 | |
---|
428 | int allreduce_result; |
---|
429 | MPI_Reduce(&allreduce_test, &allreduce_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
430 | |
---|
431 | if(rank == 0 && allreduce_result) printf(" \t test MPI_Allreduce \t OK\n"); |
---|
432 | if(rank == 0 && !allreduce_result) printf(" \t test MPI_Allreduce \t FAILED\n"); |
---|
433 | } |
---|
434 | |
---|
435 | |
---|
436 | MPI_Barrier(comm); |
---|
437 | |
---|
438 | // TEST OF REDUCE_SCATTER |
---|
439 | { |
---|
440 | |
---|
441 | std::vector<int>sendbuf(2*size, rank); |
---|
442 | std::vector<int>recvbuf(2, -1); |
---|
443 | std::vector<int>recvcounts(size, 2); |
---|
444 | |
---|
445 | MPI_Op op = MPI_MIN; |
---|
446 | |
---|
447 | MPI_Reduce_scatter(sendbuf.data(), recvbuf.data(), recvcounts.data(), MPI_INT, op, comm); |
---|
448 | |
---|
449 | |
---|
450 | int reduce_scatter_test = 1; |
---|
451 | |
---|
452 | |
---|
453 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[0]-(size-1)*size/2) > 1.e-10) ) || |
---|
454 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1]-(size-1)) > 1.e-10) ) || |
---|
455 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] ) > 1.e-10) ) ) |
---|
456 | { |
---|
457 | reduce_scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
458 | } |
---|
459 | |
---|
460 | |
---|
461 | int reduce_scatter_result; |
---|
462 | MPI_Reduce(&reduce_scatter_test, &reduce_scatter_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
463 | |
---|
464 | if(rank == 0 && reduce_scatter_result) printf(" \t test MPI_Reduce_scatter OK\n"); |
---|
465 | if(rank == 0 && !reduce_scatter_result) printf(" \t test MPI_Reduce_scatter FAILED\n"); |
---|
466 | } |
---|
467 | |
---|
468 | MPI_Barrier(comm); |
---|
469 | |
---|
470 | // TEST OF SCATTER |
---|
471 | { |
---|
472 | |
---|
473 | int scatter_root; |
---|
474 | |
---|
475 | if(rank == 0) scatter_root = rand() % size; |
---|
476 | |
---|
477 | MPI_Bcast(&scatter_root, 1, MPI_INT, 0, comm); |
---|
478 | |
---|
479 | std::vector<int>sendbuf(2*size, rank); |
---|
480 | std::vector<int>recvbuf(2, -1); |
---|
481 | std::vector<int>recvcounts(size, 2); |
---|
482 | |
---|
483 | if(rank == scatter_root) |
---|
484 | { |
---|
485 | for(int i=0; i<size; i++) |
---|
486 | { |
---|
487 | sendbuf[2*i] = i; |
---|
488 | sendbuf[2*i+1] = size; |
---|
489 | } |
---|
490 | //for(int i=0; i<size*2; i++) printf("%d\t", sendbuf[i]); |
---|
491 | } |
---|
492 | |
---|
493 | |
---|
494 | MPI_Scatter(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, scatter_root, comm); |
---|
495 | |
---|
496 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
497 | |
---|
498 | int scatter_test = 1; |
---|
499 | |
---|
500 | |
---|
501 | if( abs(recvbuf[0]-rank) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
502 | { |
---|
503 | scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
504 | } |
---|
505 | |
---|
506 | |
---|
507 | int scatter_result; |
---|
508 | MPI_Reduce(&scatter_test, &scatter_result, 1, MPI_INT, MPI_MIN, scatter_root, comm); |
---|
509 | |
---|
510 | if(rank == scatter_root && scatter_result) printf("root = %d : \t test MPI_Scatter \t OK\n", scatter_root); |
---|
511 | if(rank == scatter_root && !scatter_result) printf("root = %d : \t test MPI_Scatter \t FAILED\n", scatter_root); |
---|
512 | } |
---|
513 | |
---|
514 | MPI_Barrier(comm); |
---|
515 | |
---|
516 | // TEST OF SCATTERV |
---|
517 | { |
---|
518 | |
---|
519 | int scatterv_root; |
---|
520 | |
---|
521 | if(rank == 0) scatterv_root = rand() % size; |
---|
522 | |
---|
523 | MPI_Bcast(&scatterv_root, 1, MPI_INT, 0, comm); |
---|
524 | |
---|
525 | std::vector<int>sendbuf(2*size, rank); |
---|
526 | std::vector<int>recvbuf(2, -1); |
---|
527 | std::vector<int>sendcounts(size, 2); |
---|
528 | std::vector<int>displs(size, 0); |
---|
529 | |
---|
530 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
531 | |
---|
532 | if(rank == scatterv_root) |
---|
533 | { |
---|
534 | for(int i=0; i<size; i++) |
---|
535 | { |
---|
536 | sendbuf[2*i] = i; |
---|
537 | sendbuf[2*i+1] = size; |
---|
538 | } |
---|
539 | } |
---|
540 | |
---|
541 | |
---|
542 | MPI_Scatterv(sendbuf.data(), sendcounts.data(), displs.data(), MPI_INT, recvbuf.data(), 2, MPI_INT, scatterv_root, comm); |
---|
543 | |
---|
544 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
545 | |
---|
546 | int scatterv_test = 1; |
---|
547 | |
---|
548 | |
---|
549 | if( abs(recvbuf[0]-(size-1-rank)) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
550 | { |
---|
551 | scatterv_test = 0; printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
552 | } |
---|
553 | |
---|
554 | |
---|
555 | int scatterv_result; |
---|
556 | MPI_Reduce(&scatterv_test, &scatterv_result, 1, MPI_INT, MPI_MIN, scatterv_root, comm); |
---|
557 | |
---|
558 | if(rank == scatterv_root && scatterv_result) printf("root = %d : \t test MPI_Scatterv \t OK\n", scatterv_root); |
---|
559 | if(rank == scatterv_root && !scatterv_result) printf("root = %d : \t test MPI_Scatterv \t FAILED\n", scatterv_root); |
---|
560 | } |
---|
561 | |
---|
562 | MPI_Barrier(comm); |
---|
563 | |
---|
564 | // TEST OF ALLTOALL |
---|
565 | { |
---|
566 | |
---|
567 | std::vector<int>sendbuf(size, rank); |
---|
568 | std::vector<int>recvbuf(size, -1); |
---|
569 | |
---|
570 | |
---|
571 | MPI_Alltoall(sendbuf.data(), 1, MPI_INT, recvbuf.data(), 1, MPI_INT, comm); |
---|
572 | |
---|
573 | int alltoall_result = 1; |
---|
574 | |
---|
575 | |
---|
576 | for(int i=0; i<size; i++) |
---|
577 | if( abs(recvbuf[i]-i) > 1.e-10 ) |
---|
578 | { |
---|
579 | alltoall_result = 0; printf("%d id = %d\n", recvbuf[i], rank); |
---|
580 | } |
---|
581 | |
---|
582 | if(rank == 0 && alltoall_result) printf(" \t test MPI_Alltoall \t OK\n"); |
---|
583 | if(rank == 0 && !alltoall_result) printf(" \t test MPI_Alltoall \t FAILED\n"); |
---|
584 | } |
---|
585 | |
---|
586 | // TEST OF SCAN |
---|
587 | { |
---|
588 | |
---|
589 | std::vector<int>sendbuf(2, rank); |
---|
590 | std::vector<int>recvbuf(2, -1); |
---|
591 | |
---|
592 | MPI_Op op = MPI_SUM; |
---|
593 | |
---|
594 | |
---|
595 | MPI_Scan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
596 | |
---|
597 | int scan_test = 1; |
---|
598 | |
---|
599 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
600 | |
---|
601 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank+1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank+1)/2) > 1.e-10) ) || |
---|
602 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
603 | (op == MPI_MAX && (abs(recvbuf[0] - rank) > 1.e-10 || abs(recvbuf[1] - rank) > 1.e-10) ) ) |
---|
604 | { |
---|
605 | scan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
606 | } |
---|
607 | |
---|
608 | int scan_result; |
---|
609 | MPI_Reduce(&scan_test, &scan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
610 | |
---|
611 | if(rank == 0 && scan_result) printf(" \t test MPI_Scan \t\t OK\n"); |
---|
612 | if(rank == 0 && !scan_result) printf(" \t test MPI_Scan \t\t FAILED\n"); |
---|
613 | } |
---|
614 | |
---|
615 | |
---|
616 | // TEST OF EXSCAN |
---|
617 | { |
---|
618 | |
---|
619 | std::vector<int>sendbuf(2, rank); |
---|
620 | std::vector<int>recvbuf(2, -1); |
---|
621 | |
---|
622 | MPI_Op op = MPI_SUM; |
---|
623 | |
---|
624 | |
---|
625 | MPI_Exscan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
626 | |
---|
627 | int exscan_test = 1; |
---|
628 | |
---|
629 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
630 | |
---|
631 | if(rank >0) |
---|
632 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank-1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank-1)/2) > 1.e-10) ) || |
---|
633 | (op == MPI_MIN && (abs(recvbuf[0] ) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
634 | (op == MPI_MAX && (abs(recvbuf[0] - rank+1) > 1.e-10 || abs(recvbuf[1] - rank+1) > 1.e-10) ) ) |
---|
635 | { |
---|
636 | exscan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
637 | } |
---|
638 | |
---|
639 | int exscan_result; |
---|
640 | MPI_Reduce(&exscan_test, &exscan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
641 | |
---|
642 | if(rank == 0 && exscan_result) printf(" \t test MPI_Exscan \t OK\n"); |
---|
643 | if(rank == 0 && !exscan_result) printf(" \t test MPI_Exscan \t FAILED\n"); |
---|
644 | } |
---|
645 | |
---|
646 | |
---|
647 | |
---|
648 | |
---|
649 | /* |
---|
650 | MPI_Barrier(comm); |
---|
651 | { |
---|
652 | int rank, size; |
---|
653 | MPI_Comm_rank(comm, &rank); |
---|
654 | MPI_Comm_size(comm, &size); |
---|
655 | |
---|
656 | //int color = rank%2; |
---|
657 | int color, remote_leader; |
---|
658 | if(rank<size-2) {color = 1; remote_leader = size-2;} |
---|
659 | else {color = 0; remote_leader = 0;} |
---|
660 | |
---|
661 | printf("rank = %d, color = %d, remote_leader = %d\n", rank, color, remote_leader); |
---|
662 | |
---|
663 | MPI_Comm sub_comm; |
---|
664 | MPI_Comm_split(comm, color, rank, &sub_comm); |
---|
665 | |
---|
666 | |
---|
667 | |
---|
668 | int sub_rank; |
---|
669 | MPI_Comm_rank(sub_comm, &sub_rank); |
---|
670 | |
---|
671 | |
---|
672 | MPI_Barrier(comm); |
---|
673 | if(rank == 0) printf("\tMPI_Comm_split OK\n"); |
---|
674 | MPI_Barrier(comm); |
---|
675 | |
---|
676 | MPI_Comm inter_comm; |
---|
677 | //MPI_Intercomm_create(sub_comm, 0, comm, (color+1)%2, 99, &inter_comm); |
---|
678 | MPI_Intercomm_create(sub_comm, 0, comm, remote_leader, 99, &inter_comm); |
---|
679 | |
---|
680 | MPI_Barrier(comm); |
---|
681 | if(rank == 0) printf("\tMPI_Intercomm_create OK\n"); |
---|
682 | MPI_Barrier(comm); |
---|
683 | |
---|
684 | |
---|
685 | |
---|
686 | int high=color; |
---|
687 | MPI_Comm intra_comm; |
---|
688 | MPI_Intercomm_merge(inter_comm, high, &intra_comm); |
---|
689 | |
---|
690 | int intra_rank, intra_size; |
---|
691 | MPI_Comm_rank(intra_comm, &intra_rank); |
---|
692 | MPI_Comm_size(intra_comm, &intra_size); |
---|
693 | |
---|
694 | MPI_Barrier(comm); |
---|
695 | if(rank == 0) printf("\tMPI_Intercomm_merge OK\n"); |
---|
696 | MPI_Barrier(comm); |
---|
697 | } |
---|
698 | |
---|
699 | //check_test_gatherv(comm); |
---|
700 | |
---|
701 | // MPI_Barrier(comm); |
---|
702 | // MPI_Comm_free(&sub_comm); |
---|
703 | |
---|
704 | |
---|
705 | // MPI_Barrier(comm); |
---|
706 | // MPI_Comm_free(&inter_comm); |
---|
707 | */ |
---|
708 | |
---|
709 | MPI_Barrier(comm); |
---|
710 | MPI_Comm_free(&comm); |
---|
711 | } |
---|
712 | |
---|
713 | MPI_Finalize(); |
---|
714 | |
---|
715 | } |
---|