#include "ep_lib.hpp"
#include "ep_declaration.hpp"

#include <omp.h>

#include <assert.h>
#include <stdio.h>
#include <time.h>    /* time */

#include <chrono>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <ratio>
#include <thread>
#include <vector>

using namespace ep_lib;
using namespace std::chrono;
---|
13 | |
---|
14 | |
---|
15 | int main(int argc, char **argv) |
---|
16 | { |
---|
17 | srand (time(NULL)); |
---|
18 | |
---|
19 | printf("Testing ep_lib\n"); |
---|
20 | int required=3, provided; |
---|
21 | |
---|
22 | MPI_Init_thread(&argc, &argv, required, &provided); |
---|
23 | |
---|
24 | assert(required==provided); |
---|
25 | |
---|
26 | int mpi_rank; |
---|
27 | int mpi_size; |
---|
28 | |
---|
29 | MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); |
---|
30 | MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); |
---|
31 | |
---|
32 | #pragma omp parallel default(shared) |
---|
33 | { |
---|
34 | MPI_Comm_rank(MPI_COMM_WORLD , &mpi_rank); |
---|
35 | |
---|
36 | int num_ep = omp_get_num_threads(); |
---|
37 | MPI_Info info; |
---|
38 | |
---|
39 | //printf("mpi_rank = %d, thread_num = %d\n", mpi_rank, omp_get_thread_num()); |
---|
40 | |
---|
41 | MPI_Comm *ep_comm; |
---|
42 | #pragma omp master |
---|
43 | { |
---|
44 | MPI_Comm *ep_comm; |
---|
45 | MPI_Comm_create_endpoints(MPI_COMM_WORLD.mpi_comm, num_ep, info, ep_comm); |
---|
46 | passage = ep_comm; |
---|
47 | } |
---|
48 | |
---|
49 | #pragma omp barrier |
---|
50 | |
---|
51 | |
---|
52 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
53 | comm = passage[omp_get_thread_num()]; |
---|
54 | |
---|
55 | int rank, size; |
---|
56 | MPI_Comm_rank(comm, &rank); |
---|
57 | MPI_Comm_size(comm, &size); |
---|
58 | |
---|
59 | // TIMING SYCHRONIZATION |
---|
60 | { |
---|
61 | int n=100000; |
---|
62 | |
---|
63 | MPI_Barrier(comm); |
---|
64 | |
---|
65 | high_resolution_clock::time_point t1 = high_resolution_clock::now(); |
---|
66 | |
---|
67 | for(int i=0; i<n; i++) |
---|
68 | MPI_Barrier_local(comm); |
---|
69 | |
---|
70 | high_resolution_clock::time_point t2 = high_resolution_clock::now(); |
---|
71 | duration<double> time_span = duration_cast<duration<double>>(t2 - t1); |
---|
72 | #pragma omp master |
---|
73 | std::cout << "proc "<< mpi_rank <<" ep_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
74 | |
---|
75 | t1 = high_resolution_clock::now(); |
---|
76 | |
---|
77 | for(int i=0; i<n; i++) |
---|
78 | { |
---|
79 | #pragma omp barrier |
---|
80 | } |
---|
81 | |
---|
82 | t2 = high_resolution_clock::now(); |
---|
83 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
84 | |
---|
85 | #pragma omp master |
---|
86 | std::cout << "proc "<< mpi_rank <<" omp_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
87 | |
---|
88 | t1 = high_resolution_clock::now(); |
---|
89 | |
---|
90 | for(int i=0; i<n; i++) |
---|
91 | { |
---|
92 | //#pragma omp barrier |
---|
93 | } |
---|
94 | |
---|
95 | t2 = high_resolution_clock::now(); |
---|
96 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
97 | |
---|
98 | MPI_Barrier(comm); |
---|
99 | |
---|
100 | #pragma omp master |
---|
101 | std::cout << "proc "<< mpi_rank <<" for_loop "<< time_span.count() << " seconds."<<std::endl; |
---|
102 | } |
---|
103 | |
---|
104 | |
---|
105 | // TEST OF BCAST FROM A RANDOM ROOT |
---|
106 | { |
---|
107 | int bcast_root; |
---|
108 | |
---|
109 | if(rank == 0) bcast_root = rand() % size; |
---|
110 | |
---|
111 | MPI_Bcast(&bcast_root, 1, MPI_INT, 0, comm); |
---|
112 | |
---|
113 | int sendbuf[2]; |
---|
114 | |
---|
115 | sendbuf[0] = rank; |
---|
116 | sendbuf[1] = size; |
---|
117 | |
---|
118 | MPI_Bcast(sendbuf, 2, MPI_INT, bcast_root, comm); |
---|
119 | |
---|
120 | int bcast_test = 0; |
---|
121 | if(sendbuf[0] == bcast_root && sendbuf[1] == size) bcast_test = 1; |
---|
122 | |
---|
123 | int bcast_result; |
---|
124 | |
---|
125 | MPI_Reduce(&bcast_test, &bcast_result, 1, MPI_INT, MPI_MIN, bcast_root, comm); |
---|
126 | |
---|
127 | if(bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t OK\n", bcast_root); |
---|
128 | if(!bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t FAILED %d\n", bcast_root, bcast_result); |
---|
129 | } |
---|
130 | |
---|
131 | MPI_Barrier(comm); |
---|
132 | |
---|
133 | // TEST OF GATHER FROM A RAMDOM ROOT |
---|
134 | { |
---|
135 | int gather_root; |
---|
136 | |
---|
137 | if(rank == 0) gather_root = rand() % size; |
---|
138 | |
---|
139 | MPI_Bcast(&gather_root, 1, MPI_INT, 0, comm); |
---|
140 | |
---|
141 | double sendbuf[2]; |
---|
142 | sendbuf[0] = rank * 1.0; |
---|
143 | sendbuf[1] = size * (-1.0); |
---|
144 | |
---|
145 | std::vector<double>recvbuf(2*size, 0); |
---|
146 | |
---|
147 | MPI_Gather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, gather_root, comm); |
---|
148 | |
---|
149 | bool gather_result = true; |
---|
150 | |
---|
151 | if(rank == gather_root) |
---|
152 | { |
---|
153 | for(int i=0; i<size; i++) |
---|
154 | { |
---|
155 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
156 | { |
---|
157 | gather_result = false; |
---|
158 | break; |
---|
159 | } |
---|
160 | } |
---|
161 | |
---|
162 | if(gather_result) printf("root = %d : \t test MPI_Gather \t OK \n", gather_root); |
---|
163 | else printf("root = %d : \t test MPI_Gather \t FAILED\n", gather_root); |
---|
164 | } |
---|
165 | } |
---|
166 | |
---|
167 | MPI_Barrier(comm); |
---|
168 | |
---|
169 | // TEST OF GATHERV FROM A RAMDOM ROOT |
---|
170 | { |
---|
171 | int gatherv_root; |
---|
172 | |
---|
173 | if(rank == 0) gatherv_root = rand() % size; |
---|
174 | |
---|
175 | MPI_Bcast(&gatherv_root, 1, MPI_INT, 0, comm); |
---|
176 | |
---|
177 | int sendbuf[2]; |
---|
178 | sendbuf[0] = rank; |
---|
179 | sendbuf[1] = -size; |
---|
180 | |
---|
181 | std::vector<int>recvbuf(2*size, 0); |
---|
182 | |
---|
183 | std::vector<int>recvcounts(size, 2); |
---|
184 | std::vector<int>displs(size, 0); |
---|
185 | |
---|
186 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
187 | |
---|
188 | MPI_Gatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, gatherv_root, comm); |
---|
189 | |
---|
190 | bool gatherv_result = true; |
---|
191 | |
---|
192 | if(rank == gatherv_root) |
---|
193 | { |
---|
194 | for(int i=0; i<size; i++) |
---|
195 | { |
---|
196 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
197 | { |
---|
198 | gatherv_result = false; printf("%lf %lf root = %d, i = %d\n", recvbuf[2*i], recvbuf[2*i+1], gatherv_root, i); |
---|
199 | break; |
---|
200 | } |
---|
201 | } |
---|
202 | |
---|
203 | //for(int i=0; i<size*2; i++) printf("%lf\t", recvbuf[i]); |
---|
204 | //printf("\n"); |
---|
205 | |
---|
206 | if(gatherv_result) printf("root = %d : \t test MPI_Gatherv \t OK\n", gatherv_root); |
---|
207 | else printf("root = %d : \t test MPI_Gatherv \t FAILED\n", gatherv_root); |
---|
208 | } |
---|
209 | } |
---|
210 | |
---|
211 | MPI_Barrier(comm); |
---|
212 | |
---|
213 | // TEST OF ALLGATHER |
---|
214 | { |
---|
215 | double sendbuf[2]; |
---|
216 | sendbuf[0] = rank * 1.0; |
---|
217 | sendbuf[1] = size * (-1.0); |
---|
218 | |
---|
219 | std::vector<double>recvbuf(2*size, 0); |
---|
220 | |
---|
221 | MPI_Allgather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, comm); |
---|
222 | |
---|
223 | int allgather_test = 1; |
---|
224 | |
---|
225 | for(int i=0; i<size; i++) |
---|
226 | { |
---|
227 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
228 | { |
---|
229 | allgather_test = 0; |
---|
230 | break; |
---|
231 | } |
---|
232 | } |
---|
233 | |
---|
234 | int allgather_result; |
---|
235 | MPI_Reduce(&allgather_test, &allgather_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
236 | |
---|
237 | if(rank == 0 && allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
238 | if(rank == 0 && !allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
239 | |
---|
240 | } |
---|
241 | |
---|
242 | MPI_Barrier(comm); |
---|
243 | |
---|
244 | // TEST OF ALLGATHERV |
---|
245 | { |
---|
246 | int sendbuf[2]; |
---|
247 | sendbuf[0] = rank; |
---|
248 | sendbuf[1] = -size; |
---|
249 | |
---|
250 | std::vector<int>recvbuf(2*size, 0); |
---|
251 | |
---|
252 | std::vector<int>recvcounts(size, 2); |
---|
253 | std::vector<int>displs(size, 0); |
---|
254 | |
---|
255 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
256 | |
---|
257 | MPI_Allgatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, comm); |
---|
258 | |
---|
259 | int allgatherv_test = 1; |
---|
260 | |
---|
261 | |
---|
262 | |
---|
263 | for(int i=0; i<size; i++) |
---|
264 | { |
---|
265 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
266 | { |
---|
267 | allgatherv_test = 0; printf("ID : %d %d %d %d %d\n", rank, recvbuf[2*i], recvbuf[2*i+1] , recvbuf[2*i] - (size-1-i), recvbuf[2*i+1] + size); |
---|
268 | break; |
---|
269 | } |
---|
270 | } |
---|
271 | |
---|
272 | |
---|
273 | int allgatherv_result; |
---|
274 | MPI_Reduce(&allgatherv_test, &allgatherv_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
275 | |
---|
276 | if(rank == 0 && allgatherv_result) printf(" \t test MPI_Allgatherv \t OK \n"); |
---|
277 | if(rank == 0 && !allgatherv_result) printf(" \t test MPI_Allgatherv \t FAILED %d\n", allgatherv_result); |
---|
278 | |
---|
279 | } |
---|
280 | |
---|
281 | MPI_Barrier(comm); |
---|
282 | |
---|
283 | // TEST OF REDUCE |
---|
284 | { |
---|
285 | int reduce_root; |
---|
286 | |
---|
287 | if(rank == 0) reduce_root = rand() % size; |
---|
288 | |
---|
289 | MPI_Bcast(&reduce_root, 1, MPI_INT, 0, comm); |
---|
290 | |
---|
291 | int sendbuf[2]; |
---|
292 | sendbuf[0] = rank; |
---|
293 | sendbuf[1] = -size; |
---|
294 | |
---|
295 | std::vector<int>recvbuf(2, 0); |
---|
296 | |
---|
297 | MPI_Op op = MPI_MIN; |
---|
298 | |
---|
299 | MPI_Reduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, reduce_root, comm); |
---|
300 | |
---|
301 | |
---|
302 | bool reduce_result = true; |
---|
303 | |
---|
304 | if(rank == reduce_root) |
---|
305 | { |
---|
306 | for(int i=0; i<2; i++) |
---|
307 | { |
---|
308 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
309 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
310 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
311 | { |
---|
312 | reduce_result = false; printf("%d %d root = %d, i = %d\n", recvbuf[0], recvbuf[1], reduce_root, i); |
---|
313 | break; |
---|
314 | } |
---|
315 | } |
---|
316 | } |
---|
317 | |
---|
318 | if(rank == reduce_root && reduce_result) printf("root = %d : \t test MPI_Reduce \t OK\n", reduce_root); |
---|
319 | if(rank == reduce_root && !reduce_result) printf("root = %d : \t test MPI_Reduce \t FAILED\n", reduce_root); |
---|
320 | } |
---|
321 | |
---|
322 | |
---|
323 | MPI_Barrier(comm); |
---|
324 | |
---|
325 | // TEST OF ALLREDUCE |
---|
326 | { |
---|
327 | |
---|
328 | int sendbuf[2]; |
---|
329 | sendbuf[0] = rank; |
---|
330 | sendbuf[1] = -size; |
---|
331 | |
---|
332 | std::vector<int>recvbuf(2, 0); |
---|
333 | |
---|
334 | MPI_Op op = MPI_MIN; |
---|
335 | |
---|
336 | MPI_Allreduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, comm); |
---|
337 | |
---|
338 | |
---|
339 | int allreduce_test = 1; |
---|
340 | |
---|
341 | |
---|
342 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
343 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
344 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
345 | { |
---|
346 | allreduce_test = 0; printf("%d %d\n", recvbuf[0], recvbuf[1]); |
---|
347 | } |
---|
348 | |
---|
349 | |
---|
350 | int allreduce_result; |
---|
351 | MPI_Reduce(&allreduce_test, &allreduce_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
352 | |
---|
353 | if(rank == 0 && allreduce_result) printf(" \t test MPI_Allreduce \t OK\n"); |
---|
354 | if(rank == 0 && !allreduce_result) printf(" \t test MPI_Allreduce \t FAILED\n"); |
---|
355 | } |
---|
356 | |
---|
357 | |
---|
358 | MPI_Barrier(comm); |
---|
359 | |
---|
360 | // TEST OF REDUCE_SCATTER |
---|
361 | { |
---|
362 | |
---|
363 | std::vector<int>sendbuf(2*size, rank); |
---|
364 | std::vector<int>recvbuf(2, -1); |
---|
365 | std::vector<int>recvcounts(size, 2); |
---|
366 | |
---|
367 | MPI_Op op = MPI_MIN; |
---|
368 | |
---|
369 | MPI_Reduce_scatter(sendbuf.data(), recvbuf.data(), recvcounts.data(), MPI_INT, op, comm); |
---|
370 | |
---|
371 | |
---|
372 | int reduce_scatter_test = 1; |
---|
373 | |
---|
374 | |
---|
375 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[0]-(size-1)*size/2) > 1.e-10) ) || |
---|
376 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1]-(size-1)) > 1.e-10) ) || |
---|
377 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] ) > 1.e-10) ) ) |
---|
378 | { |
---|
379 | reduce_scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
380 | } |
---|
381 | |
---|
382 | |
---|
383 | int reduce_scatter_result; |
---|
384 | MPI_Reduce(&reduce_scatter_test, &reduce_scatter_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
385 | |
---|
386 | if(rank == 0 && reduce_scatter_result) printf(" \t test MPI_Reduce_scatter OK\n"); |
---|
387 | if(rank == 0 && !reduce_scatter_result) printf(" \t test MPI_Reduce_scatter FAILED\n"); |
---|
388 | } |
---|
389 | |
---|
390 | MPI_Barrier(comm); |
---|
391 | |
---|
392 | // TEST OF SCATTER |
---|
393 | { |
---|
394 | |
---|
395 | int scatter_root; |
---|
396 | |
---|
397 | if(rank == 0) scatter_root = rand() % size; |
---|
398 | |
---|
399 | MPI_Bcast(&scatter_root, 1, MPI_INT, 0, comm); |
---|
400 | |
---|
401 | std::vector<int>sendbuf(2*size, rank); |
---|
402 | std::vector<int>recvbuf(2, -1); |
---|
403 | std::vector<int>recvcounts(size, 2); |
---|
404 | |
---|
405 | if(rank == scatter_root) |
---|
406 | { |
---|
407 | for(int i=0; i<size; i++) |
---|
408 | { |
---|
409 | sendbuf[2*i] = i; |
---|
410 | sendbuf[2*i+1] = size; |
---|
411 | } |
---|
412 | //for(int i=0; i<size*2; i++) printf("%d\t", sendbuf[i]); |
---|
413 | } |
---|
414 | |
---|
415 | |
---|
416 | MPI_Scatter(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, scatter_root, comm); |
---|
417 | |
---|
418 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
419 | |
---|
420 | int scatter_test = 1; |
---|
421 | |
---|
422 | |
---|
423 | if( abs(recvbuf[0]-rank) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
424 | { |
---|
425 | scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
426 | } |
---|
427 | |
---|
428 | |
---|
429 | int scatter_result; |
---|
430 | MPI_Reduce(&scatter_test, &scatter_result, 1, MPI_INT, MPI_MIN, scatter_root, comm); |
---|
431 | |
---|
432 | if(rank == scatter_root && scatter_result) printf("root = %d : \t test MPI_Scatter \t OK\n", scatter_root); |
---|
433 | if(rank == scatter_root && !scatter_result) printf("root = %d : \t test MPI_Scatter \t FAILED\n", scatter_root); |
---|
434 | } |
---|
435 | |
---|
436 | MPI_Barrier(comm); |
---|
437 | |
---|
438 | // TEST OF SCATTERV |
---|
439 | { |
---|
440 | |
---|
441 | int scatterv_root; |
---|
442 | |
---|
443 | if(rank == 0) scatterv_root = rand() % size; |
---|
444 | |
---|
445 | MPI_Bcast(&scatterv_root, 1, MPI_INT, 0, comm); |
---|
446 | |
---|
447 | std::vector<int>sendbuf(2*size, rank); |
---|
448 | std::vector<int>recvbuf(2, -1); |
---|
449 | std::vector<int>sendcounts(size, 2); |
---|
450 | std::vector<int>displs(size, 0); |
---|
451 | |
---|
452 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
453 | |
---|
454 | if(rank == scatterv_root) |
---|
455 | { |
---|
456 | for(int i=0; i<size; i++) |
---|
457 | { |
---|
458 | sendbuf[2*i] = i; |
---|
459 | sendbuf[2*i+1] = size; |
---|
460 | } |
---|
461 | } |
---|
462 | |
---|
463 | |
---|
464 | MPI_Scatterv(sendbuf.data(), sendcounts.data(), displs.data(), MPI_INT, recvbuf.data(), 2, MPI_INT, scatterv_root, comm); |
---|
465 | |
---|
466 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
467 | |
---|
468 | int scatterv_test = 1; |
---|
469 | |
---|
470 | |
---|
471 | if( abs(recvbuf[0]-(size-1-rank)) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
472 | { |
---|
473 | scatterv_test = 0; printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
474 | } |
---|
475 | |
---|
476 | |
---|
477 | int scatterv_result; |
---|
478 | MPI_Reduce(&scatterv_test, &scatterv_result, 1, MPI_INT, MPI_MIN, scatterv_root, comm); |
---|
479 | |
---|
480 | if(rank == scatterv_root && scatterv_result) printf("root = %d : \t test MPI_Scatterv \t OK\n", scatterv_root); |
---|
481 | if(rank == scatterv_root && !scatterv_result) printf("root = %d : \t test MPI_Scatterv \t FAILED\n", scatterv_root); |
---|
482 | } |
---|
483 | |
---|
484 | MPI_Barrier(comm); |
---|
485 | |
---|
486 | // TEST OF ALLTOALL |
---|
487 | { |
---|
488 | |
---|
489 | std::vector<int>sendbuf(size, rank); |
---|
490 | std::vector<int>recvbuf(size, -1); |
---|
491 | |
---|
492 | |
---|
493 | MPI_Alltoall(sendbuf.data(), 1, MPI_INT, recvbuf.data(), 1, MPI_INT, comm); |
---|
494 | |
---|
495 | int alltoall_result = 1; |
---|
496 | |
---|
497 | |
---|
498 | for(int i=0; i<size; i++) |
---|
499 | if( abs(recvbuf[i]-i) > 1.e-10 ) |
---|
500 | { |
---|
501 | alltoall_result = 0; printf("%d id = %d\n", recvbuf[i], rank); |
---|
502 | } |
---|
503 | |
---|
504 | if(rank == 0 && alltoall_result) printf(" \t test MPI_Alltoall \t OK\n"); |
---|
505 | if(rank == 0 && !alltoall_result) printf(" \t test MPI_Alltoall \t FAILED\n"); |
---|
506 | } |
---|
507 | |
---|
508 | // TEST OF SCAN |
---|
509 | { |
---|
510 | |
---|
511 | std::vector<int>sendbuf(2, rank); |
---|
512 | std::vector<int>recvbuf(2, -1); |
---|
513 | |
---|
514 | MPI_Op op = MPI_SUM; |
---|
515 | |
---|
516 | |
---|
517 | MPI_Scan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
518 | |
---|
519 | int scan_test = 1; |
---|
520 | |
---|
521 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
522 | |
---|
523 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank+1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank+1)/2) > 1.e-10) ) || |
---|
524 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
525 | (op == MPI_MAX && (abs(recvbuf[0] - rank) > 1.e-10 || abs(recvbuf[1] - rank) > 1.e-10) ) ) |
---|
526 | { |
---|
527 | scan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
528 | } |
---|
529 | |
---|
530 | int scan_result; |
---|
531 | MPI_Reduce(&scan_test, &scan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
532 | |
---|
533 | if(rank == 0 && scan_result) printf(" \t test MPI_Scan \t\t OK\n"); |
---|
534 | if(rank == 0 && !scan_result) printf(" \t test MPI_Scan \t\t FAILED\n"); |
---|
535 | } |
---|
536 | |
---|
537 | |
---|
538 | // TEST OF EXSCAN |
---|
539 | { |
---|
540 | |
---|
541 | std::vector<int>sendbuf(2, rank); |
---|
542 | std::vector<int>recvbuf(2, -1); |
---|
543 | |
---|
544 | MPI_Op op = MPI_SUM; |
---|
545 | |
---|
546 | |
---|
547 | MPI_Exscan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
548 | |
---|
549 | int exscan_test = 1; |
---|
550 | |
---|
551 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
552 | |
---|
553 | if(rank >0) |
---|
554 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank-1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank-1)/2) > 1.e-10) ) || |
---|
555 | (op == MPI_MIN && (abs(recvbuf[0] ) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
556 | (op == MPI_MAX && (abs(recvbuf[0] - rank+1) > 1.e-10 || abs(recvbuf[1] - rank+1) > 1.e-10) ) ) |
---|
557 | { |
---|
558 | exscan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
559 | } |
---|
560 | |
---|
561 | int exscan_result; |
---|
562 | MPI_Reduce(&exscan_test, &exscan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
563 | |
---|
564 | if(rank == 0 && exscan_result) printf(" \t test MPI_Exscan \t OK\n"); |
---|
565 | if(rank == 0 && !exscan_result) printf(" \t test MPI_Exscan \t FAILED\n"); |
---|
566 | } |
---|
567 | |
---|
568 | |
---|
569 | |
---|
570 | |
---|
571 | /* |
---|
572 | MPI_Barrier(comm); |
---|
573 | { |
---|
574 | int rank, size; |
---|
575 | MPI_Comm_rank(comm, &rank); |
---|
576 | MPI_Comm_size(comm, &size); |
---|
577 | |
---|
578 | //int color = rank%2; |
---|
579 | int color, remote_leader; |
---|
580 | if(rank<size-2) {color = 1; remote_leader = size-2;} |
---|
581 | else {color = 0; remote_leader = 0;} |
---|
582 | |
---|
583 | printf("rank = %d, color = %d, remote_leader = %d\n", rank, color, remote_leader); |
---|
584 | |
---|
585 | MPI_Comm sub_comm; |
---|
586 | MPI_Comm_split(comm, color, rank, &sub_comm); |
---|
587 | |
---|
588 | |
---|
589 | |
---|
590 | int sub_rank; |
---|
591 | MPI_Comm_rank(sub_comm, &sub_rank); |
---|
592 | |
---|
593 | |
---|
594 | MPI_Barrier(comm); |
---|
595 | if(rank == 0) printf("\tMPI_Comm_split OK\n"); |
---|
596 | MPI_Barrier(comm); |
---|
597 | |
---|
598 | MPI_Comm inter_comm; |
---|
599 | //MPI_Intercomm_create(sub_comm, 0, comm, (color+1)%2, 99, &inter_comm); |
---|
600 | MPI_Intercomm_create(sub_comm, 0, comm, remote_leader, 99, &inter_comm); |
---|
601 | |
---|
602 | MPI_Barrier(comm); |
---|
603 | if(rank == 0) printf("\tMPI_Intercomm_create OK\n"); |
---|
604 | MPI_Barrier(comm); |
---|
605 | |
---|
606 | |
---|
607 | |
---|
608 | int high=color; |
---|
609 | MPI_Comm intra_comm; |
---|
610 | MPI_Intercomm_merge(inter_comm, high, &intra_comm); |
---|
611 | |
---|
612 | int intra_rank, intra_size; |
---|
613 | MPI_Comm_rank(intra_comm, &intra_rank); |
---|
614 | MPI_Comm_size(intra_comm, &intra_size); |
---|
615 | |
---|
616 | MPI_Barrier(comm); |
---|
617 | if(rank == 0) printf("\tMPI_Intercomm_merge OK\n"); |
---|
618 | MPI_Barrier(comm); |
---|
619 | } |
---|
620 | |
---|
621 | //check_test_gatherv(comm); |
---|
622 | |
---|
623 | // MPI_Barrier(comm); |
---|
624 | // MPI_Comm_free(&sub_comm); |
---|
625 | |
---|
626 | |
---|
627 | // MPI_Barrier(comm); |
---|
628 | // MPI_Comm_free(&inter_comm); |
---|
629 | */ |
---|
630 | |
---|
631 | MPI_Barrier(comm); |
---|
632 | MPI_Comm_free(&comm); |
---|
633 | } |
---|
634 | |
---|
635 | int num_threads; |
---|
636 | if(mpi_rank < mpi_size-2) |
---|
637 | { |
---|
638 | printf("Proc %d is client\n", mpi_rank); |
---|
639 | num_threads = 2; |
---|
640 | } |
---|
641 | else |
---|
642 | { |
---|
643 | printf("Proc %d is server\n", mpi_rank); |
---|
644 | num_threads = 1; |
---|
645 | } |
---|
646 | |
---|
647 | omp_set_num_threads(num_threads); |
---|
648 | |
---|
649 | #pragma omp parallel default(shared) firstprivate(num_threads) |
---|
650 | { |
---|
651 | int num_ep = num_threads; |
---|
652 | MPI_Info info; |
---|
653 | |
---|
654 | //printf("omp_get_thread_num() = %d, omp_get_num_threads() = %d, num_threads = %d\n", omp_get_thread_num(), omp_get_num_threads(), num_threads); |
---|
655 | MPI_Comm *ep_comm; |
---|
656 | #pragma omp master |
---|
657 | { |
---|
658 | MPI_Comm *ep_comm; |
---|
659 | MPI_Comm_create_endpoints(MPI_COMM_WORLD.mpi_comm, num_ep, info, ep_comm); |
---|
660 | passage = ep_comm; |
---|
661 | } |
---|
662 | |
---|
663 | #pragma omp barrier |
---|
664 | |
---|
665 | |
---|
666 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
667 | comm = passage[omp_get_thread_num()]; |
---|
668 | |
---|
669 | int rank, size; |
---|
670 | MPI_Comm_rank(comm, &rank); |
---|
671 | MPI_Comm_size(comm, &size); |
---|
672 | |
---|
673 | |
---|
674 | |
---|
675 | bool isClient = false; |
---|
676 | bool isServer = false; |
---|
677 | |
---|
678 | if(omp_get_num_threads()>1) isClient = true; |
---|
679 | else isServer = true; |
---|
680 | |
---|
681 | printf("mpi_rank = %d, ep_rank = %d, isClient = %d\n", mpi_rank, rank, isClient); |
---|
682 | |
---|
683 | MPI_Win ep_win; |
---|
684 | MPI_Aint buf_size(1); |
---|
685 | int buf = rank; |
---|
686 | int local_buf = rank; |
---|
687 | MPI_Win_create(&buf, buf_size, sizeof(int), info, comm, &ep_win); |
---|
688 | MPI_Barrier(comm); |
---|
689 | |
---|
690 | // MPI_Win_fence(MPI_MODE_NOPRECEDE, ep_win); |
---|
691 | |
---|
692 | MPI_Barrier(comm); |
---|
693 | sleep(0.2); |
---|
694 | MPI_Barrier(comm); |
---|
695 | |
---|
696 | MPI_Win_fence(0, ep_win); |
---|
697 | |
---|
698 | if(rank == 0) |
---|
699 | { |
---|
700 | local_buf = 99; |
---|
701 | MPI_Aint displs(0); |
---|
702 | MPI_Put(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, ep_win); |
---|
703 | } |
---|
704 | |
---|
705 | if(rank == size-2) |
---|
706 | { |
---|
707 | MPI_Aint displs(0); |
---|
708 | MPI_Get(&local_buf, 1, MPI_INT, 2, displs, 1, MPI_INT, ep_win); |
---|
709 | } |
---|
710 | |
---|
711 | MPI_Win_fence(0, ep_win); |
---|
712 | |
---|
713 | if(rank == 0) |
---|
714 | { |
---|
715 | MPI_Aint displs(0); |
---|
716 | MPI_Accumulate(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, MPI_SUM, ep_win); |
---|
717 | } |
---|
718 | |
---|
719 | |
---|
720 | MPI_Barrier(comm); |
---|
721 | |
---|
722 | MPI_Win_fence(0, ep_win); |
---|
723 | |
---|
724 | |
---|
725 | MPI_Win_free(&ep_win); |
---|
726 | printf("rank = %d, buf = %d, local_buf = %d\n", rank, buf, local_buf); |
---|
727 | |
---|
728 | MPI_Comm_free(&comm); |
---|
729 | |
---|
730 | } |
---|
731 | |
---|
732 | MPI_Finalize(); |
---|
733 | |
---|
734 | } |
---|