#include "ep_lib.hpp"
#include "ep_declaration.hpp"

#include <assert.h>
#include <stdio.h>
#include <time.h>   /* time */

#include <omp.h>

#include <chrono>
#include <cmath>    // std::fabs for floating-point result checks
#include <cstdlib>  // std::abs(int), rand, srand
#include <ctime>
#include <iostream>
#include <ratio>
#include <vector>
---|
11 | |
---|
12 | using namespace ep_lib; |
---|
13 | using namespace std::chrono; |
---|
14 | |
---|
15 | |
---|
16 | |
---|
17 | int main(int argc, char **argv) |
---|
18 | { |
---|
19 | srand (time(NULL)); |
---|
20 | |
---|
21 | printf("Testing ep_lib\n"); |
---|
22 | int required=3, provided; |
---|
23 | |
---|
24 | MPI_Init_thread(&argc, &argv, required, &provided); |
---|
25 | |
---|
26 | assert(required==provided); |
---|
27 | |
---|
28 | int mpi_rank; |
---|
29 | int mpi_size; |
---|
30 | |
---|
31 | MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); |
---|
32 | MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); |
---|
33 | |
---|
34 | #pragma omp parallel default(shared) |
---|
35 | { |
---|
36 | MPI_Comm_rank(MPI_COMM_WORLD , &mpi_rank); |
---|
37 | |
---|
38 | int num_ep = omp_get_num_threads(); |
---|
39 | MPI_Info info; |
---|
40 | |
---|
41 | //printf("mpi_rank = %d, thread_num = %d\n", mpi_rank, omp_get_thread_num()); |
---|
42 | |
---|
43 | MPI_Comm *ep_comm; |
---|
44 | #pragma omp master |
---|
45 | { |
---|
46 | MPI_Comm *ep_comm; |
---|
47 | MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); |
---|
48 | passage = ep_comm; |
---|
49 | } |
---|
50 | |
---|
51 | #pragma omp barrier |
---|
52 | |
---|
53 | |
---|
54 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
55 | comm = passage[omp_get_thread_num()]; |
---|
56 | |
---|
57 | int rank, size; |
---|
58 | MPI_Comm_rank(comm, &rank); |
---|
59 | MPI_Comm_size(comm, &size); |
---|
60 | /* |
---|
61 | // TIMING SYCHRONIZATION |
---|
62 | { |
---|
63 | int n=100000; |
---|
64 | |
---|
65 | MPI_Barrier(comm); |
---|
66 | |
---|
67 | high_resolution_clock::time_point t1 = high_resolution_clock::now(); |
---|
68 | |
---|
69 | for(int i=0; i<n; i++) |
---|
70 | MPI_Barrier_local(comm); |
---|
71 | |
---|
72 | high_resolution_clock::time_point t2 = high_resolution_clock::now(); |
---|
73 | duration<double> time_span = duration_cast<duration<double>>(t2 - t1); |
---|
74 | #pragma omp master |
---|
75 | std::cout << "proc "<< mpi_rank <<" ep_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
76 | |
---|
77 | t1 = high_resolution_clock::now(); |
---|
78 | |
---|
79 | for(int i=0; i<n; i++) |
---|
80 | { |
---|
81 | #pragma omp barrier |
---|
82 | } |
---|
83 | |
---|
84 | t2 = high_resolution_clock::now(); |
---|
85 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
86 | |
---|
87 | #pragma omp master |
---|
88 | std::cout << "proc "<< mpi_rank <<" omp_barrier "<< time_span.count() << " seconds."<<std::endl; |
---|
89 | |
---|
90 | t1 = high_resolution_clock::now(); |
---|
91 | |
---|
92 | for(int i=0; i<n; i++) |
---|
93 | { |
---|
94 | //#pragma omp barrier |
---|
95 | } |
---|
96 | |
---|
97 | t2 = high_resolution_clock::now(); |
---|
98 | time_span = duration_cast<duration<double>>(t2 - t1); |
---|
99 | |
---|
100 | MPI_Barrier(comm); |
---|
101 | |
---|
102 | #pragma omp master |
---|
103 | std::cout << "proc "<< mpi_rank <<" for_loop "<< time_span.count() << " seconds."<<std::endl; |
---|
104 | }// END TIMING SYCHRONIZATION |
---|
105 | */ |
---|
106 | // TEST of p2p blocking communication |
---|
107 | { |
---|
108 | MPI_Barrier(comm); |
---|
109 | MPI_Barrier(comm); |
---|
110 | |
---|
111 | double sendbuf[10]; |
---|
112 | double recvbuf[20]; |
---|
113 | |
---|
114 | int sender = 1; |
---|
115 | int receiver = 2; |
---|
116 | |
---|
117 | if(rank == sender) |
---|
118 | { |
---|
119 | for(int i=0; i<10; i++) sendbuf[i] = 99.99; |
---|
120 | MPI_Send(sendbuf, 10, MPI_DOUBLE, receiver, 99, comm); |
---|
121 | for(int i=0; i<10; i++) sendbuf[i] = -99.99; |
---|
122 | MPI_Send(sendbuf, 10, MPI_DOUBLE, receiver, 11, comm); |
---|
123 | } |
---|
124 | |
---|
125 | if(rank == receiver) |
---|
126 | { |
---|
127 | MPI_Status status; |
---|
128 | for(int i=0; i<20; i++) recvbuf[i] = 0.0; |
---|
129 | MPI_Recv(&recvbuf[10], 10, MPI_DOUBLE, sender, 99, comm, &status); |
---|
130 | MPI_Recv(recvbuf, 10, MPI_DOUBLE, sender, 11, comm, &status); |
---|
131 | |
---|
132 | for(int i=0; i<20; i++) std::cout << "recvbuf["<< i <<"] = "<< recvbuf[i] << std::endl; |
---|
133 | } |
---|
134 | |
---|
135 | MPI_Barrier(comm); |
---|
136 | |
---|
137 | }//TEST of p2p blocking communication |
---|
138 | |
---|
139 | // TEST of p2p non-blocking communication |
---|
140 | { |
---|
141 | MPI_Barrier(comm); |
---|
142 | MPI_Barrier(comm); |
---|
143 | |
---|
144 | double sendbuf[10]; |
---|
145 | double recvbuf[20]; |
---|
146 | |
---|
147 | int sender = 3; |
---|
148 | int receiver = 7; |
---|
149 | |
---|
150 | MPI_Request request[2]; |
---|
151 | |
---|
152 | if(rank == sender) |
---|
153 | { |
---|
154 | |
---|
155 | for(int i=0; i<10; i++) sendbuf[i] = 99.99; |
---|
156 | MPI_Isend(sendbuf, 10, MPI_DOUBLE, receiver, 99, comm, &request[0]); |
---|
157 | for(int i=0; i<10; i++) sendbuf[i] = -99.99; |
---|
158 | MPI_Isend(sendbuf, 10, MPI_DOUBLE, receiver, 11, comm, &request[1]); |
---|
159 | } |
---|
160 | |
---|
161 | if(rank == receiver) |
---|
162 | { |
---|
163 | for(int i=0; i<20; i++) recvbuf[i] = 0.0; |
---|
164 | MPI_Irecv(&recvbuf[10], 10, MPI_DOUBLE, sender, 11, comm, &request[0]); |
---|
165 | MPI_Irecv(recvbuf, 10, MPI_DOUBLE, sender, 99, comm, &request[1]); |
---|
166 | } |
---|
167 | |
---|
168 | MPI_Barrier(comm); |
---|
169 | |
---|
170 | if(rank == receiver || rank == sender) |
---|
171 | { |
---|
172 | MPI_Status status[2]; |
---|
173 | MPI_Waitall(2, request, status); |
---|
174 | } |
---|
175 | |
---|
176 | MPI_Barrier(comm); |
---|
177 | |
---|
178 | if(rank == receiver) |
---|
179 | { |
---|
180 | for(int i=0; i<20; i++) std::cout << "recvbuf["<< i <<"] = "<< recvbuf[i] << std::endl; |
---|
181 | } |
---|
182 | |
---|
183 | }//TEST of p2p blocking communication |
---|
184 | |
---|
185 | |
---|
186 | // TEST OF BCAST FROM A RANDOM ROOT |
---|
187 | { |
---|
188 | int bcast_root; |
---|
189 | |
---|
190 | if(rank == 0) bcast_root = rand() % size; |
---|
191 | |
---|
192 | MPI_Bcast(&bcast_root, 1, MPI_INT, 0, comm); |
---|
193 | |
---|
194 | int sendbuf[2]; |
---|
195 | |
---|
196 | sendbuf[0] = rank; |
---|
197 | sendbuf[1] = size; |
---|
198 | |
---|
199 | MPI_Bcast(sendbuf, 2, MPI_INT, bcast_root, comm); |
---|
200 | |
---|
201 | int bcast_test = 0; |
---|
202 | if(sendbuf[0] == bcast_root && sendbuf[1] == size) bcast_test = 1; |
---|
203 | |
---|
204 | int bcast_result; |
---|
205 | |
---|
206 | MPI_Reduce(&bcast_test, &bcast_result, 1, MPI_INT, MPI_MIN, bcast_root, comm); |
---|
207 | |
---|
208 | if(bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t OK\n", bcast_root); |
---|
209 | if(!bcast_result && rank == bcast_root) printf("root = %d : \t test MPI_Bcast \t FAILED %d\n", bcast_root, bcast_result); |
---|
210 | } |
---|
211 | |
---|
212 | MPI_Barrier(comm); |
---|
213 | |
---|
214 | // TEST OF GATHER FROM A RAMDOM ROOT |
---|
215 | { |
---|
216 | int gather_root; |
---|
217 | |
---|
218 | if(rank == 0) gather_root = rand() % size; |
---|
219 | |
---|
220 | MPI_Bcast(&gather_root, 1, MPI_INT, 0, comm); |
---|
221 | |
---|
222 | double sendbuf[2]; |
---|
223 | sendbuf[0] = rank * 1.0; |
---|
224 | sendbuf[1] = size * (-1.0); |
---|
225 | |
---|
226 | std::vector<double>recvbuf(2*size, 0); |
---|
227 | |
---|
228 | MPI_Gather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, gather_root, comm); |
---|
229 | |
---|
230 | bool gather_result = true; |
---|
231 | |
---|
232 | if(rank == gather_root) |
---|
233 | { |
---|
234 | for(int i=0; i<size; i++) |
---|
235 | { |
---|
236 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
237 | { |
---|
238 | gather_result = false; |
---|
239 | break; |
---|
240 | } |
---|
241 | } |
---|
242 | |
---|
243 | if(gather_result) printf("root = %d : \t test MPI_Gather \t OK \n", gather_root); |
---|
244 | else printf("root = %d : \t test MPI_Gather \t FAILED\n", gather_root); |
---|
245 | } |
---|
246 | } |
---|
247 | |
---|
248 | MPI_Barrier(comm); |
---|
249 | |
---|
250 | // TEST OF GATHERV FROM A RAMDOM ROOT |
---|
251 | { |
---|
252 | int gatherv_root; |
---|
253 | |
---|
254 | if(rank == 0) gatherv_root = rand() % size; |
---|
255 | |
---|
256 | MPI_Bcast(&gatherv_root, 1, MPI_INT, 0, comm); |
---|
257 | |
---|
258 | int sendbuf[2]; |
---|
259 | sendbuf[0] = rank; |
---|
260 | sendbuf[1] = -size; |
---|
261 | |
---|
262 | std::vector<int>recvbuf(2*size, 0); |
---|
263 | |
---|
264 | std::vector<int>recvcounts(size, 2); |
---|
265 | std::vector<int>displs(size, 0); |
---|
266 | |
---|
267 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
268 | |
---|
269 | MPI_Gatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, gatherv_root, comm); |
---|
270 | |
---|
271 | bool gatherv_result = true; |
---|
272 | |
---|
273 | if(rank == gatherv_root) |
---|
274 | { |
---|
275 | for(int i=0; i<size; i++) |
---|
276 | { |
---|
277 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
278 | { |
---|
279 | gatherv_result = false; printf("%lf %lf root = %d, i = %d\n", recvbuf[2*i], recvbuf[2*i+1], gatherv_root, i); |
---|
280 | break; |
---|
281 | } |
---|
282 | } |
---|
283 | |
---|
284 | //for(int i=0; i<size*2; i++) printf("%lf\t", recvbuf[i]); |
---|
285 | //printf("\n"); |
---|
286 | |
---|
287 | if(gatherv_result) printf("root = %d : \t test MPI_Gatherv \t OK\n", gatherv_root); |
---|
288 | else printf("root = %d : \t test MPI_Gatherv \t FAILED\n", gatherv_root); |
---|
289 | } |
---|
290 | } |
---|
291 | |
---|
292 | MPI_Barrier(comm); |
---|
293 | |
---|
294 | // TEST OF ALLGATHER |
---|
295 | { |
---|
296 | double sendbuf[2]; |
---|
297 | sendbuf[0] = rank * 1.0; |
---|
298 | sendbuf[1] = size * (-1.0); |
---|
299 | |
---|
300 | std::vector<double>recvbuf(2*size, 0); |
---|
301 | |
---|
302 | MPI_Allgather(sendbuf, 2, MPI_DOUBLE, recvbuf.data(), 2, MPI_DOUBLE, comm); |
---|
303 | |
---|
304 | int allgather_test = 1; |
---|
305 | |
---|
306 | for(int i=0; i<size; i++) |
---|
307 | { |
---|
308 | if(abs(recvbuf[2*i] - i) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
309 | { |
---|
310 | allgather_test = 0; |
---|
311 | break; |
---|
312 | } |
---|
313 | } |
---|
314 | |
---|
315 | int allgather_result; |
---|
316 | MPI_Reduce(&allgather_test, &allgather_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
317 | |
---|
318 | if(rank == 0 && allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
319 | if(rank == 0 && !allgather_result) printf(" \t test MPI_Allgather \t OK \n"); |
---|
320 | |
---|
321 | } |
---|
322 | |
---|
323 | MPI_Barrier(comm); |
---|
324 | |
---|
325 | // TEST OF ALLGATHERV |
---|
326 | { |
---|
327 | int sendbuf[2]; |
---|
328 | sendbuf[0] = rank; |
---|
329 | sendbuf[1] = -size; |
---|
330 | |
---|
331 | std::vector<int>recvbuf(2*size, 0); |
---|
332 | |
---|
333 | std::vector<int>recvcounts(size, 2); |
---|
334 | std::vector<int>displs(size, 0); |
---|
335 | |
---|
336 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
337 | |
---|
338 | MPI_Allgatherv(sendbuf, 2, MPI_INT, recvbuf.data(), recvcounts.data(), displs.data(), MPI_INT, comm); |
---|
339 | |
---|
340 | int allgatherv_test = 1; |
---|
341 | |
---|
342 | |
---|
343 | |
---|
344 | for(int i=0; i<size; i++) |
---|
345 | { |
---|
346 | if(abs(recvbuf[2*i] - (size-1-i)) > 1.e-10 || abs(recvbuf[2*i+1] + size) > 1.e-10) |
---|
347 | { |
---|
348 | allgatherv_test = 0; printf("ID : %d %d %d %d %d\n", rank, recvbuf[2*i], recvbuf[2*i+1] , recvbuf[2*i] - (size-1-i), recvbuf[2*i+1] + size); |
---|
349 | break; |
---|
350 | } |
---|
351 | } |
---|
352 | |
---|
353 | |
---|
354 | int allgatherv_result; |
---|
355 | MPI_Reduce(&allgatherv_test, &allgatherv_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
356 | |
---|
357 | if(rank == 0 && allgatherv_result) printf(" \t test MPI_Allgatherv \t OK \n"); |
---|
358 | if(rank == 0 && !allgatherv_result) printf(" \t test MPI_Allgatherv \t FAILED %d\n", allgatherv_result); |
---|
359 | |
---|
360 | } |
---|
361 | |
---|
362 | MPI_Barrier(comm); |
---|
363 | |
---|
364 | // TEST OF REDUCE |
---|
365 | { |
---|
366 | int reduce_root; |
---|
367 | |
---|
368 | if(rank == 0) reduce_root = rand() % size; |
---|
369 | |
---|
370 | MPI_Bcast(&reduce_root, 1, MPI_INT, 0, comm); |
---|
371 | |
---|
372 | int sendbuf[2]; |
---|
373 | sendbuf[0] = rank; |
---|
374 | sendbuf[1] = -size; |
---|
375 | |
---|
376 | std::vector<int>recvbuf(2, 0); |
---|
377 | |
---|
378 | MPI_Op op = MPI_MIN; |
---|
379 | |
---|
380 | MPI_Reduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, reduce_root, comm); |
---|
381 | |
---|
382 | |
---|
383 | bool reduce_result = true; |
---|
384 | |
---|
385 | if(rank == reduce_root) |
---|
386 | { |
---|
387 | for(int i=0; i<2; i++) |
---|
388 | { |
---|
389 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
390 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
391 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
392 | { |
---|
393 | reduce_result = false; printf("%d %d root = %d, i = %d\n", recvbuf[0], recvbuf[1], reduce_root, i); |
---|
394 | break; |
---|
395 | } |
---|
396 | } |
---|
397 | } |
---|
398 | |
---|
399 | if(rank == reduce_root && reduce_result) printf("root = %d : \t test MPI_Reduce \t OK\n", reduce_root); |
---|
400 | if(rank == reduce_root && !reduce_result) printf("root = %d : \t test MPI_Reduce \t FAILED\n", reduce_root); |
---|
401 | } |
---|
402 | |
---|
403 | |
---|
404 | MPI_Barrier(comm); |
---|
405 | |
---|
406 | // TEST OF ALLREDUCE |
---|
407 | { |
---|
408 | |
---|
409 | int sendbuf[2]; |
---|
410 | sendbuf[0] = rank; |
---|
411 | sendbuf[1] = -size; |
---|
412 | |
---|
413 | std::vector<int>recvbuf(2, 0); |
---|
414 | |
---|
415 | MPI_Op op = MPI_MIN; |
---|
416 | |
---|
417 | MPI_Allreduce(sendbuf, recvbuf.data(), 2, MPI_INT, op, comm); |
---|
418 | |
---|
419 | |
---|
420 | int allreduce_test = 1; |
---|
421 | |
---|
422 | |
---|
423 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[1] + size * size) > 1.e-10) ) || |
---|
424 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) || |
---|
425 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] + size) > 1.e-10) ) ) |
---|
426 | { |
---|
427 | allreduce_test = 0; printf("%d %d\n", recvbuf[0], recvbuf[1]); |
---|
428 | } |
---|
429 | |
---|
430 | |
---|
431 | int allreduce_result; |
---|
432 | MPI_Reduce(&allreduce_test, &allreduce_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
433 | |
---|
434 | if(rank == 0 && allreduce_result) printf(" \t test MPI_Allreduce \t OK\n"); |
---|
435 | if(rank == 0 && !allreduce_result) printf(" \t test MPI_Allreduce \t FAILED\n"); |
---|
436 | } |
---|
437 | |
---|
438 | |
---|
439 | MPI_Barrier(comm); |
---|
440 | |
---|
441 | // TEST OF REDUCE_SCATTER |
---|
442 | { |
---|
443 | |
---|
444 | std::vector<int>sendbuf(2*size, rank); |
---|
445 | std::vector<int>recvbuf(2, -1); |
---|
446 | std::vector<int>recvcounts(size, 2); |
---|
447 | |
---|
448 | MPI_Op op = MPI_MIN; |
---|
449 | |
---|
450 | MPI_Reduce_scatter(sendbuf.data(), recvbuf.data(), recvcounts.data(), MPI_INT, op, comm); |
---|
451 | |
---|
452 | |
---|
453 | int reduce_scatter_test = 1; |
---|
454 | |
---|
455 | |
---|
456 | if((op == MPI_SUM && (abs(recvbuf[0]-(size-1)*size/2) > 1.e-10 || abs(recvbuf[0]-(size-1)*size/2) > 1.e-10) ) || |
---|
457 | (op == MPI_MAX && (abs(recvbuf[0]-(size-1)) > 1.e-10 || abs(recvbuf[1]-(size-1)) > 1.e-10) ) || |
---|
458 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1] ) > 1.e-10) ) ) |
---|
459 | { |
---|
460 | reduce_scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
461 | } |
---|
462 | |
---|
463 | |
---|
464 | int reduce_scatter_result; |
---|
465 | MPI_Reduce(&reduce_scatter_test, &reduce_scatter_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
466 | |
---|
467 | if(rank == 0 && reduce_scatter_result) printf(" \t test MPI_Reduce_scatter OK\n"); |
---|
468 | if(rank == 0 && !reduce_scatter_result) printf(" \t test MPI_Reduce_scatter FAILED\n"); |
---|
469 | } |
---|
470 | |
---|
471 | MPI_Barrier(comm); |
---|
472 | |
---|
473 | // TEST OF SCATTER |
---|
474 | { |
---|
475 | |
---|
476 | int scatter_root; |
---|
477 | |
---|
478 | if(rank == 0) scatter_root = rand() % size; |
---|
479 | |
---|
480 | MPI_Bcast(&scatter_root, 1, MPI_INT, 0, comm); |
---|
481 | |
---|
482 | std::vector<int>sendbuf(2*size, rank); |
---|
483 | std::vector<int>recvbuf(2, -1); |
---|
484 | std::vector<int>recvcounts(size, 2); |
---|
485 | |
---|
486 | if(rank == scatter_root) |
---|
487 | { |
---|
488 | for(int i=0; i<size; i++) |
---|
489 | { |
---|
490 | sendbuf[2*i] = i; |
---|
491 | sendbuf[2*i+1] = size; |
---|
492 | } |
---|
493 | //for(int i=0; i<size*2; i++) printf("%d\t", sendbuf[i]); |
---|
494 | } |
---|
495 | |
---|
496 | |
---|
497 | MPI_Scatter(sendbuf.data(), 2, MPI_INT, recvbuf.data(), 2, MPI_INT, scatter_root, comm); |
---|
498 | |
---|
499 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
500 | |
---|
501 | int scatter_test = 1; |
---|
502 | |
---|
503 | |
---|
504 | if( abs(recvbuf[0]-rank) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
505 | { |
---|
506 | scatter_test = 0; //printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
507 | } |
---|
508 | |
---|
509 | |
---|
510 | int scatter_result; |
---|
511 | MPI_Reduce(&scatter_test, &scatter_result, 1, MPI_INT, MPI_MIN, scatter_root, comm); |
---|
512 | |
---|
513 | if(rank == scatter_root && scatter_result) printf("root = %d : \t test MPI_Scatter \t OK\n", scatter_root); |
---|
514 | if(rank == scatter_root && !scatter_result) printf("root = %d : \t test MPI_Scatter \t FAILED\n", scatter_root); |
---|
515 | } |
---|
516 | |
---|
517 | MPI_Barrier(comm); |
---|
518 | |
---|
519 | // TEST OF SCATTERV |
---|
520 | { |
---|
521 | |
---|
522 | int scatterv_root; |
---|
523 | |
---|
524 | if(rank == 0) scatterv_root = rand() % size; |
---|
525 | |
---|
526 | MPI_Bcast(&scatterv_root, 1, MPI_INT, 0, comm); |
---|
527 | |
---|
528 | std::vector<int>sendbuf(2*size, rank); |
---|
529 | std::vector<int>recvbuf(2, -1); |
---|
530 | std::vector<int>sendcounts(size, 2); |
---|
531 | std::vector<int>displs(size, 0); |
---|
532 | |
---|
533 | for(int i=0; i<size; i++) displs[i] = 2*(size-1-i); |
---|
534 | |
---|
535 | if(rank == scatterv_root) |
---|
536 | { |
---|
537 | for(int i=0; i<size; i++) |
---|
538 | { |
---|
539 | sendbuf[2*i] = i; |
---|
540 | sendbuf[2*i+1] = size; |
---|
541 | } |
---|
542 | } |
---|
543 | |
---|
544 | |
---|
545 | MPI_Scatterv(sendbuf.data(), sendcounts.data(), displs.data(), MPI_INT, recvbuf.data(), 2, MPI_INT, scatterv_root, comm); |
---|
546 | |
---|
547 | //printf("ID = %d : %d %d\n", rank, recvbuf[0], recvbuf[1]); |
---|
548 | |
---|
549 | int scatterv_test = 1; |
---|
550 | |
---|
551 | |
---|
552 | if( abs(recvbuf[0]-(size-1-rank)) > 1.e-10 || abs(recvbuf[1]-size) > 1.e-10 ) |
---|
553 | { |
---|
554 | scatterv_test = 0; printf("%d %d id = %d\n", recvbuf[0], recvbuf[1], rank); |
---|
555 | } |
---|
556 | |
---|
557 | |
---|
558 | int scatterv_result; |
---|
559 | MPI_Reduce(&scatterv_test, &scatterv_result, 1, MPI_INT, MPI_MIN, scatterv_root, comm); |
---|
560 | |
---|
561 | if(rank == scatterv_root && scatterv_result) printf("root = %d : \t test MPI_Scatterv \t OK\n", scatterv_root); |
---|
562 | if(rank == scatterv_root && !scatterv_result) printf("root = %d : \t test MPI_Scatterv \t FAILED\n", scatterv_root); |
---|
563 | } |
---|
564 | |
---|
565 | MPI_Barrier(comm); |
---|
566 | |
---|
567 | // TEST OF ALLTOALL |
---|
568 | { |
---|
569 | |
---|
570 | std::vector<int>sendbuf(size, rank); |
---|
571 | std::vector<int>recvbuf(size, -1); |
---|
572 | |
---|
573 | |
---|
574 | MPI_Alltoall(sendbuf.data(), 1, MPI_INT, recvbuf.data(), 1, MPI_INT, comm); |
---|
575 | |
---|
576 | int alltoall_result = 1; |
---|
577 | |
---|
578 | |
---|
579 | for(int i=0; i<size; i++) |
---|
580 | if( abs(recvbuf[i]-i) > 1.e-10 ) |
---|
581 | { |
---|
582 | alltoall_result = 0; printf("%d id = %d\n", recvbuf[i], rank); |
---|
583 | } |
---|
584 | |
---|
585 | if(rank == 0 && alltoall_result) printf(" \t test MPI_Alltoall \t OK\n"); |
---|
586 | if(rank == 0 && !alltoall_result) printf(" \t test MPI_Alltoall \t FAILED\n"); |
---|
587 | } |
---|
588 | |
---|
589 | // TEST OF SCAN |
---|
590 | { |
---|
591 | |
---|
592 | std::vector<int>sendbuf(2, rank); |
---|
593 | std::vector<int>recvbuf(2, -1); |
---|
594 | |
---|
595 | MPI_Op op = MPI_SUM; |
---|
596 | |
---|
597 | |
---|
598 | MPI_Scan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
599 | |
---|
600 | int scan_test = 1; |
---|
601 | |
---|
602 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
603 | |
---|
604 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank+1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank+1)/2) > 1.e-10) ) || |
---|
605 | (op == MPI_MIN && (abs(recvbuf[0]) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
606 | (op == MPI_MAX && (abs(recvbuf[0] - rank) > 1.e-10 || abs(recvbuf[1] - rank) > 1.e-10) ) ) |
---|
607 | { |
---|
608 | scan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
609 | } |
---|
610 | |
---|
611 | int scan_result; |
---|
612 | MPI_Reduce(&scan_test, &scan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
613 | |
---|
614 | if(rank == 0 && scan_result) printf(" \t test MPI_Scan \t\t OK\n"); |
---|
615 | if(rank == 0 && !scan_result) printf(" \t test MPI_Scan \t\t FAILED\n"); |
---|
616 | } |
---|
617 | |
---|
618 | |
---|
619 | // TEST OF EXSCAN |
---|
620 | { |
---|
621 | |
---|
622 | std::vector<int>sendbuf(2, rank); |
---|
623 | std::vector<int>recvbuf(2, -1); |
---|
624 | |
---|
625 | MPI_Op op = MPI_SUM; |
---|
626 | |
---|
627 | |
---|
628 | MPI_Exscan(sendbuf.data(), recvbuf.data(), 2, MPI_INT, op, comm); |
---|
629 | |
---|
630 | int exscan_test = 1; |
---|
631 | |
---|
632 | // printf(" ID=%d : %d %d \n", rank, recvbuf[0], recvbuf[1]); |
---|
633 | |
---|
634 | if(rank >0) |
---|
635 | if((op == MPI_SUM && (abs(recvbuf[0]-rank*(rank-1)/2) > 1.e-10 || abs(recvbuf[1]-rank*(rank-1)/2) > 1.e-10) ) || |
---|
636 | (op == MPI_MIN && (abs(recvbuf[0] ) > 1.e-10 || abs(recvbuf[1]) > 1.e-10) ) || |
---|
637 | (op == MPI_MAX && (abs(recvbuf[0] - rank+1) > 1.e-10 || abs(recvbuf[1] - rank+1) > 1.e-10) ) ) |
---|
638 | { |
---|
639 | exscan_test = 0; //printf("%d id = %d\n", recvbuf[i], rank); |
---|
640 | } |
---|
641 | |
---|
642 | int exscan_result; |
---|
643 | MPI_Reduce(&exscan_test, &exscan_result, 1, MPI_INT, MPI_MIN, 0, comm); |
---|
644 | |
---|
645 | if(rank == 0 && exscan_result) printf(" \t test MPI_Exscan \t OK\n"); |
---|
646 | if(rank == 0 && !exscan_result) printf(" \t test MPI_Exscan \t FAILED\n"); |
---|
647 | } |
---|
648 | |
---|
649 | |
---|
650 | |
---|
651 | MPI_Barrier(comm); |
---|
652 | { |
---|
653 | int rank, size; |
---|
654 | MPI_Comm_rank(comm, &rank); |
---|
655 | MPI_Comm_size(comm, &size); |
---|
656 | |
---|
657 | //int color = rank%2; |
---|
658 | int color, remote_leader; |
---|
659 | if(rank<size-2) {color = 1; remote_leader = size-2;} |
---|
660 | else {color = 0; remote_leader = 0;} |
---|
661 | |
---|
662 | printf("rank = %d, color = %d, remote_leader = %d\n", rank, color, remote_leader); |
---|
663 | |
---|
664 | MPI_Comm sub_comm; |
---|
665 | MPI_Comm_split(comm, color, rank, &sub_comm); |
---|
666 | |
---|
667 | |
---|
668 | |
---|
669 | int sub_rank; |
---|
670 | MPI_Comm_rank(sub_comm, &sub_rank); |
---|
671 | |
---|
672 | |
---|
673 | MPI_Barrier(comm); |
---|
674 | if(rank == 0) printf("\tMPI_Comm_split OK\n"); |
---|
675 | MPI_Barrier(comm); |
---|
676 | /* |
---|
677 | MPI_Comm inter_comm; |
---|
678 | //MPI_Intercomm_create(sub_comm, 0, comm, (color+1)%2, 99, &inter_comm); |
---|
679 | MPI_Intercomm_create(sub_comm, 0, comm, remote_leader, 99, &inter_comm); |
---|
680 | |
---|
681 | MPI_Barrier(comm); |
---|
682 | if(rank == 0) printf("\tMPI_Intercomm_create OK\n"); |
---|
683 | MPI_Barrier(comm); |
---|
684 | |
---|
685 | |
---|
686 | |
---|
687 | int high=color; |
---|
688 | MPI_Comm intra_comm; |
---|
689 | MPI_Intercomm_merge(inter_comm, high, &intra_comm); |
---|
690 | |
---|
691 | int intra_rank, intra_size; |
---|
692 | MPI_Comm_rank(intra_comm, &intra_rank); |
---|
693 | MPI_Comm_size(intra_comm, &intra_size); |
---|
694 | |
---|
695 | MPI_Barrier(comm); |
---|
696 | if(rank == 0) printf("\tMPI_Intercomm_merge OK\n"); |
---|
697 | MPI_Barrier(comm); |
---|
698 | |
---|
699 | |
---|
700 | |
---|
701 | //check_test_gatherv(comm); |
---|
702 | */ |
---|
703 | MPI_Barrier(comm); |
---|
704 | MPI_Comm_free(&sub_comm); |
---|
705 | |
---|
706 | |
---|
707 | //MPI_Barrier(comm); |
---|
708 | //MPI_Comm_free(&inter_comm); |
---|
709 | |
---|
710 | |
---|
711 | MPI_Barrier(comm); |
---|
712 | MPI_Comm_free(&comm); |
---|
713 | } |
---|
714 | } |
---|
715 | |
---|
716 | /* |
---|
717 | int num_threads; |
---|
718 | if(mpi_rank < mpi_size-2) |
---|
719 | { |
---|
720 | printf("Proc %d is client\n", mpi_rank); |
---|
721 | num_threads = 2;//+mpi_rank; |
---|
722 | } |
---|
723 | else |
---|
724 | { |
---|
725 | printf("Proc %d is server\n", mpi_rank); |
---|
726 | num_threads = 1; |
---|
727 | } |
---|
728 | |
---|
729 | omp_set_num_threads(num_threads); |
---|
730 | |
---|
731 | #pragma omp parallel default(shared) firstprivate(num_threads) |
---|
732 | { |
---|
733 | int num_ep = num_threads; |
---|
734 | MPI_Info info; |
---|
735 | |
---|
736 | //printf("omp_get_thread_num() = %d, omp_get_num_threads() = %d, num_threads = %d\n", omp_get_thread_num(), omp_get_num_threads(), num_threads); |
---|
737 | MPI_Comm *ep_comm; |
---|
738 | #pragma omp master |
---|
739 | { |
---|
740 | MPI_Comm *ep_comm; |
---|
741 | MPI_Comm_create_endpoints(MPI_COMM_WORLD->mpi_comm, num_ep, info, ep_comm); |
---|
742 | passage = ep_comm; |
---|
743 | } |
---|
744 | |
---|
745 | #pragma omp barrier |
---|
746 | |
---|
747 | |
---|
748 | MPI_Comm comm; // this should act as EP_COMM_WORLD |
---|
749 | comm = passage[omp_get_thread_num()]; |
---|
750 | |
---|
751 | int rank, size; |
---|
752 | MPI_Comm_rank(comm, &rank); |
---|
753 | MPI_Comm_size(comm, &size); |
---|
754 | |
---|
755 | |
---|
756 | |
---|
757 | bool isClient = false; |
---|
758 | bool isServer = false; |
---|
759 | |
---|
760 | if(omp_get_num_threads()>1) isClient = true; |
---|
761 | else isServer = true; |
---|
762 | |
---|
763 | printf("mpi_rank = %d, ep_rank = %d, isClient = %d\n", mpi_rank, rank, isClient); |
---|
764 | |
---|
765 | MPI_Win ep_win; |
---|
766 | MPI_Aint buf_size=1; |
---|
767 | int buf = rank; |
---|
768 | int local_buf = rank; |
---|
769 | int result_buf = -1; |
---|
770 | MPI_Win_create(&buf, buf_size, sizeof(int), info, comm, &ep_win); |
---|
771 | MPI_Barrier(comm); |
---|
772 | |
---|
773 | // MPI_Win_fence(MPI_MODE_NOPRECEDE, ep_win); |
---|
774 | |
---|
775 | MPI_Barrier(comm); |
---|
776 | sleep(0.2); |
---|
777 | MPI_Barrier(comm); |
---|
778 | |
---|
779 | MPI_Win_fence(0, ep_win); |
---|
780 | |
---|
781 | if(rank == 0) |
---|
782 | { |
---|
783 | local_buf = 99; |
---|
784 | MPI_Aint displs=0; |
---|
785 | MPI_Put(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, ep_win); |
---|
786 | } |
---|
787 | |
---|
788 | if(rank == size-2) |
---|
789 | { |
---|
790 | MPI_Aint displs(0); |
---|
791 | MPI_Get(&local_buf, 1, MPI_INT, 2, displs, 1, MPI_INT, ep_win); |
---|
792 | } |
---|
793 | |
---|
794 | MPI_Win_fence(0, ep_win); |
---|
795 | |
---|
796 | if(rank == 1) |
---|
797 | { |
---|
798 | MPI_Aint displs=0; |
---|
799 | MPI_Accumulate(&local_buf, 1, MPI_INT, size-1, displs, 1, MPI_INT, MPI_REPLACE, ep_win); |
---|
800 | } |
---|
801 | |
---|
802 | |
---|
803 | MPI_Barrier(comm); |
---|
804 | |
---|
805 | MPI_Win_fence(0, ep_win); |
---|
806 | |
---|
807 | if(rank == 2) |
---|
808 | { |
---|
809 | MPI_Aint displs = 0; |
---|
810 | MPI_Get_accumulate(&local_buf, 1, MPI_INT, &result_buf, |
---|
811 | 1, MPI_INT, size-2, displs, |
---|
812 | 1, MPI_INT, MPI_SUM, ep_win); |
---|
813 | } |
---|
814 | |
---|
815 | MPI_Win_fence(0, ep_win); |
---|
816 | |
---|
817 | if(rank == 6) |
---|
818 | { |
---|
819 | MPI_Aint displs = 0; |
---|
820 | MPI_Fetch_and_op(&local_buf, &result_buf, MPI_INT, size-1, displs, |
---|
821 | MPI_SUM, ep_win); |
---|
822 | } |
---|
823 | |
---|
824 | MPI_Win_fence(0, ep_win); |
---|
825 | |
---|
826 | if(rank == 7) |
---|
827 | { |
---|
828 | MPI_Aint displs = 0; |
---|
829 | MPI_Compare_and_swap(&local_buf, &buf, &result_buf, MPI_INT, size-1, displs, ep_win); |
---|
830 | } |
---|
831 | |
---|
832 | MPI_Win_fence(0, ep_win); |
---|
833 | |
---|
834 | //::MPI_Compare_and_swap(origin_addr, compare_addr, result_addr, to_mpi_type(datatype), target_mpi_rank, to_mpi_aint(target_disp), to_mpi_win(win.server_win[target_local_rank])); |
---|
835 | |
---|
836 | MPI_Win ep_win_allocated; |
---|
837 | int* baseptr = new int[10]; |
---|
838 | MPI_Aint base_size = 4; |
---|
839 | |
---|
840 | MPI_Win_allocate (base_size, sizeof(int), info, comm, baseptr, &ep_win_allocated); |
---|
841 | |
---|
842 | MPI_Win_fence(0, ep_win_allocated); |
---|
843 | |
---|
844 | MPI_Win_free(&ep_win_allocated); |
---|
845 | delete[] baseptr; |
---|
846 | |
---|
847 | MPI_Win_free(&ep_win); |
---|
848 | printf("rank = %d, buf = %d, local_buf = %d, result_buf = %d\n", rank, buf, local_buf, result_buf); |
---|
849 | |
---|
850 | MPI_Comm_free(&comm); |
---|
851 | |
---|
852 | } |
---|
853 | */ |
---|
854 | MPI_Finalize(); |
---|
855 | |
---|
856 | } |
---|