source: XIOS/dev/branch_openmp/extern/ep_dev/ep_intercomm_kernel.cpp @ 1527

Last change on this file since 1527 was 1527, checked in by yushan, 3 years ago

save dev

File size: 21.9 KB
Line 
1#include "ep_lib.hpp"
2#include <mpi.h>
3#include "ep_declaration.hpp"
4#include "ep_mpi.hpp"
5
6using namespace std;
7
8
9namespace ep_lib
10{
  // ---------------------------------------------------------------------------
  // NOTE(review): the entire MPI_Intercomm_create_kernel() below is commented
  // out (one block comment from the `/*int MPI_Intercomm_create_kernel...` line
  // to the `*/` before the clean-up section, and a second block comment around
  // the clean-up code). It is kept as reference for the endpoint (EP)
  // inter-communicator creation algorithm. Before re-enabling, fix the issues
  // flagged with NOTE(review) markers inside the dead code:
  //   * arrays allocated with new[] are released with `delete` instead of
  //     `delete[]` (undefined behavior);
  //   * `MPI_Info info;` is passed to MPI_Comm_create_endpoints without any
  //     visible initialization;
  //   * several std::pair constructions pass 3-4 arguments to a 2-argument
  //     constructor and will not compile as written;
  //   * `int target;` may be read uninitialized if no matching leader rank is
  //     found in the gathered list.
  // ---------------------------------------------------------------------------
11  /*int MPI_Intercomm_create_kernel(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
12  {
13    int ep_rank, ep_rank_loc, mpi_rank;
14    int ep_size, num_ep, mpi_size;
15
16    ep_rank     = local_comm->ep_comm_ptr->size_rank_info[0].first;
17    ep_rank_loc = local_comm->ep_comm_ptr->size_rank_info[1].first;
18    mpi_rank    = local_comm->ep_comm_ptr->size_rank_info[2].first;
19    ep_size     = local_comm->ep_comm_ptr->size_rank_info[0].second;
20    num_ep      = local_comm->ep_comm_ptr->size_rank_info[1].second;
21    mpi_size    = local_comm->ep_comm_ptr->size_rank_info[2].second;
22
23    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
24    // step 1 : local leaders exchange ep_size, leader_rank_in_peer, leader_rank_in_peer_mpi, leader_rank_in_world. //
25    //          local leaders bcast results to all ep in local_comm                                                 //
26    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
27
28    bool is_local_leader = ep_rank==local_leader? true: false;
29
30
31
32    int local_leader_rank_in_peer;
33    int local_leader_rank_in_peer_mpi;
34    int local_leader_rank_in_world;
35
36    int remote_ep_size;
37    int remote_leader_rank_in_peer;
38    int remote_leader_rank_in_peer_mpi;
39    int remote_leader_rank_in_world;
40
41    int send_quadruple[4];
42    int recv_quadruple[4];
43
44
45    if(is_local_leader)
46    {
47      MPI_Comm_rank(peer_comm, &local_leader_rank_in_peer);
48      ::MPI_Comm_rank(to_mpi_comm(peer_comm->mpi_comm), &local_leader_rank_in_peer_mpi);
49      ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &local_leader_rank_in_world);
50
51      send_quadruple[0] = ep_size;
52      send_quadruple[1] = local_leader_rank_in_peer;
53      send_quadruple[2] = local_leader_rank_in_peer_mpi;
54      send_quadruple[3] = local_leader_rank_in_world;
55
56      MPI_Request request;
57      MPI_Status status;
58
59
60      if(remote_leader > local_leader_rank_in_peer)
61      {
62        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
63        MPI_Wait(&request, &status);
64
65        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
66        MPI_Wait(&request, &status);
67      }
68      else
69      {
70        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
71        MPI_Wait(&request, &status);
72
73        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
74        MPI_Wait(&request, &status);
75      }
76
77      remote_ep_size                 = recv_quadruple[0];
78      remote_leader_rank_in_peer     = recv_quadruple[1];
79      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
80      remote_leader_rank_in_world    = recv_quadruple[3];
81#ifdef _showinfo
82      printf("peer_rank = %d, packed exchange OK\n", local_leader_rank_in_peer);
83#endif
84    }
85
86    MPI_Bcast(send_quadruple, 4, MPI_INT, local_leader, local_comm);
87    MPI_Bcast(recv_quadruple, 4, MPI_INT, local_leader, local_comm);
88
89    if(!is_local_leader)
90    {
91      local_leader_rank_in_peer     = send_quadruple[1];
92      local_leader_rank_in_peer_mpi = send_quadruple[2];
93      local_leader_rank_in_world    = send_quadruple[3];
94
95      remote_ep_size                 = recv_quadruple[0];
96      remote_leader_rank_in_peer     = recv_quadruple[1];
97      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
98      remote_leader_rank_in_world    = recv_quadruple[3];
99    }
100
101
102#ifdef _showinfo
103    MPI_Barrier(peer_comm);
104    MPI_Barrier(peer_comm);
105    printf("peer_rank = %d, ep_size = %d, remote_ep_size = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_size, remote_ep_size);
106    MPI_Barrier(peer_comm);
107    MPI_Barrier(peer_comm);
108#endif
109
110    ///////////////////////////////////////////////////////////////////
111    // step 2 : gather ranks in world for both local and remote comm //
112    ///////////////////////////////////////////////////////////////////
113
114    int rank_in_world;
115    ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &rank_in_world);
116
117    int *ranks_in_world_local  = new int[ep_size];
118    int *ranks_in_world_remote = new int[remote_ep_size];
119
120    MPI_Allgather(&rank_in_world, 1, MPI_INT, ranks_in_world_local, 1, MPI_INT, local_comm);
121
122    if(is_local_leader)
123    {
124      MPI_Request request;
125      MPI_Status status;
126
127      if(remote_leader > local_leader_rank_in_peer)
128      {
129        MPI_Isend(ranks_in_world_local,  ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
130        MPI_Wait(&request, &status);
131
132        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
133        MPI_Wait(&request, &status);
134      }
135      else
136      {
137        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
138        MPI_Wait(&request, &status);
139
140        MPI_Isend(ranks_in_world_local,  ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
141        MPI_Wait(&request, &status);
142      }
143#ifdef _showinfo
144      printf("peer_rank = %d, ranks_in_world exchange OK\n", local_leader_rank_in_peer);
145#endif
146    }
147
148    MPI_Bcast(ranks_in_world_remote, remote_ep_size, MPI_INT, local_leader, local_comm);
149
150#ifdef _showinfo
151
152    MPI_Barrier(peer_comm);
153    MPI_Barrier(peer_comm);
154
155    if(remote_leader == 4)
156    {
157      for(int i=0; i<ep_size; i++)
158      {
159        if(ep_rank == i)
160        {
161          printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
162          for(int i=0; i<ep_size; i++)
163          {
164            printf("%d\t", ranks_in_world_local[i]);
165          }
166
167          printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
168          for(int i=0; i<remote_ep_size; i++)
169          {
170            printf("%d\t", ranks_in_world_remote[i]);
171          }
172          printf("\n");
173
174        }
175
176        MPI_Barrier(local_comm);
177        MPI_Barrier(local_comm);
178        MPI_Barrier(local_comm);
179      }
180    }
181
182    MPI_Barrier(peer_comm);
183    MPI_Barrier(peer_comm);
184    MPI_Barrier(peer_comm);
185
186    if(remote_leader == 13)
187    {
188      for(int i=0; i<ep_size; i++)
189      {
190        if(ep_rank == i)
191        {
192          printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
193          for(int i=0; i<ep_size; i++)
194          {
195            printf("%d\t", ranks_in_world_local[i]);
196          }
197
198          printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
199          for(int i=0; i<remote_ep_size; i++)
200          {
201            printf("%d\t", ranks_in_world_remote[i]);
202          }
203          printf("\n");
204
205        }
206
207        MPI_Barrier(local_comm);
208        MPI_Barrier(local_comm);
209        MPI_Barrier(local_comm);
210      }
211    }
212
213    MPI_Barrier(peer_comm);
214    MPI_Barrier(peer_comm);
215
216#endif
217
218    //////////////////////////////////////////////////////////////
219    // step 3 : determine the priority and ownership of each ep //
220    //////////////////////////////////////////////////////////////
221
222    bool priority = local_leader_rank_in_peer > remote_leader_rank_in_peer? true : false;
223
224
225    int ownership;
226
227    if(rank_in_world == ranks_in_world_local[local_leader]) ownership = 1;
228    else if(rank_in_world == remote_leader_rank_in_world)   ownership = 0;
229    else
230    {
231      ownership = 1;
232      for(int i=0; i<remote_ep_size; i++)
233      {
234        if(rank_in_world == ranks_in_world_remote[i])
235        {
236          ownership = priority? 1 : 0;
237          break;
238        }
239      }
240    }
241
242#ifdef _showinfo
243    MPI_Barrier(peer_comm);
244    MPI_Barrier(peer_comm);
245    printf("peer_rank = %d, priority = %d, local_leader_rank_in_peer = %d, remote_leader_rank_in_peer = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, local_leader_rank_in_peer, remote_leader_rank_in_peer);
246    MPI_Barrier(peer_comm);
247    MPI_Barrier(peer_comm);
248#endif
249
250
251#ifdef _showinfo
252    MPI_Barrier(peer_comm);
253    MPI_Barrier(peer_comm);
254    printf("peer_rank = %d, priority = %d, ownership = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, ownership);
255    MPI_Barrier(peer_comm);
256    MPI_Barrier(peer_comm);
257#endif
258
259    //////////////////////////////////////////////////////
260    // step 4 : extract local_comm and create intercomm //
261    //////////////////////////////////////////////////////
262
263    bool is_involved = is_local_leader || (!is_local_leader && ep_rank_loc == 0 && rank_in_world != local_leader_rank_in_world);
264
265#ifdef _showinfo
266
267    MPI_Barrier(peer_comm);
268    MPI_Barrier(peer_comm);
269    printf("peer_rank = %d, is_involved = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, is_involved);
270    MPI_Barrier(peer_comm);
271    MPI_Barrier(peer_comm);
272
273#endif
274
275    if(is_involved)
276    {
277      ::MPI_Group local_group;
278      ::MPI_Group extracted_group;
279      ::MPI_Comm *extracted_comm = new ::MPI_Comm;
280
281
282      ::MPI_Comm_group(to_mpi_comm(local_comm->mpi_comm), &local_group);
283
284      int *ownership_list = new int[mpi_size];
285      int *mpi_rank_list = new int[mpi_size];
286
287      ::MPI_Allgather(&ownership, 1, to_mpi_type(MPI_INT), ownership_list, 1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
288      ::MPI_Allgather(&mpi_rank,  1, to_mpi_type(MPI_INT), mpi_rank_list,  1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
289
290
291      int n=0;
292      for(int i=0; i<mpi_size; i++)
293      {
294        n+=ownership_list[i];
295      }
296
297      int *new_mpi_rank_list = new int[n];
298      int j=0;
299      for(int i=0; i<mpi_size; i++)
300      {
301        if(ownership_list[i] !=0)
302        {
303          new_mpi_rank_list[j++] = mpi_rank_list[i];
304        }
305      }
306
307
308      ::MPI_Group_incl(local_group, n, new_mpi_rank_list, &extracted_group);
309
310      ::MPI_Comm_create(to_mpi_comm(local_comm->mpi_comm), extracted_group, extracted_comm);
311
312      ::MPI_Comm *mpi_inter_comm = new ::MPI_Comm;
313
314      int local_leader_rank_in_extracted_comm;
315
316      if(is_local_leader)
317      {
318        ::MPI_Comm_rank(*extracted_comm, &local_leader_rank_in_extracted_comm);
319      }
320
321      ::MPI_Bcast(&local_leader_rank_in_extracted_comm, 1, to_mpi_type(MPI_INT), local_comm->ep_rank_map->at(local_leader).second, to_mpi_comm(local_comm->mpi_comm));
322
323
324      if(ownership)
325        ::MPI_Intercomm_create(*extracted_comm, local_leader_rank_in_extracted_comm, to_mpi_comm(peer_comm->mpi_comm), remote_leader_rank_in_peer_mpi, tag, mpi_inter_comm);
326
327      ////////////////////////////////////
328      // step 5 :: determine new num_ep //
329      ////////////////////////////////////
330
331      int num_ep_count=0;
332
333      for(int i=0; i<ep_size; i++)
334      {
335        if(rank_in_world == ranks_in_world_local[i])
336          num_ep_count++;
337      }
338
339      for(int i=0; i<remote_ep_size; i++)
340      {
341        if(rank_in_world == ranks_in_world_remote[i])
342          num_ep_count++;
343      }
344
345
346      ///////////////////////////////////////////////////
347      // step 6 : create endpoints from extracted_comm //
348      ///////////////////////////////////////////////////
349
350      if(ownership)
351      {
352        MPI_Comm *ep_comm;
        // NOTE(review): `info` is declared but never initialized before being
        // passed to MPI_Comm_create_endpoints — confirm the EP implementation
        // tolerates a default-constructed MPI_Info, or initialize it explicitly.
353        MPI_Info info;
354        MPI_Comm_create_endpoints(extracted_comm, num_ep_count, info, ep_comm);
355
356#ifdef _showinfo
357        printf("new ep_comm->ep_comm_ptr->intercomm->mpi_inter_comm = %p\n", mpi_inter_comm);
358#endif
359
360        for(int i=0; i<num_ep_count; i++)
361        {
362          ep_comm[i]->is_intercomm = true;
363          ep_comm[i]->ep_comm_ptr->comm_label = ranks_in_world_local[local_leader];
364          ep_comm[i]->ep_comm_ptr->intercomm = new ep_lib::ep_intercomm;
365#ifdef _showinfo
366          printf("new ep_comm[%d]->ep_comm_ptr->intercomm = %p\n", i, ep_comm[i]->ep_comm_ptr->intercomm);
367#endif
368          ep_comm[i]->ep_comm_ptr->intercomm->mpi_inter_comm = mpi_inter_comm;
369
370        }
371
372
373        #pragma omp critical (write_to_tag_list)
374        intercomm_list.push_back(make_pair( make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)) , make_pair(ep_comm , make_pair(num_ep_count, 0))));
375        #pragma omp flush
376#ifdef _showinfo
377        for(int i=0; i<num_ep_count; i++)
378          printf("peer_rank = %d, ep_comm = %p, ep_comm[%d] -> new_ep_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_comm, i, ep_comm[i]->ep_comm_ptr->size_rank_info[0].first);
379#endif
380      }
381

      // NOTE(review): these three arrays were allocated with new[]; releasing
      // them with plain `delete` is undefined behavior — use delete[] instead.
382
383      delete ownership_list;
384      delete mpi_rank_list;
385      delete new_mpi_rank_list;
386
387    }
388
389    int repeated=0;
390    for(int i=0; i<remote_ep_size; i++)
391    {
392      if(rank_in_world == ranks_in_world_remote[i])
393        repeated++;
394    }
395
396    int my_turn = ownership? ep_rank_loc : ep_rank_loc+repeated;
397
398#ifdef _showinfo
399
400    MPI_Barrier(peer_comm);
401    MPI_Barrier(peer_comm);
402    printf("peer_rank = %d, ep_rank_loc = %d, ownership = %d, repeated = %d, my_turn = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_rank_loc, ownership, repeated, my_turn);
403    MPI_Barrier(peer_comm);
404    MPI_Barrier(peer_comm);
405
406#endif
407
408
409    #pragma omp flush
410    #pragma omp critical (read_from_intercomm_list)
411    {
412      bool flag=true;
413      while(flag)
414      {
415        for(std::list<std::pair<std::pair<int, int> , std::pair<MPI_Comm * , std::pair<int, int> > > >::iterator iter = intercomm_list.begin(); iter!=intercomm_list.end(); iter++)
416        {
417          if(iter->first == make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)))
418          {
419            *newintercomm = iter->second.first[my_turn];
420
421            iter->second.second.second++;
422
423            if(iter->second.second.first == iter->second.second.second)
424              intercomm_list.erase(iter);
425
426            flag = false;
427            break;
428          }
429        }
430      }
431    }
432
433#ifdef _showinfo
434
435    MPI_Barrier(peer_comm);
436    MPI_Barrier(peer_comm);
437    printf("peer_rank = %d, test_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, (*newintercomm)->ep_comm_ptr->size_rank_info[0].first);
438    MPI_Barrier(peer_comm);
439    MPI_Barrier(peer_comm);
440
441#endif
442
443    //////////////////////////////////////////////////////////
444    // step 7 : create intercomm_rank_map for local leaders //
445    //////////////////////////////////////////////////////////
446
447    int my_quadruple[4];
448
449    my_quadruple[0] = ep_rank;
450    my_quadruple[1] = (*newintercomm)->ep_comm_ptr->size_rank_info[1].first;
451    my_quadruple[2] = (*newintercomm)->ep_comm_ptr->size_rank_info[2].first;
452    my_quadruple[3] = (*newintercomm)->ep_comm_ptr->comm_label;
453
454
455#ifdef _showinfo
456
457    MPI_Barrier(peer_comm);
458    MPI_Barrier(peer_comm);
459    printf("peer_rank = %d, my_quadruple = %d %d %d %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, my_quadruple[0], my_quadruple[1], my_quadruple[2], my_quadruple[3]);
460    MPI_Barrier(peer_comm);
461    MPI_Barrier(peer_comm);
462#endif
463
464    int *local_quadruple_list;
465    int *remote_quadruple_list;
466    if(is_involved)
467    {
468      local_quadruple_list = new int[4*ep_size];
469      remote_quadruple_list = new int[4*remote_ep_size];
470
471    }
472
473    MPI_Gather(my_quadruple, 4, MPI_INT, local_quadruple_list, 4, MPI_INT, local_leader, local_comm);
474
475
476    if(is_local_leader)
477    {
478      MPI_Request request;
479      MPI_Status status;
480
481      if(remote_leader > local_leader_rank_in_peer)
482      {
483        MPI_Isend(local_quadruple_list,  4*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
484        MPI_Wait(&request, &status);
485
486        MPI_Irecv(remote_quadruple_list, 4*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
487        MPI_Wait(&request, &status);
488      }
489      else
490      {
491        MPI_Irecv(remote_quadruple_list, 4*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
492        MPI_Wait(&request, &status);
493
494        MPI_Isend(local_quadruple_list,  4*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
495        MPI_Wait(&request, &status);
496      }
497#ifdef _showinfo
498      printf("peer_rank = %d, quadruple_list exchange OK\n", local_leader_rank_in_peer);
499#endif
500    }
501
502    if(is_involved)
503    {
504      ::MPI_Bcast(remote_quadruple_list, 4*remote_ep_size, to_mpi_type(MPI_INT), local_comm->ep_rank_map->at(local_leader).second, to_mpi_comm(local_comm->mpi_comm));
505      (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map = new INTERCOMM_RANK_MAP;
506#ifdef _showinfo
507      printf("new intercomm_rank_map = %p\n", (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map);
508#endif
509      for(int i=0; i<remote_ep_size; i++)
510      {
        // NOTE(review): this std::pair constructor is called with 4 arguments;
        // it takes 2. Presumably the intent was
        // make_pair(list[4*i], make_pair(list[4*i+1], make_pair(list[4*i+2], list[4*i+3])))
        // — this line will not compile if the function is re-enabled as-is.
511        (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->insert(std::pair<int, std::pair< int, std::pair<int, int> > >(remote_quadruple_list[4*i], remote_quadruple_list[4*i+1], remote_quadruple_list[4*i+2], remote_quadruple_list[4*i+3]));
512      }
513    }
514
515    ////////////////////////////////////////////////////////
516    // step 8 : associate intercomm_rank_map to endpoints //
517    ////////////////////////////////////////////////////////
518
519    int *leader_rank_in_world_local_gathered = new int[(*newintercomm)->ep_comm_ptr->size_rank_info[1].second];
520
521    MPI_Allgather_local(&local_leader_rank_in_world, 1, MPI_INT, leader_rank_in_world_local_gathered, *newintercomm);
522
523
524    int new_rank_loc = (*newintercomm)->ep_comm_ptr->size_rank_info[1].first;
525    int *new_rank_loc_local_gathered = new int[(*newintercomm)->ep_comm_ptr->size_rank_info[1].second];
526
527    MPI_Allgather_local(&new_rank_loc, 1, MPI_INT, new_rank_loc_local_gathered, *newintercomm);
528
529    //printf("peer_rank = %d, leader_rank_in_world_local_gathered = %d %d %d %d, new_rank_loc_local_gathered = %d %d %d %d\n",
530    //  peer_comm->ep_comm_ptr->size_rank_info[0].first, leader_rank_in_world_local_gathered[0], leader_rank_in_world_local_gathered[1], leader_rank_in_world_local_gathered[2], leader_rank_in_world_local_gathered[3],
531    //  new_rank_loc_local_gathered[0], new_rank_loc_local_gathered[1], new_rank_loc_local_gathered[2], new_rank_loc_local_gathered[3]);
532
533    if(is_involved)
534    {
535
536      (*newintercomm)->ep_comm_ptr->intercomm->local_rank_map = new EP_RANK_MAP;
537#ifdef _showinfo
538      printf("new local_rank_map = %p\n", (*newintercomm)->ep_comm_ptr->intercomm->local_rank_map);
539#endif
540
541      for(std::map<int, std::pair<int, int> >::iterator it = local_comm->ep_rank_map->begin(); it != local_comm->ep_rank_map->end(); it++)
542      {
        // NOTE(review): same defect as in step 7 — std::pair constructed with
        // 3 arguments; likely intended make_pair(it->first, it->second).
543        (*newintercomm)->ep_comm_ptr->intercomm->local_rank_map->insert(std::pair<int, std::pair< int, int > >(it->first, it->second.first, it->second.second));
544      }
545    }
546
547    MPI_Barrier_local(*newintercomm);
548
549
550    if(!is_involved)
551    {
      // NOTE(review): if no entry of leader_rank_in_world_local_gathered
      // matches, `target` is read uninitialized two statements below —
      // presumably a match is guaranteed by construction, but verify.
552      int target;
553      for(int i=0; i<(*newintercomm)->ep_comm_ptr->size_rank_info[1].second; i++)
554      {
555        if(local_leader_rank_in_world == leader_rank_in_world_local_gathered[i])
556        {
557          target = i;
558          (*newintercomm)->ep_comm_ptr->intercomm->intercomm_tag = target;
559          break;
560        }
561      }
562      (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map = (*newintercomm)->ep_comm_ptr->comm_list[target]->ep_comm_ptr->intercomm->intercomm_rank_map;
563      (*newintercomm)->ep_comm_ptr->intercomm->local_rank_map     = (*newintercomm)->ep_comm_ptr->comm_list[target]->ep_comm_ptr->intercomm->local_rank_map;
564    }
565    else
566    {
567      (*newintercomm)->ep_comm_ptr->intercomm->intercomm_tag = -1;
568    }
569
570    (*newintercomm)->ep_comm_ptr->intercomm->size_rank_info[0] = (*newintercomm)->ep_comm_ptr->size_rank_info[0];
571    (*newintercomm)->ep_comm_ptr->intercomm->size_rank_info[1] = (*newintercomm)->ep_comm_ptr->size_rank_info[1];
572    (*newintercomm)->ep_comm_ptr->intercomm->size_rank_info[2] = (*newintercomm)->ep_comm_ptr->size_rank_info[2];
573
574
575    (*newintercomm)->ep_comm_ptr->size_rank_info[0] = local_comm->ep_comm_ptr->size_rank_info[0];
576    (*newintercomm)->ep_comm_ptr->size_rank_info[1] = local_comm->ep_comm_ptr->size_rank_info[1];
577    (*newintercomm)->ep_comm_ptr->size_rank_info[2] = local_comm->ep_comm_ptr->size_rank_info[2];
578
579
580#ifdef _showinfo
581    MPI_Barrier(peer_comm);
582    MPI_Barrier(peer_comm);
583
584    printf("peer_rank = %d, size_rank_info = %d %d %d %d %d %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first,
585      (*newintercomm)->ep_comm_ptr->size_rank_info[0].first, (*newintercomm)->ep_comm_ptr->size_rank_info[0].second,
586      (*newintercomm)->ep_comm_ptr->size_rank_info[1].first, (*newintercomm)->ep_comm_ptr->size_rank_info[1].second,
587      (*newintercomm)->ep_comm_ptr->size_rank_info[2].first, (*newintercomm)->ep_comm_ptr->size_rank_info[2].second);
588
589    MPI_Barrier(peer_comm);
590    MPI_Barrier(peer_comm);
591
592#endif
593
594/*
595    if(peer_comm->ep_comm_ptr->size_rank_info[0].first == 5)
596    {
597      int receiver = rand()%10;
598      printf("receiver = %d, intercomm_local_rank = %d, intercomm_mpi_rank = %d, comm_label = %d\n", receiver, (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).first,
599        (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).second.first, (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).second.second);
600    }
601
602    if(peer_comm->ep_comm_ptr->size_rank_info[0].first == 9)
603    {
604      int receiver = rand()%6;
605      printf("receiver = %d, intercomm_local_rank = %d, intercomm_mpi_rank = %d, comm_label = %d\n", receiver, (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).first,
606        (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).second.first, (*newintercomm)->ep_comm_ptr->intercomm->intercomm_rank_map->at(receiver).second.second);
607    }
608
609
610    if(peer_comm->ep_comm_ptr->size_rank_info[0].first == 5)
611    {
612      for(int i=0; i<ep_size; i++)
613      {
614        printf("rank_map->at(%d) = %d, %d\n", i, (*newintercomm)->ep_rank_map->at(i).first, (*newintercomm)->ep_rank_map->at(i).second);
615      }
616    }
617*/
618    //////////////
619    // clean up //
620    //////////////
621/*
    NOTE(review): clean-up is also disabled; when re-enabling, these arrays
    came from new[] and must be released with delete[], not delete.
622    delete ranks_in_world_local;
623    delete ranks_in_world_remote;
624
625    if(is_involved)
626    {
627      delete local_quadruple_list;
628      delete remote_quadruple_list;
629    }
630
631    delete leader_rank_in_world_local_gathered;
632    delete new_rank_loc_local_gathered;
633
634
635  }
636
637*/
638
639
640}
Note: See TracBrowser for help on using the repository browser.