source: XIOS/dev/branch_openmp/extern/ep_dev/ep_create.cpp @ 1503

Last change on this file since 1503 was 1503, checked in by yushan, 3 years ago

rank_map is passed from vector to map, in order to have more flexibility in comm_split

File size: 3.4 KB
Line 
1/*!
2   \file ep_create.cpp
3   \since 2 may 2016
4
5   \brief Definitions of MPI endpoint function: MPI_Comm_create_endpoints
6 */
7
8#include "ep_lib.hpp"
9#include <mpi.h>
10#include "ep_declaration.hpp"
11#include "ep_mpi.hpp"
12
13using namespace std;
14
15namespace ep_lib {
16
17  /*!
18    Dynamic creation of endpoints for each MPI process.
19    The output of this function is an array of communicator handles of length num_ep, where each handle
20    corresponds to a new local rank in the output communicator.
21    Once created, endpoints behave as MPI processes.
22    \param [in] mpi_comm Parent MPI communicator.
23    \param [in] num_ep Number of endpoints per process.
24    \param [in] info Information of the EP creation.
25    \param [out] out_comm_hdls Handles of EP communicators.
26  */
27 
28  int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
29  {
30
31    int base_rank;
32    int base_size;
33   
34    ::MPI_Comm mpi_base_comm = to_mpi_comm(base_comm_ptr);
35
36    ::MPI_Comm_size(mpi_base_comm, &base_size);  // ep_lib::mpi_comm_size
37    ::MPI_Comm_rank(mpi_base_comm, &base_rank);  // ep_lib::mpi_comm_rank
38                    // parent_comm can also be endpoints communicators
39
40    std::vector<int> recv_num_ep(base_size);
41
42    out_comm_hdls = new MPI_Comm[num_ep];
43
44    for (int idx = 0; idx < num_ep; ++idx)
45    {
46      out_comm_hdls[idx] = new ep_comm;
47      out_comm_hdls[idx]->is_ep = true;
48      out_comm_hdls[idx]->is_intercomm = false;
49      out_comm_hdls[idx]->ep_comm_ptr = new ep_communicator;     
50      *(static_cast< ::MPI_Comm*>(out_comm_hdls[idx]->mpi_comm)) = *(static_cast< ::MPI_Comm*>(base_comm_ptr));
51      out_comm_hdls[idx]->ep_comm_ptr->comm_list = out_comm_hdls;
52      out_comm_hdls[idx]->ep_comm_ptr->comm_label = 0;
53    }
54
55    ::MPI_Allgather(&num_ep, 1, to_mpi_type(MPI_INT), &recv_num_ep[0], 1, to_mpi_type(MPI_INT), mpi_base_comm);
56
57
58    int sum = 0;  // representing total ep number of process with smaller rank
59    for (int i = 0; i < base_rank; ++i) {sum += recv_num_ep[i]; }
60
61    int ep_size = std::accumulate(recv_num_ep.begin(), recv_num_ep.end(), 0);
62
63    out_comm_hdls[0]->ep_barrier = new ep_barrier(num_ep);
64    out_comm_hdls[0]->my_buffer = new BUFFER;
65
66    //out_comm_hdls[0]->rank_map = new RANK_MAP;
67    //out_comm_hdls[0]->rank_map->resize(ep_size);
68
69    out_comm_hdls[0]->ep_rank_map = new EP_RANK_MAP;
70    //out_comm_hdls[0]->ep_rank_map->resize(ep_size);
71
72
73    for (int i = 1; i < num_ep; i++)
74    {
75      out_comm_hdls[i]->ep_barrier = out_comm_hdls[0]->ep_barrier;
76      out_comm_hdls[i]->my_buffer  = out_comm_hdls[0]->my_buffer;
77      //out_comm_hdls[i]->rank_map   = out_comm_hdls[0]->rank_map;
78      out_comm_hdls[i]->ep_rank_map= out_comm_hdls[0]->ep_rank_map;
79    }
80
81
82    for (int i = 0; i < num_ep; i++)
83    {
84      out_comm_hdls[i]->ep_comm_ptr->size_rank_info[0] = std::make_pair(sum+i, ep_size);
85      out_comm_hdls[i]->ep_comm_ptr->size_rank_info[1] = std::make_pair(i, num_ep);
86      out_comm_hdls[i]->ep_comm_ptr->size_rank_info[2] = std::make_pair(base_rank, base_size);
87
88      out_comm_hdls[i]->ep_comm_ptr->message_queue = new Message_list;
89    }
90
91
92    int ind = 0;
93
94    for(int i=0; i<base_size; i++)
95    {
96      for(int j=0; j<recv_num_ep[i]; j++)
97      {
98        //out_comm_hdls[0]->rank_map->at(ind) = make_pair(j, i);
99        out_comm_hdls[0]->ep_rank_map->insert(std::pair< int, std::pair<int,int> >(ind, j, i));
100        ind++;
101      }
102    }
103
104    return 0;
105
106  } //MPI_Comm_create_endpoints
107
108
109} //namespace ep_lib
Note: See TracBrowser for help on using the repository browser.