Changeset 1354 for XIOS/dev/branch_openmp/extern
- Timestamp: 12/04/17 17:12:00
- Location: XIOS/dev/branch_openmp/extern/src_ep_dev
- Files: 25 edited
Legend:
- Unmodified lines are shown without a prefix
- Added lines are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allgather.cpp
(r1287 → r1354)

     if(!comm.is_ep && comm.mpi_comm)
     {
-      ::MPI_Allgather(const_cast<void*>(sendbuf), sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, recvcount, static_cast< ::MPI_Datatype>(recvtype),
-                      static_cast< ::MPI_Comm>(comm.mpi_comm));
-      return 0;
+      return ::MPI_Allgather(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), to_mpi_comm(comm.mpi_comm));
     }
-
-    if(!comm.mpi_comm) return 0;

     assert(sendcount == recvcount);
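Throughout this changeset, the inline static_cast conversions are replaced by the to_mpi_comm/to_mpi_type helpers from the newly included ep_mpi.hpp, a header that is not itself part of the diff. A minimal sketch of what to_mpi_comm could look like, assuming the EP wrappers now keep the raw MPI handle behind a pointer (consistent with the ::MPI_Comm* comparisons and the new ::MPI_Comm allocations in the files below):

    // Hypothetical helper from ep_mpi.hpp (header not shown in r1354).
    // comm.mpi_comm is assumed to hold a heap-allocated ::MPI_Comm, so the
    // helper casts the stored pointer back and dereferences it.
    inline ::MPI_Comm to_mpi_comm(void* comm)
    {
      return *(static_cast< ::MPI_Comm*>(comm));
    }

to_mpi_type presumably does the matching conversion for datatype handles, standing in for the static_cast< ::MPI_Datatype> casts that this changeset removes.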
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allgatherv.cpp
(r1295 → r1354)

     if(!comm.is_ep && comm.mpi_comm)
     {
-      ::MPI_Allgatherv(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcounts, displs, to_mpi_type(recvtype), to_mpi_comm(comm.mpi_comm));
-      return 0;
+      return ::MPI_Allgatherv(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcounts, displs, to_mpi_type(recvtype), to_mpi_comm(comm.mpi_comm));
     }
…
     int local_sendcount = std::accumulate(local_recvcounts.begin(), local_recvcounts.end(), 0);
-    MPI_Allgather(&local_sendcount, 1, MPI_INT, mpi_recvcounts.data(), 1, MPI_INT, to_mpi_comm(comm.mpi_comm));
+    ::MPI_Allgather(&local_sendcount, 1, to_mpi_type(MPI_INT), mpi_recvcounts.data(), 1, to_mpi_type(MPI_INT), to_mpi_comm(comm.mpi_comm));

     for(int i=1; i<mpi_size; i++)
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allreduce.cpp
(r1295 → r1354)

     ::MPI_Aint datasize, lb;

-    ::MPI_Type_get_extent( static_cast< ::MPI_Datatype>(datatype), &lb, &datasize);
+    ::MPI_Type_get_extent(to_mpi_type(datatype), &lb, &datasize);

     bool is_master = ep_rank_loc==0;
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_alltoall.cpp
(r1339 → r1354)

     ::MPI_Aint datasize, llb;
-    ::MPI_Type_get_extent( static_cast< ::MPI_Datatype>(sendtype), &llb, &datasize);
+    ::MPI_Type_get_extent(to_mpi_type(sendtype), &llb, &datasize);

     int count = sendcount;
…
     MPI_Gather(sendbuf, count*ep_size, sendtype, tmp_recvbuf, count*ep_size, recvtype, 0, comm);
-
-

     // reorder tmp_buf
…
       for(int j=0; j<ep_size; j++)
       {
-        //printf("tmp_recv[%d] = tmp_send[%d]\n", i*ep_size*count + j*count, j*ep_size*count + i*count);
-
         memcpy(tmp_sendbuf + j*ep_size*count*datasize + i*count*datasize, tmp_recvbuf + i*ep_size*count*datasize + j*count*datasize, count*datasize);
       }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_barrier.cpp
(r1287 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 namespace ep_lib
…
     if(ep_rank_loc == 0)
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm);

       ::MPI_Barrier(mpi_comm);
…
       return 0;
     }
-    else if(comm.mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
+    else if(comm.mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm.mpi_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm);

       ::MPI_Barrier(mpi_comm);
…
     if(ep_rank_loc == 0)
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.ep_comm_ptr->intercomm->mpi_inter_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm.ep_comm_ptr->intercomm->mpi_inter_comm);

       ::MPI_Barrier(mpi_comm);
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_bcast.cpp
(r1295 → r1354)

     }

-    // #pragma omp flush
     MPI_Barrier_local(comm);
-    // #pragma omp flush

     if(ep_rank_loc != local_root)
…
     {
       #pragma omp single nowait
-      ::MPI_Bcast(buffer, count, static_cast< ::MPI_Datatype>(datatype), root, static_cast< ::MPI_Comm>(comm.mpi_comm));
+      ::MPI_Bcast(buffer, count, to_mpi_type(datatype), root, to_mpi_comm(comm.mpi_comm));
       return 0;
     }
…
     int root_ep_rank_loc = comm.rank_map->at(root).first;

-    // printf("root_mpi_rank = %d\n", root_mpi_rank);

     if((ep_rank_loc==0 && mpi_rank != root_mpi_rank ) || ep_rank == root)
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_create.cpp
(r1295 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 using namespace std;

-
 namespace ep_lib {
…
   \param [out] out_comm_hdls Handles of EP communicators.
   */
-  #ifdef _intelmpi
-  int MPI_Comm_create_endpoints(int base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
+  // #ifdef _intelmpi
+  // int MPI_Comm_create_endpoints(int base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
+  // {
+  //   … [the former _intelmpi body, kept verbatim but commented out line by line] …
+  // } //MPI_Comm_create_endpoints
+
+  // #elif _openmpi
+  // int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
+  // {
+  //   … [the former _openmpi body, likewise commented out] …
+  // } //MPI_Comm_create_endpoints
+
+  // #endif
+
+  int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
   {
     int base_rank;
     int base_size;

-    ::MPI_Comm mpi_base_comm = static_cast< ::MPI_Comm>(base_comm_ptr);
+    ::MPI_Comm mpi_base_comm = to_mpi_comm(base_comm_ptr);

     ::MPI_Comm_size(mpi_base_comm, &base_size); // ep_lib::mpi_comm_size
     ::MPI_Comm_rank(mpi_base_comm, &base_rank); // ep_lib::mpi_comm_rank
     // parent_comm can also be endpoints communicators

     std::vector<int> recv_num_ep(base_size);
…
     }

-    ::MPI_Allgather(&num_ep, 1, static_cast< ::MPI_Datatype>(MPI_INT), &recv_num_ep[0], 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_base_comm);
+    ::MPI_Allgather(&num_ep, 1, to_mpi_type(MPI_INT), &recv_num_ep[0], 1, to_mpi_type(MPI_INT), mpi_base_comm);
…
     out_comm_hdls[0].ep_barrier = new OMPbarrier(num_ep);
-
     out_comm_hdls[0].my_buffer = new BUFFER;
…
     }

-
-
     return 0;

   } //MPI_Comm_create_endpoints

-  #elif _openmpi
-  int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
-  {
-    … [the _openmpi variant, identical in structure to the function above; deleted] …
-  } //MPI_Comm_create_endpoints
-
-  #endif
-
-
-  int MPI_Comm_create_endpoints(MPI_Comm base_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls)
-  {
-    … [a third overload taking an EP communicator (it asserted !base_comm.is_ep and read base_comm.mpi_comm); deleted] …
-  } //MPI_Comm_create_endpoints

 } //namespace ep_lib
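With the preprocessor-selected overloads commented out, every caller reaches the single void* entry point through the same sequence, visible in ep_dup.cpp below: heap-allocate the base MPI communicator, duplicate into it, then hand over the pointer. A condensed sketch of that calling convention (names as in the diffs; error handling omitted):

    // Duplicate the parent MPI communicator into heap storage, then build
    // num_ep endpoint communicators on top of it (pattern from ep_dup.cpp).
    MPI_Info info;
    MPI_Comm *out_comm;
    ::MPI_Comm *mpi_dup = new ::MPI_Comm;

    ::MPI_Comm_dup(to_mpi_comm(comm.mpi_comm), mpi_dup);
    MPI_Comm_create_endpoints(mpi_dup, num_ep, info, out_comm);
    // out_comm[i] is the handle for endpoint i; all num_ep handles share
    // one ep_barrier, my_buffer and rank_map (see the body above).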
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_declaration.cpp
(r1287 → r1354)

 ep_lib::MPI_Op MPI_MIN = MPI_MIN_STD;

-ep_lib::MPI_Comm MPI_COMM_WORLD(MPI_COMM_WORLD_STD);
-ep_lib::MPI_Comm MPI_COMM_NULL(MPI_COMM_NULL_STD);
+ep_lib::MPI_Comm MPI_COMM_WORLD(&MPI_COMM_WORLD_STD);
+ep_lib::MPI_Comm MPI_COMM_NULL(&MPI_COMM_NULL_STD);

 ep_lib::MPI_Request MPI_REQUEST_NULL(MPI_REQUEST_NULL_STD);
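Taking the address of the standard handles here is what makes the pointer comparisons elsewhere in the changeset meaningful: an EP communicator that wraps the null communicator holds literally the same pointer as the global MPI_COMM_NULL. The resulting idiom, exactly as it appears in ep_dup.cpp and ep_free.cpp:

    // Null check after this changeset: compare the stored ::MPI_Comm*
    // against the pointer wrapped by the global EP MPI_COMM_NULL.
    if(comm.mpi_comm == static_cast< ::MPI_Comm* >(MPI_COMM_NULL.mpi_comm)) return 0;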
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_dup.cpp
(r1287 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 namespace ep_lib
…
     newcomm->is_ep = comm.is_ep;

-    ::MPI_Comm input = static_cast< ::MPI_Comm>(comm.mpi_comm);
-    ::MPI_Comm output;
+    ::MPI_Comm input = to_mpi_comm(comm.mpi_comm);
+    ::MPI_Comm *output = new ::MPI_Comm;

-    ::MPI_Comm_dup(input, &output);
+    ::MPI_Comm_dup(input, output);

     newcomm->mpi_comm = output;
…
     // for intracomm
-    if(comm.mpi_comm == static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm)) return 0;
+    if(comm.mpi_comm == static_cast< ::MPI_Comm* >(MPI_COMM_NULL.mpi_comm)) return 0;
…
     MPI_Info info;
     MPI_Comm *out_comm;
-    ::MPI_Comm mpi_dup;
+    ::MPI_Comm *mpi_dup = new ::MPI_Comm;

-    ::MPI_Comm in_comm = static_cast< ::MPI_Comm>(comm.mpi_comm);
+    ::MPI_Comm in_comm = to_mpi_comm(comm.mpi_comm);

-    ::MPI_Comm_dup(in_comm, &mpi_dup);
+    ::MPI_Comm_dup(in_comm, mpi_dup);

     MPI_Comm_create_endpoints(mpi_dup, num_ep, info, out_comm);
…
   {

-    if(comm.mpi_comm == static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm)) return 0;
+    if(comm.mpi_comm == static_cast< ::MPI_Comm* >(MPI_COMM_NULL.mpi_comm)) return 0;

     int my_rank = comm.ep_comm_ptr->size_rank_info[1].first;
…
     MPI_Info info;
     MPI_Comm *out_comm;
-    ::MPI_Comm mpi_dup;
+    ::MPI_Comm *mpi_dup = new ::MPI_Comm;

-    ::MPI_Comm in_comm = static_cast< ::MPI_Comm>(comm.mpi_comm);
+    ::MPI_Comm in_comm = to_mpi_comm(comm.mpi_comm);

-    ::MPI_Comm_dup(in_comm, &mpi_dup);
+    ::MPI_Comm_dup(in_comm, mpi_dup);

     MPI_Comm_create_endpoints(mpi_dup, num_ep, info, out_comm);

-    ::MPI_Comm mpi_inter;
+    ::MPI_Comm *mpi_inter = new ::MPI_Comm;

-    ::MPI_Comm_dup( static_cast< ::MPI_Comm>(comm.ep_comm_ptr->intercomm->mpi_inter_comm), &mpi_inter);
+    ::MPI_Comm_dup(to_mpi_comm(comm.ep_comm_ptr->intercomm->mpi_inter_comm), mpi_inter);

     for(int i=0; i<num_ep; i++)
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_finalize.cpp
(r1134 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
-
+#include "ep_mpi.hpp"

 namespace ep_lib
…
     if(id == 0)
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm);
       ::MPI_Abort(mpi_comm, errorcode);
     }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_fortran.cpp
(r1328 → r1354)

 #include <utility>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

-#ifdef _openmpi
-// #undef MPI_Fint
-#endif
+// #ifdef _openmpi
+// //#undef MPI_Fint
+// #endif

 namespace ep_lib
…
     int fint;
 #ifdef _intelmpi
-    fint = (::MPI_Fint)(comm.mpi_comm);
+    fint = (::MPI_Fint)(to_mpi_comm(comm.mpi_comm));
 #elif _openmpi
-    fint = ::MPI_Comm_c2f( static_cast< ::MPI_Comm>(comm.mpi_comm));
+    fint = ::MPI_Comm_c2f(to_mpi_comm(comm.mpi_comm));
 #endif
…
     ::MPI_Comm base_comm = ::MPI_Comm_f2c(comm);
 #elif _intelmpi
-    ::MPI_Comm base_comm = (::MPI_Comm)(comm);
+    ::MPI_Comm *base_comm = new ::MPI_Comm;
+    *base_comm = (::MPI_Comm)(comm);
 #endif

-    if(base_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
+    if(base_comm != static_cast< ::MPI_Comm* >(MPI_COMM_NULL.mpi_comm))
     {
       if(omp_get_thread_num() == 0)
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_free.cpp
(r1295 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 namespace ep_lib
…
     if(! comm->is_ep)
     {
-      if(comm->mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
+      if(comm->mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
       {
-        ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->mpi_comm);
+        ::MPI_Comm mpi_comm = to_mpi_comm(comm->mpi_comm);

         ::MPI_Comm_free(&mpi_comm);
…
     }

-    if( comm->mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm)
-        && comm->mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_WORLD.mpi_comm))
+    if( comm->mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm)
+        && comm->mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_WORLD.mpi_comm))
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->mpi_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm->mpi_comm);
       ::MPI_Comm_free(&mpi_comm);
       Debug("mpi_comm freed\n");
…
     }

-    if(comm->mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
+    if(comm->mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
     {
-      ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm->mpi_comm);
+      ::MPI_Comm mpi_comm = to_mpi_comm(comm->mpi_comm);
       ::MPI_Comm_free(&mpi_comm);
       Debug("mpi_comm freed\n");
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_gatherv.cpp
(r1295 → r1354)

     int num_ep = comm.ep_comm_ptr->size_rank_info[1].second;

-    //if(ep_rank_loc == local_root) printf("local_gatherv : recvcounts = %d %d\n\n", recvcounts[0], recvcounts[1]);
-    //if(ep_rank_loc == local_root) printf("local_gatherv : displs = %d %d\n\n", displs[0], displs[1]);

     #pragma omp critical (_gatherv)
…
     if(!comm.is_ep)
     {
-      ::MPI_Gatherv(const_cast<void*>(sendbuf), sendcount, static_cast< ::MPI_Datatype>(sendtype), recvbuf, const_cast<int*>(input_recvcounts), const_cast<int*>(input_displs),
-                    static_cast< ::MPI_Datatype>(recvtype), root, static_cast< ::MPI_Comm>(comm.mpi_comm));
-      return 0;
+      return ::MPI_Gatherv(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, const_cast<int*>(input_recvcounts), const_cast<int*>(input_displs),
+                           to_mpi_type(recvtype), root, to_mpi_comm(comm.mpi_comm));
     }
…
     else MPI_Gatherv_local(sendbuf, sendcount, sendtype, local_recvbuf, local_recvcounts.data(), local_displs.data(), 0, comm);

-    //if(is_master) printf("local_recvbuf = %d %d %d %d\n", static_cast<int*>(local_recvbuf)[0], static_cast<int*>(local_recvbuf)[1], static_cast<int*>(local_recvbuf)[2], static_cast<int*>(local_recvbuf)[3]);

     void* tmp_recvbuf;
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_intercomm.cpp
(r1328 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 using namespace std;
…
     MPI_Comm_rank(peer_comm, &leader_ranks[2]);

-    //printf("leader_ranks = %d, %d, %d\n", leader_ranks[0], leader_ranks[1], leader_ranks[2]);
     MPI_Request request[2];
     MPI_Status status[2];

-    MPI_Isend(&leader_ranks[0], 3, static_cast< ::MPI_Datatype>(MPI_INT), remote_leader, tag, peer_comm, &request[0]);
-    MPI_Irecv(&leader_ranks[3], 3, static_cast< ::MPI_Datatype>(MPI_INT), remote_leader, tag, peer_comm, &request[1]);
+    MPI_Isend(&leader_ranks[0], 3, MPI_INT, remote_leader, tag, peer_comm, &request[0]);
+    MPI_Irecv(&leader_ranks[3], 3, MPI_INT, remote_leader, tag, peer_comm, &request[1]);

     MPI_Waitall(2, request, status);

-    //MPI_Send(&leader_ranks[0], 3, static_cast< ::MPI_Datatype>(MPI_INT), remote_leader, tag, peer_comm);
-    //MPI_Recv(&leader_ranks[3], 3, static_cast< ::MPI_Datatype>(MPI_INT), remote_leader, tag, peer_comm, &status[1]);
   }

-  MPI_Bcast(leader_ranks, 6, static_cast< ::MPI_Datatype>(MPI_INT), local_leader, local_comm);
+  MPI_Bcast(leader_ranks, 6, MPI_INT, local_leader, local_comm);
…
       new_tag_in_world = TAG++;
     }
-    MPI_Bcast(&new_tag_in_world, 1, static_cast< ::MPI_Datatype> (MPI_INT), new_local_leader, local_comm);
-    if(ep_rank == local_leader) MPI_Send(&new_tag_in_world, 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm);
+    MPI_Bcast(&new_tag_in_world, 1, MPI_INT, new_local_leader, local_comm);
+    if(ep_rank == local_leader) MPI_Send(&new_tag_in_world, 1, MPI_INT, remote_leader, tag, peer_comm);
   }
   else
…
     {
       MPI_Status status;
-      MPI_Recv(&new_tag_in_world, 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &status);
-    }
-    MPI_Bcast(&new_tag_in_world, 1, static_cast< ::MPI_Datatype> (MPI_INT), new_local_leader, local_comm);
+      MPI_Recv(&new_tag_in_world, 1, MPI_INT, remote_leader, tag, peer_comm, &status);
+    }
+    MPI_Bcast(&new_tag_in_world, 1, MPI_INT, new_local_leader, local_comm);
   }
…
   if(ep_rank == new_local_leader)
   {
-    ::MPI_Comm_rank( static_cast< ::MPI_Comm >(MPI_COMM_WORLD.mpi_comm), &leader_in_world[0]);
-  }
-
-  MPI_Bcast(&leader_in_world[0], 1, static_cast< ::MPI_Datatype> (MPI_INT), new_local_leader, local_comm);
+    ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD.mpi_comm), &leader_in_world[0]);
+  }
+
+  MPI_Bcast(&leader_in_world[0], 1, MPI_INT, new_local_leader, local_comm);
…
     MPI_Status status[2];

-    MPI_Isend(&leader_in_world[0], 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &request[0]);
-    MPI_Irecv(&leader_in_world[1], 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &request[1]);
+    MPI_Isend(&leader_in_world[0], 1, MPI_INT, remote_leader, tag, peer_comm, &request[0]);
+    MPI_Irecv(&leader_in_world[1], 1, MPI_INT, remote_leader, tag, peer_comm, &request[1]);

     MPI_Waitall(2, request, status);
…
   if(ep_rank == local_leader) Debug("calling MPI_Intercomm_create_from_world\n");

-  return MPI_Intercomm_create_from_world(local_comm, new_local_leader, static_cast< ::MPI_Comm >(MPI_COMM_WORLD.mpi_comm), leader_in_world[1], new_tag_in_world, newintercomm);
+  return MPI_Intercomm_create_from_world(local_comm, new_local_leader, MPI_COMM_WORLD.mpi_comm, leader_in_world[1], new_tag_in_world, newintercomm);

 }
…
     return 0;
   }
-  else if(comm.mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
-  {
-    ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(comm.mpi_comm);
+  else if(comm.mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
+  {
+    ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm);

     ::MPI_Comm_test_inter(mpi_comm, flag);
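A pattern worth noting in this file: the ep_lib-level wrappers (MPI_Isend, MPI_Bcast, ...) now receive EP handles such as MPI_INT unchanged, and conversion happens only where a call crosses into the underlying MPI library. A side-by-side illustration (the buffer names are placeholders):

    // ep_lib wrapper: takes the EP datatype and EP communicator directly.
    MPI_Isend(send_buf, 3, MPI_INT, remote_leader, tag, peer_comm, &request[0]);

    // Direct ::MPI_ call: EP handles are converted at the boundary.
    ::MPI_Allgather(send_buf, 3, to_mpi_type(MPI_INT), recv_buf, 3, to_mpi_type(MPI_INT), to_mpi_comm(comm.mpi_comm));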
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_intercomm_kernel.cpp
(r1304 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 using namespace std;
…
-  ::MPI_Comm local_mpi_comm = static_cast< ::MPI_Comm>(local_comm.mpi_comm);
+  ::MPI_Comm local_mpi_comm = to_mpi_comm(local_comm.mpi_comm);

-  ::MPI_Comm_rank( static_cast< ::MPI_Comm>(MPI_COMM_WORLD.mpi_comm), &rank_in_world);
-  ::MPI_Comm_rank( static_cast< ::MPI_Comm>(local_comm.mpi_comm), &rank_in_local_parent);
+  ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD.mpi_comm), &rank_in_world);
+  ::MPI_Comm_rank(local_mpi_comm, &rank_in_local_parent);
…
     send_buf[2] = num_ep;

-    ::MPI_Allgather(send_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), recv_buf.data(), 3, static_cast< ::MPI_Datatype>(MPI_INT), local_mpi_comm);
+    ::MPI_Allgather(send_buf.data(), 3, to_mpi_type(MPI_INT), recv_buf.data(), 3, to_mpi_type(MPI_INT), local_mpi_comm);

     for(int i=0; i<size_info[0]; i++)
…
     leader_info[1] = remote_leader;

-    ::MPI_Comm_rank(static_cast< ::MPI_Comm>(peer_comm.mpi_comm), &rank_in_peer_mpi[0]);
-
-
+    ::MPI_Comm_rank(to_mpi_comm(peer_comm.mpi_comm), &rank_in_peer_mpi[0]);

     send_buf[0] = size_info[0];
…
     MPI_Status statuses[2];

-    MPI_Isend(send_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &requests[0]);
-    MPI_Irecv(recv_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &requests[1]);
+    MPI_Isend(send_buf.data(), 3, MPI_INT, remote_leader, tag, peer_comm, &requests[0]);
+    MPI_Irecv(recv_buf.data(), 3, MPI_INT, remote_leader, tag, peer_comm, &requests[1]);
…
     send_buf[4] = rank_in_peer_mpi[1];

-    ::MPI_Bcast(send_buf.data(), 5, static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+    ::MPI_Bcast(send_buf.data(), 5, to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);

     size_info[1] = send_buf[0];
…
     std::copy ( ep_info[0].data(), ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[0] );

-    MPI_Isend(send_buf.data(), 3*size_info[0], static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+1, peer_comm, &requests[0]);
-    MPI_Irecv(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+1, peer_comm, &requests[1]);
+    MPI_Isend(send_buf.data(), 3*size_info[0], MPI_INT, remote_leader, tag+1, peer_comm, &requests[0]);
+    MPI_Irecv(recv_buf.data(), 3*size_info[1], MPI_INT, remote_leader, tag+1, peer_comm, &requests[1]);

     MPI_Waitall(2, requests, statuses);
   }

-  ::MPI_Bcast(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+  ::MPI_Bcast(recv_buf.data(), 3*size_info[1], MPI_INT, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

   std::copy ( recv_buf.data(), recv_buf.data() + size_info[1], rank_info[2].begin() );
…
     MPI_Request requests[2];
     MPI_Status statuses[2];
-    MPI_Isend(&size_info[2], 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+2, peer_comm, &requests[0]);
-    MPI_Irecv(&size_info[3], 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+2, peer_comm, &requests[1]);
+    MPI_Isend(&size_info[2], 1, MPI_INT, remote_leader, tag+2, peer_comm, &requests[0]);
+    MPI_Irecv(&size_info[3], 1, MPI_INT, remote_leader, tag+2, peer_comm, &requests[1]);

     MPI_Waitall(2, requests, statuses);
   }

-  ::MPI_Bcast(&size_info[2], 2, static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+  ::MPI_Bcast(&size_info[2], 2, MPI_INT, local_comm.rank_map->at(local_leader).second, local_mpi_comm);

   new_rank_info[2].resize(size_info[3]);
…
     std::copy ( new_ep_info[0].data(), new_ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[2] );

-    MPI_Isend(send_buf.data(), 3*size_info[2], static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+3, peer_comm, &requests[0]);
-    MPI_Irecv(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+3, peer_comm, &requests[1]);
+    MPI_Isend(send_buf.data(), 3*size_info[2], MPI_INT, remote_leader, tag+3, peer_comm, &requests[0]);
+    MPI_Irecv(recv_buf.data(), 3*size_info[3], MPI_INT, remote_leader, tag+3, peer_comm, &requests[1]);

     MPI_Waitall(2, requests, statuses);
   }

-  ::MPI_Bcast(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+  ::MPI_Bcast(recv_buf.data(), 3*size_info[3], to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);

   std::copy ( recv_buf.data(), recv_buf.data() + size_info[3], new_rank_info[2].begin() );
…
   ::MPI_Group local_group;
   ::MPI_Group new_group;
-  ::MPI_Comm new_comm;
-  ::MPI_Comm intercomm;
+  ::MPI_Comm *new_comm = new ::MPI_Comm;
+  ::MPI_Comm *intercomm = new ::MPI_Comm;

   ::MPI_Comm_group(local_mpi_comm, &local_group);
…
   ::MPI_Group_incl(local_group, size_info[2], new_rank_info[1].data(), &new_group);

-  ::MPI_Comm_create(local_mpi_comm, new_group, &new_comm);
+  ::MPI_Comm_create(local_mpi_comm, new_group, new_comm);
…
   if(is_local_leader)
   {
-    ::MPI_Comm_rank( new_comm, &leader_info[2]);
-  }
-
-  ::MPI_Bcast(&leader_info[2], 1, static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
-
-  if(new_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))
-  {
-
-    ::MPI_Barrier( new_comm);
-
-    ::MPI_Intercomm_create( new_comm, leader_info[2], static_cast< ::MPI_Comm>(peer_comm.mpi_comm), rank_in_peer_mpi[1], tag, &intercomm);
+    ::MPI_Comm_rank(*new_comm, &leader_info[2]);
+  }
+
+  ::MPI_Bcast(&leader_info[2], 1, to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+
+  if(new_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
+  {
+
+    ::MPI_Barrier(*new_comm);
+
+    ::MPI_Intercomm_create(*new_comm, leader_info[2], to_mpi_comm(peer_comm.mpi_comm), rank_in_peer_mpi[1], tag, intercomm);

     int id;

-    ::MPI_Comm_rank( new_comm, &id);
+    ::MPI_Comm_rank(*new_comm, &id);
     int my_num_ep = new_ep_info[0][id];
…
   }

-  MPI_Bcast(bcast_buf.data(), 8, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast(bcast_buf.data(), 8, MPI_INT, local_leader, local_comm);

   if(!is_local_leader)
…
   }

-  MPI_Bcast(bcast_buf.data(), size_info[2]+size_info[1]+size_info[0]+1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast(bcast_buf.data(), size_info[2]+size_info[1]+size_info[0]+1, MPI_INT, local_leader, local_comm);

   if(!is_local_leader)
…
   intercomm_mpi_size = newintercomm->ep_comm_ptr->size_rank_info[2].second;

-  MPI_Bcast(&remote_ep_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast(&remote_ep_size, 1, MPI_INT, local_leader, local_comm);

   int my_rank_map_elem[2];
…
   (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->resize(local_ep_size);

-  MPI_Allgather(my_rank_map_elem, 2, static_cast< ::MPI_Datatype> (MPI_INT),
-                (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), local_comm);
+  MPI_Allgather(my_rank_map_elem, 2, MPI_INT,
+                (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2, MPI_INT, local_comm);

   (*newintercomm).ep_comm_ptr->intercomm->remote_rank_map = new RANK_MAP;
…
     MPI_Status statuses[4];

-    MPI_Isend((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+4, peer_comm, &requests[0]);
-    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+4, peer_comm, &requests[1]);
-
-    MPI_Isend(&local_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+5, peer_comm, &requests[2]);
-    MPI_Irecv(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+5, peer_comm, &requests[3]);
+    MPI_Isend((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, MPI_INT, remote_leader, tag+4, peer_comm, &requests[0]);
+    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, MPI_INT, remote_leader, tag+4, peer_comm, &requests[1]);
+
+    MPI_Isend(&local_intercomm_size, 1, MPI_INT, remote_leader, tag+5, peer_comm, &requests[2]);
+    MPI_Irecv(&remote_intercomm_size, 1, MPI_INT, remote_leader, tag+5, peer_comm, &requests[3]);

     MPI_Waitall(4, requests, statuses);
…
   }

-  MPI_Allreduce(&new_bcast_root_0, &new_bcast_root, 1, static_cast< ::MPI_Datatype> (MPI_INT), static_cast< ::MPI_Op>(MPI_SUM), *newintercomm);
-
-
-  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
-  MPI_Bcast(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), new_bcast_root, *newintercomm);
+  MPI_Allreduce(&new_bcast_root_0, &new_bcast_root, 1, MPI_INT, static_cast< ::MPI_Op>(MPI_SUM), *newintercomm);
+
+
+  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, MPI_INT, local_leader, local_comm);
+  MPI_Bcast(&remote_intercomm_size, 1, MPI_INT, new_bcast_root, *newintercomm);
…
     MPI_Status statuses[2];

-    MPI_Isend((*newintercomm).rank_map->data(), 2*local_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+6, peer_comm, &requests[0]);
-    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag+6, peer_comm, &requests[1]);
+    MPI_Isend((*newintercomm).rank_map->data(), 2*local_intercomm_size, MPI_INT, remote_leader, tag+6, peer_comm, &requests[0]);
+    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, MPI_INT, remote_leader, tag+6, peer_comm, &requests[1]);

     MPI_Waitall(2, requests, statuses);
   }

-  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), new_bcast_root, *newintercomm);
+  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, MPI_INT, new_bcast_root, *newintercomm);

   (*newintercomm).ep_comm_ptr->intercomm->local_comm = &(local_comm.ep_comm_ptr->comm_list[ep_rank_loc]);
…
   int rank_in_peer_mpi[2];

-  ::MPI_Comm_rank( static_cast< ::MPI_Comm >(MPI_COMM_WORLD.mpi_comm), &rank_in_world);
+  ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD.mpi_comm), &rank_in_world);
…
   MPI_Request req_s, req_r;

-  MPI_Isend(send_buf.data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_s);
-  MPI_Irecv(recv_buf.data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_r);
+  MPI_Isend(send_buf.data(), 2, MPI_INT, remote_leader, tag, peer_comm, &req_s);
+  MPI_Irecv(recv_buf.data(), 2, MPI_INT, remote_leader, tag, peer_comm, &req_r);
…
   }

-  MPI_Bcast(recv_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast(recv_buf.data(), 3, MPI_INT, local_leader, local_comm);

   remote_num_ep = recv_buf[0];
…
   if(ep_rank == local_leader)
   {
-    ::MPI_Comm mpi_dup;
+    ::MPI_Comm *mpi_dup = new ::MPI_Comm;

-    ::MPI_Comm_dup( static_cast< ::MPI_Comm>(local_comm.mpi_comm), &mpi_dup);
+    ::MPI_Comm_dup(to_mpi_comm(local_comm.mpi_comm), mpi_dup);

     MPI_Comm *ep_intercomm;
…
     MPI_Request req_s;
     MPI_Status sta_s;
-    MPI_Isend(tag_label, 2, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_s);
+    MPI_Isend(tag_label, 2, MPI_INT, remote_leader, tag, peer_comm, &req_s);

     MPI_Wait(&req_s, &sta_s);
…
       MPI_Status status;
       MPI_Request req_r;
-      MPI_Irecv(tag_label, 2, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_r);
+      MPI_Irecv(tag_label, 2, MPI_INT, remote_leader, tag, peer_comm, &req_r);
       MPI_Wait(&req_r, &status);
     }
   }

-  MPI_Bcast(tag_label, 2, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast(tag_label, 2, MPI_INT, local_leader, local_comm);
…
   local_rank_map_ele[1] = (*newintercomm).ep_comm_ptr->comm_label;

-  MPI_Allgather(local_rank_map_ele, 2, static_cast< ::MPI_Datatype> (MPI_INT),
-                (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), local_comm);
+  MPI_Allgather(local_rank_map_ele, 2, MPI_INT,
+                (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2, MPI_INT, local_comm);

   if(ep_rank == local_leader)
…
     MPI_Request req_s, req_r;

-    MPI_Isend((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_num_ep, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_s);
-    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_num_ep, static_cast< ::MPI_Datatype> (MPI_INT), remote_leader, tag, peer_comm, &req_r);
+    MPI_Isend((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_num_ep, MPI_INT, remote_leader, tag, peer_comm, &req_s);
+    MPI_Irecv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_num_ep, MPI_INT, remote_leader, tag, peer_comm, &req_r);
…
   }

-  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_num_ep, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);
+  MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_num_ep, MPI_INT, local_leader, local_comm);
   (*newintercomm).ep_comm_ptr->intercomm->local_comm = &(local_comm.ep_comm_ptr->comm_list[ep_rank_loc]);
   (*newintercomm).ep_comm_ptr->intercomm->intercomm_tag = tag;
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_intercomm_world.cpp
(r1287 → r1354)

 #include <mpi.h>
 #include "ep_declaration.hpp"
+#include "ep_mpi.hpp"

 using namespace std;
…
 namespace ep_lib
 {

-#ifdef _openmpi
-
+// #ifdef _openmpi
+
+// int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm)
+// {
+//   … [the whole function body, several hundred lines, kept verbatim but commented out line by line; it performs the same leader/rank-map exchange as the live function below, written with blocking ::MPI_Send/::MPI_Recv pairs] …
+//   return MPI_SUCCESS;
+// }
+
+// #elif _intelmpi
 int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm)
 {
-
   int ep_rank, ep_rank_loc, mpi_rank;
   int ep_size, num_ep, mpi_size;
…
   mpi_size = local_comm.ep_comm_ptr->size_rank_info[2].second;

-
   std::vector<int> rank_info[4]; //! 0->rank_in_world of local_comm, 1->rank_in_local_parent of local_comm
                                  //! 2->rank_in_world of remote_comm, 3->rank_in_local_parent of remote_comm
…
   int remote_ep_size;

-  ::MPI_Comm peer_comm = static_cast< ::MPI_Comm>(peer_comm_ptr);
-  ::MPI_Comm local_mpi_comm = static_cast< ::MPI_Comm>(local_comm.mpi_comm);
+  ::MPI_Comm peer_comm = to_mpi_comm(peer_comm_ptr);
+  ::MPI_Comm local_mpi_comm = to_mpi_comm(local_comm.mpi_comm);

   ::MPI_Comm_rank(peer_comm, &rank_in_world);

-  ::MPI_Comm_rank( static_cast< ::MPI_Comm>(local_comm.mpi_comm), &rank_in_local_parent);
+  ::MPI_Comm_rank(local_mpi_comm, &rank_in_local_parent);

   bool is_proc_master = false;
…
     rank_info[1].resize(size_info[0]);

-
-
     ep_info[0].resize(size_info[0]);
…
     send_buf[2] = num_ep;

-    ::MPI_Allgather(send_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), recv_buf.data(), 3, static_cast< ::MPI_Datatype>(MPI_INT), local_mpi_comm);
+    ::MPI_Allgather(send_buf.data(), 3, to_mpi_type(MPI_INT), recv_buf.data(), 3, to_mpi_type(MPI_INT), local_mpi_comm);

     for(int i=0; i<size_info[0]; i++)
…
     }

+
     if(is_local_leader)
…
       send_buf[1] = local_ep_size;

-      ::MPI_Send(send_buf.data(), 2, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);
-
-      ::MPI_Recv(recv_buf.data(), 2, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);
+      ::MPI_Send(send_buf.data(), 2, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm);
+
+      ::MPI_Recv(recv_buf.data(), 2, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);

       recv_buf[2] = rank_in_world;
…
     }

-    ::MPI_Bcast(recv_buf.data(), 4, static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+    ::MPI_Bcast(recv_buf.data(), 4, to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);

     size_info[1] = recv_buf[0];
…
       std::copy ( ep_info[0].data(), ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[0] );

-      ::MPI_Send(send_buf.data(), 3*size_info[0], static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);
-
-      ::MPI_Recv(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);
-
-    }
-
-    ::MPI_Bcast(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+      ::MPI_Send(send_buf.data(), 3*size_info[0], to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm);
+
+      ::MPI_Recv(recv_buf.data(), 3*size_info[1], to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);
+
+    }
+
+    ::MPI_Bcast(recv_buf.data(), 3*size_info[1], to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);

     std::copy ( recv_buf.data(), recv_buf.data() + size_info[1], rank_info[2].begin() );
…
       ::MPI_Status mpi_status;

-      ::MPI_Send(&size_info[2], 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);
-
-      ::MPI_Recv(&size_info[3], 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);
-    }
-
-    ::MPI_Bcast(&size_info[2], 2, static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);
+      ::MPI_Send(&size_info[2], 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm);
787 788 ::MPI_Recv(&size_info[3], 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 789 } 790 791 ::MPI_Bcast(&size_info[2], 2, to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 267 792 268 793 new_rank_info[2].resize(size_info[3]); … … 281 806 std::copy ( new_ep_info[0].data(), new_ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[2] ); 282 807 283 ::MPI_Send(send_buf.data(), 3*size_info[2], static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);284 ::MPI_Recv(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 285 286 } 287 288 ::MPI_Bcast(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);808 ::MPI_Send(send_buf.data(), 3*size_info[2], to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm); 809 810 ::MPI_Recv(recv_buf.data(), 3*size_info[3], to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 811 } 812 813 ::MPI_Bcast(recv_buf.data(), 3*size_info[3], to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 289 814 290 815 std::copy ( recv_buf.data(), recv_buf.data() + size_info[3], new_rank_info[2].begin() ); … … 302 827 ::MPI_Group local_group; 303 828 ::MPI_Group new_group; 304 ::MPI_Comm new_comm;305 ::MPI_Comm intercomm;829 ::MPI_Comm *new_comm = new ::MPI_Comm; 830 ::MPI_Comm *intercomm = new ::MPI_Comm; 306 831 307 832 ::MPI_Comm_group(local_mpi_comm, &local_group); … … 309 834 ::MPI_Group_incl(local_group, size_info[2], new_rank_info[1].data(), &new_group); 310 835 311 ::MPI_Comm_create(local_mpi_comm, new_group, &new_comm);836 ::MPI_Comm_create(local_mpi_comm, new_group, new_comm); 312 837 313 838 … … 315 840 if(is_local_leader) 316 841 { 317 ::MPI_Comm_rank( new_comm, &leader_info[2]);318 } 319 320 ::MPI_Bcast(&leader_info[2], 1, static_cast< ::MPI_Datatype>(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm);321 322 if(new_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))323 { 324 ::MPI_Barrier( new_comm);325 326 ::MPI_Intercomm_create( new_comm, leader_info[2], peer_comm, leader_info[1], tag, &intercomm);842 ::MPI_Comm_rank(*new_comm, &leader_info[2]); 843 } 844 845 ::MPI_Bcast(&leader_info[2], 1, to_mpi_type(MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 846 847 if(new_comm != static_cast< ::MPI_Comm* >(MPI_COMM_NULL.mpi_comm)) 848 { 849 ::MPI_Barrier(*new_comm); 850 851 ::MPI_Intercomm_create(*new_comm, leader_info[2], peer_comm, leader_info[1], tag, intercomm); 327 852 328 853 int id; 329 MPI_Comm_rank(new_comm, &id);854 ::MPI_Comm_rank(*new_comm, &id); 330 855 int my_num_ep = new_ep_info[0][id]; 331 856 … … 333 858 MPI_Info info; 334 859 MPI_Comm_create_endpoints(new_comm, my_num_ep, info, ep_intercomm); 335 336 337 860 338 861 for(int i= 0; i<my_num_ep; i++) … … 345 868 } 346 869 870 347 871 #pragma omp critical (write_to_tag_list) 348 872 tag_list.push_back(make_pair( make_pair(tag, min(leader_info[0], leader_info[1])) , ep_intercomm)); 349 }350 351 352 } 353 354 355 MPI_Barrier_local(local_comm); 873 874 875 } 876 877 878 } 879 356 880 357 881 vector<int> bcast_buf(8); … … 362 886 } 363 887 364 MPI_Bcast(bcast_buf.data(), 8, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 888 889 890 MPI_Bcast(bcast_buf.data(), 8, MPI_INT, local_leader, local_comm); 891 365 892 366 893 if(!is_local_leader) … … 369 896 
std::copy(bcast_buf.begin()+4, bcast_buf.begin()+8, leader_info); 370 897 } 898 899 371 900 372 901 if(!is_local_leader) … … 387 916 } 388 917 389 MPI_Bcast(bcast_buf.data(), size_info[2]+size_info[1]+size_info[0]+1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 918 MPI_Bcast(bcast_buf.data(), size_info[2]+size_info[1]+size_info[0]+1, MPI_INT, local_leader, local_comm); 919 390 920 391 921 if(!is_local_leader) … … 399 929 400 930 int my_position = offset[rank_in_local_parent]+ep_rank_loc; 401 402 403 MPI_Barrier_local(local_comm);404 #pragma omp flush405 931 406 932 … … 417 943 418 944 found = true; 419 //tag_list.erase(iter);420 945 break; 421 946 } … … 432 957 ::MPI_Status mpi_status; 433 958 434 ::MPI_Send(&local_flag, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);435 436 ::MPI_Recv(&remote_flag, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status);959 ::MPI_Send(&local_flag, 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm); 960 961 ::MPI_Recv(&remote_flag, 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 437 962 } 438 963 … … 450 975 } 451 976 } 452 453 454 977 455 978 int intercomm_ep_rank, intercomm_ep_rank_loc, intercomm_mpi_rank; … … 463 986 intercomm_mpi_size = newintercomm->ep_comm_ptr->size_rank_info[2].second; 464 987 465 MPI_Bcast(&remote_ep_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 466 467 int my_rank_map_elem[2]; 468 469 470 my_rank_map_elem[0] = intercomm_ep_rank; 471 472 my_rank_map_elem[1] = (*newintercomm).ep_comm_ptr->comm_label; 473 474 vector<pair<int, int> > local_rank_map_array; 475 vector<pair<int, int> > remote_rank_map_array; 476 477 478 (*newintercomm).ep_comm_ptr->intercomm->local_rank_map = new RANK_MAP; 479 (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->resize(local_ep_size); 480 481 MPI_Allgather2(my_rank_map_elem, 2, MPI_INT, (*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2, MPI_INT, local_comm); 482 483 (*newintercomm).ep_comm_ptr->intercomm->remote_rank_map = new RANK_MAP; 484 (*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->resize(remote_ep_size); 485 486 int local_intercomm_size = intercomm_ep_size; 487 int remote_intercomm_size; 488 489 490 491 492 if(is_local_leader) 493 { 494 ::MPI_Status status; 495 496 ::MPI_Send((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 497 498 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 499 500 ::MPI_Send(&local_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 501 502 ::MPI_Recv(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 503 } 504 505 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 506 MPI_Bcast(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), 0, *newintercomm); 507 508 509 (*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map = new RANK_MAP; 510 (*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->resize(remote_intercomm_size); 511 512 (*newintercomm).ep_comm_ptr->intercomm->size_rank_info[0] = local_comm.ep_comm_ptr->size_rank_info[0]; 513 
(*newintercomm).ep_comm_ptr->intercomm->size_rank_info[1] = local_comm.ep_comm_ptr->size_rank_info[1]; 514 (*newintercomm).ep_comm_ptr->intercomm->size_rank_info[2] = local_comm.ep_comm_ptr->size_rank_info[2]; 515 516 517 if(is_local_leader) 518 { 519 ::MPI_Status status; 520 521 ::MPI_Send((*newintercomm).rank_map->data(), 2*local_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 522 523 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 524 } 525 526 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), 0, *newintercomm); 527 528 (*newintercomm).ep_comm_ptr->intercomm->local_comm = &(local_comm.ep_comm_ptr->comm_list[ep_rank_loc]); 529 (*newintercomm).ep_comm_ptr->intercomm->intercomm_tag = local_comm.ep_comm_ptr->comm_label; 530 531 532 return MPI_SUCCESS; 533 534 } 535 536 537 538 #elif _intelmpi 539 int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, int peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm) 540 { 541 int ep_rank, ep_rank_loc, mpi_rank; 542 int ep_size, num_ep, mpi_size; 543 544 ep_rank = local_comm.ep_comm_ptr->size_rank_info[0].first; 545 ep_rank_loc = local_comm.ep_comm_ptr->size_rank_info[1].first; 546 mpi_rank = local_comm.ep_comm_ptr->size_rank_info[2].first; 547 ep_size = local_comm.ep_comm_ptr->size_rank_info[0].second; 548 num_ep = local_comm.ep_comm_ptr->size_rank_info[1].second; 549 mpi_size = local_comm.ep_comm_ptr->size_rank_info[2].second; 550 551 std::vector<int> rank_info[4]; //! 0->rank_in_world of local_comm, 1->rank_in_local_parent of local_comm 552 //! 2->rank_in_world of remote_comm, 3->rank_in_local_parent of remote_comm 553 554 int rank_in_world; 555 int rank_in_local_parent; 556 557 int local_ep_size = ep_size; 558 int remote_ep_size; 559 560 ::MPI_Comm peer_comm = static_cast< ::MPI_Comm>(peer_comm_ptr); 561 ::MPI_Comm local_mpi_comm = static_cast< ::MPI_Comm>(local_comm.mpi_comm); 562 563 ::MPI_Comm_rank(peer_comm, &rank_in_world); 564 565 ::MPI_Comm_rank(static_cast< ::MPI_Comm>(local_comm.mpi_comm), &rank_in_local_parent); 566 567 bool is_proc_master = false; 568 bool is_local_leader = false; 569 bool is_final_master = false; 570 571 572 if(ep_rank == local_leader) { is_proc_master = true; is_local_leader = true; is_final_master = true;} 573 if(ep_rank_loc == 0 && mpi_rank != local_comm.rank_map->at(local_leader).second) is_proc_master = true; 574 575 576 int size_info[4]; //! used for choose size of rank_info 0-> mpi_size of local_comm, 1-> mpi_size of remote_comm 577 578 int leader_info[4]; //! 0->world rank of local_leader, 1->world rank of remote leader 579 580 581 std::vector<int> ep_info[2]; //! 
0-> num_ep in local_comm, 1->num_ep in remote_comm 582 583 std::vector<int> new_rank_info[4]; 584 std::vector<int> new_ep_info[2]; 585 586 std::vector<int> offset; 587 588 if(is_proc_master) 589 { 590 591 size_info[0] = mpi_size; 592 593 rank_info[0].resize(size_info[0]); 594 rank_info[1].resize(size_info[0]); 595 596 ep_info[0].resize(size_info[0]); 597 598 vector<int> send_buf(6); 599 vector<int> recv_buf(3*size_info[0]); 600 601 send_buf[0] = rank_in_world; 602 send_buf[1] = rank_in_local_parent; 603 send_buf[2] = num_ep; 604 605 ::MPI_Allgather(send_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), recv_buf.data(), 3, static_cast< ::MPI_Datatype> (MPI_INT), local_mpi_comm); 606 607 for(int i=0; i<size_info[0]; i++) 608 { 609 rank_info[0][i] = recv_buf[3*i]; 610 rank_info[1][i] = recv_buf[3*i+1]; 611 ep_info[0][i] = recv_buf[3*i+2]; 612 } 613 614 615 if(is_local_leader) 616 { 617 leader_info[0] = rank_in_world; 618 leader_info[1] = mpi_remote_leader; 619 620 ::MPI_Status mpi_status; 621 622 send_buf[0] = size_info[0]; 623 send_buf[1] = local_ep_size; 624 625 ::MPI_Send(send_buf.data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 626 627 ::MPI_Recv(recv_buf.data(), 2, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 628 629 recv_buf[2] = rank_in_world; 630 recv_buf[3] = mpi_remote_leader; 631 632 } 633 634 ::MPI_Bcast(recv_buf.data(), 4, static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 635 636 size_info[1] = recv_buf[0]; 637 remote_ep_size = recv_buf[1]; 638 leader_info[0] = recv_buf[2]; 639 leader_info[1] = recv_buf[3]; 640 641 rank_info[2].resize(size_info[1]); 642 rank_info[3].resize(size_info[1]); 643 644 ep_info[1].resize(size_info[1]); 645 646 send_buf.resize(3*size_info[0]); 647 recv_buf.resize(3*size_info[1]); 648 649 if(is_local_leader) 650 { 651 ::MPI_Status mpi_status; 652 653 654 std::copy ( rank_info[0].data(), rank_info[0].data() + size_info[0], send_buf.begin() ); 655 std::copy ( rank_info[1].data(), rank_info[1].data() + size_info[0], send_buf.begin() + size_info[0] ); 656 std::copy ( ep_info[0].data(), ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[0] ); 657 658 ::MPI_Send(send_buf.data(), 3*size_info[0], static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 659 660 ::MPI_Recv(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 661 662 } 663 664 ::MPI_Bcast(recv_buf.data(), 3*size_info[1], static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 665 666 std::copy ( recv_buf.data(), recv_buf.data() + size_info[1], rank_info[2].begin() ); 667 std::copy ( recv_buf.data() + size_info[1], recv_buf.data() + 2*size_info[1], rank_info[3].begin() ); 668 std::copy ( recv_buf.data() + 2*size_info[1], recv_buf.data() + 3*size_info[1], ep_info[1].begin() ); 669 670 offset.resize(size_info[0]); 671 672 if(leader_info[0]<leader_info[1]) // erase all ranks doubled with remote_comm, except the local leader 673 { 674 675 bool found = false; 676 int ep_tmp; 677 int ep_local; 678 int ep_remote; 679 for(int i=0; i<size_info[0]; i++) 680 { 681 int target = rank_info[0][i]; 682 found = false; 683 for(int j=0; j<size_info[1]; j++) 684 { 685 if(target == rank_info[2][j]) 686 { 687 found = true; 688 ep_tmp = ep_info[1][j]; 689 ep_local = ep_info[0][j]; 690 ep_remote = ep_info[1][j]; 691 break; 692 } 693 } 694 
if(found) 695 { 696 697 if(target == leader_info[0]) // the leader is doubled in remote 698 { 699 new_rank_info[0].push_back(target); 700 new_rank_info[1].push_back(rank_info[1][i]); 701 702 new_ep_info[0].push_back(ep_local + ep_remote); 703 offset[i] = 0; 704 } 705 else 706 { 707 offset[i] = ep_local; 708 } 709 } 710 else 711 { 712 new_rank_info[0].push_back(target); 713 new_rank_info[1].push_back(rank_info[1][i]); 714 715 new_ep_info[0].push_back(ep_info[0][i]); 716 717 offset[i] = 0; 718 } 719 720 } 721 } 722 723 else // erase rank doubled with remote leader 724 { 725 726 bool found = false; 727 int ep_tmp; 728 int ep_local; 729 int ep_remote; 730 for(int i=0; i<size_info[0]; i++) 731 { 732 int target = rank_info[0][i]; 733 found = false; 734 for(int j=0; j<size_info[1]; j++) 735 { 736 737 if(target == rank_info[2][j]) 738 { 739 found = true; 740 ep_tmp = ep_info[1][j]; 741 ep_local = ep_info[0][j]; 742 ep_remote = ep_info[1][j]; 743 break; 744 } 745 } 746 if(found) 747 { 748 if(target != leader_info[1]) 749 { 750 new_rank_info[0].push_back(target); 751 new_rank_info[1].push_back(rank_info[1][i]); 752 753 new_ep_info[0].push_back(ep_local + ep_remote); 754 offset[i] = 0; 755 } 756 else // found remote leader 757 { 758 offset[i] = ep_remote; 759 } 760 } 761 else 762 { 763 new_rank_info[0].push_back(target); 764 new_rank_info[1].push_back(rank_info[1][i]); 765 766 new_ep_info[0].push_back(ep_info[0][i]); 767 offset[i] = 0; 768 } 769 } 770 } 771 772 if(offset[mpi_rank] == 0) 773 { 774 is_final_master = true; 775 } 776 777 778 // size_info[4]: 2->size of new_ep_info for local, 3->size of new_ep_info for remote 779 780 if(is_local_leader) 781 { 782 size_info[2] = new_ep_info[0].size(); 783 ::MPI_Status mpi_status; 784 785 ::MPI_Send(&size_info[2], 1, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 786 787 ::MPI_Recv(&size_info[3], 1, static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 788 } 789 790 ::MPI_Bcast(&size_info[2], 2, static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 791 792 new_rank_info[2].resize(size_info[3]); 793 new_rank_info[3].resize(size_info[3]); 794 new_ep_info[1].resize(size_info[3]); 795 796 send_buf.resize(size_info[2]); 797 recv_buf.resize(size_info[3]); 798 799 if(is_local_leader) 800 { 801 ::MPI_Status mpi_status; 802 803 std::copy ( new_rank_info[0].data(), new_rank_info[0].data() + size_info[2], send_buf.begin() ); 804 std::copy ( new_rank_info[1].data(), new_rank_info[1].data() + size_info[2], send_buf.begin() + size_info[2] ); 805 std::copy ( new_ep_info[0].data(), new_ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[2] ); 806 807 ::MPI_Send(send_buf.data(), 3*size_info[2], static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm); 808 809 ::MPI_Recv(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype> (MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 810 } 811 812 ::MPI_Bcast(recv_buf.data(), 3*size_info[3], static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 813 814 std::copy ( recv_buf.data(), recv_buf.data() + size_info[3], new_rank_info[2].begin() ); 815 std::copy ( recv_buf.data() + size_info[3], recv_buf.data() + 2*size_info[3], new_rank_info[3].begin() ); 816 std::copy ( recv_buf.data() + 2*size_info[3], recv_buf.data() + 3*size_info[3], new_ep_info[1].begin() ); 817 818 } 819 820 821 822 if(is_proc_master) 823 { 824 // leader_info[4]: 
2-> rank of local leader in new_group generated comm; 825 // 3-> rank of remote leader in new_group generated comm; 826 ::MPI_Group local_group; 827 ::MPI_Group new_group; 828 ::MPI_Comm new_comm; 829 ::MPI_Comm intercomm; 830 831 ::MPI_Comm_group(local_mpi_comm, &local_group); 832 833 ::MPI_Group_incl(local_group, size_info[2], new_rank_info[1].data(), &new_group); 834 835 ::MPI_Comm_create(local_mpi_comm, new_group, &new_comm); 836 837 838 839 if(is_local_leader) 840 { 841 ::MPI_Comm_rank(new_comm, &leader_info[2]); 842 } 843 844 ::MPI_Bcast(&leader_info[2], 1, static_cast< ::MPI_Datatype> (MPI_INT), local_comm.rank_map->at(local_leader).second, local_mpi_comm); 845 846 if(new_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm)) 847 { 848 ::MPI_Barrier(new_comm); 849 850 ::MPI_Intercomm_create(new_comm, leader_info[2], peer_comm, leader_info[1], tag, &intercomm); 851 852 int id; 853 ::MPI_Comm_rank(new_comm, &id); 854 int my_num_ep = new_ep_info[0][id]; 855 856 MPI_Comm *ep_intercomm; 857 MPI_Info info; 858 MPI_Comm_create_endpoints(new_comm, my_num_ep, info, ep_intercomm); 859 860 for(int i= 0; i<my_num_ep; i++) 861 { 862 ep_intercomm[i].is_intercomm = true; 863 864 ep_intercomm[i].ep_comm_ptr->intercomm = new ep_lib::ep_intercomm; 865 ep_intercomm[i].ep_comm_ptr->intercomm->mpi_inter_comm = intercomm; 866 ep_intercomm[i].ep_comm_ptr->comm_label = leader_info[0]; 867 } 868 869 870 #pragma omp critical (write_to_tag_list) 871 tag_list.push_back(make_pair( make_pair(tag, min(leader_info[0], leader_info[1])) , ep_intercomm)); 872 873 874 } 875 876 877 } 878 879 880 vector<int> bcast_buf(8); 881 if(is_local_leader) 882 { 883 std::copy(size_info, size_info+4, bcast_buf.begin()); 884 std::copy(leader_info, leader_info+4, bcast_buf.begin()+4); 885 } 886 887 888 889 MPI_Bcast(bcast_buf.data(), 8, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 890 891 892 if(!is_local_leader) 893 { 894 std::copy(bcast_buf.begin(), bcast_buf.begin()+4, size_info); 895 std::copy(bcast_buf.begin()+4, bcast_buf.begin()+8, leader_info); 896 } 897 898 899 900 if(!is_local_leader) 901 { 902 new_rank_info[1].resize(size_info[2]); 903 ep_info[1].resize(size_info[1]); 904 offset.resize(size_info[0]); 905 } 906 907 bcast_buf.resize(size_info[2]+size_info[1]+size_info[0]+1); 908 909 if(is_local_leader) 910 { 911 bcast_buf[0] = remote_ep_size; 912 std::copy(new_rank_info[1].data(), new_rank_info[1].data()+size_info[2], bcast_buf.begin()+1); 913 std::copy(ep_info[1].data(), ep_info[1].data()+size_info[1], bcast_buf.begin()+size_info[2]+1); 914 std::copy(offset.data(), offset.data()+size_info[0], bcast_buf.begin()+size_info[2]+size_info[1]+1); 915 } 916 917 MPI_Bcast(bcast_buf.data(), size_info[2]+size_info[1]+size_info[0]+1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 918 919 920 if(!is_local_leader) 921 { 922 remote_ep_size = bcast_buf[0]; 923 std::copy(bcast_buf.data()+1, bcast_buf.data()+1+size_info[2], new_rank_info[1].begin()); 924 std::copy(bcast_buf.data()+1+size_info[2], bcast_buf.data()+1+size_info[2]+size_info[1], ep_info[1].begin()); 925 std::copy(bcast_buf.data()+1+size_info[2]+size_info[1], bcast_buf.data()+1+size_info[2]+size_info[1]+size_info[0], offset.begin()); 926 } 927 928 929 int my_position = offset[rank_in_local_parent]+ep_rank_loc; 930 931 932 #pragma omp critical (read_from_tag_list) 933 { 934 bool found = false; 935 while(!found) 936 { 937 for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); 
iter!=tag_list.end(); iter++) 938 { 939 if((*iter).first == make_pair(tag, min(leader_info[0], leader_info[1]))) 940 { 941 *newintercomm = iter->second[my_position]; 942 943 found = true; 944 break; 945 } 946 } 947 } 948 } 949 950 MPI_Barrier(local_comm); 951 952 if(is_local_leader) 953 { 954 int local_flag = true; 955 int remote_flag = false; 956 ::MPI_Status mpi_status; 957 958 ::MPI_Send(&local_flag, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm); 959 960 ::MPI_Recv(&remote_flag, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &mpi_status); 961 } 962 963 MPI_Barrier(local_comm); 964 965 if(is_proc_master) 966 { 967 for(std::list<std::pair < std::pair<int,int>, MPI_Comm* > >::iterator iter = tag_list.begin(); iter!=tag_list.end(); iter++) 968 { 969 if((*iter).first == make_pair(tag, min(leader_info[0], leader_info[1]))) 970 { 971 tag_list.erase(iter); 972 break; 973 } 974 } 975 } 976 977 int intercomm_ep_rank, intercomm_ep_rank_loc, intercomm_mpi_rank; 978 int intercomm_ep_size, intercomm_num_ep, intercomm_mpi_size; 979 980 intercomm_ep_rank = newintercomm->ep_comm_ptr->size_rank_info[0].first; 981 intercomm_ep_rank_loc = newintercomm->ep_comm_ptr->size_rank_info[1].first; 982 intercomm_mpi_rank = newintercomm->ep_comm_ptr->size_rank_info[2].first; 983 intercomm_ep_size = newintercomm->ep_comm_ptr->size_rank_info[0].second; 984 intercomm_num_ep = newintercomm->ep_comm_ptr->size_rank_info[1].second; 985 intercomm_mpi_size = newintercomm->ep_comm_ptr->size_rank_info[2].second; 986 987 988 MPI_Bcast(&remote_ep_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm); 988 989 MPI_Bcast(&remote_ep_size, 1, MPI_INT, local_leader, local_comm); 989 990 990 991 int my_rank_map_elem[2]; … … 1015 1016 ::MPI_Status status; 1016 1017 1017 ::MPI_Send((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);1018 1019 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &status);1020 1021 ::MPI_Send(&local_intercomm_size, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);1022 1023 ::MPI_Recv(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &status);1024 } 1025 1026 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), local_leader, local_comm);1027 MPI_Bcast(&remote_intercomm_size, 1, static_cast< ::MPI_Datatype> (MPI_INT), 0, *newintercomm);1018 ::MPI_Send((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm); 1019 1020 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 1021 1022 ::MPI_Send(&local_intercomm_size, 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm); 1023 1024 ::MPI_Recv(&remote_intercomm_size, 1, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 1025 } 1026 1027 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, MPI_INT, local_leader, local_comm); 1028 MPI_Bcast(&remote_intercomm_size, 1, MPI_INT, 0, *newintercomm); 1028 1029 1029 1030 … … 1040 1041 ::MPI_Status status; 1041 1042 1042 
::MPI_Send((*newintercomm).rank_map->data(), 2*local_intercomm_size, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm);1043 1044 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype>(MPI_INT), mpi_remote_leader, tag, peer_comm, &status);1045 } 1046 1047 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, static_cast< ::MPI_Datatype> (MPI_INT), 0, *newintercomm);1043 ::MPI_Send((*newintercomm).rank_map->data(), 2*local_intercomm_size, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm); 1044 1045 ::MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, to_mpi_type(MPI_INT), mpi_remote_leader, tag, peer_comm, &status); 1046 } 1047 1048 MPI_Bcast((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, MPI_INT, 0, *newintercomm); 1048 1049 1049 1050 (*newintercomm).ep_comm_ptr->intercomm->local_comm = &(local_comm.ep_comm_ptr->comm_list[ep_rank_loc]); … … 1054 1055 } 1055 1056 1056 #endif1057 // #endif 1057 1058 1058 1059 } -
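The bulk of this file's changes are one mechanical substitution: every `static_cast< ::MPI_Comm>` of a stored handle becomes `to_mpi_comm(...)`, and the intermediate communicators (`new_comm`, `intercomm`) become heap-allocated `::MPI_Comm*` so that only a `void*` needs to be stored in the EP structures. A minimal standalone sketch of that pattern, assuming nothing beyond plain MPI (the `store_and_use` wrapper is hypothetical; `to_mpi_comm` mirrors the helper in ep_mpi.hpp):

    #include <mpi.h>

    // Recover a real communicator from the type-erased pointer the EP
    // structures now store; one dereference, no implementation #ifdefs.
    MPI_Comm to_mpi_comm(void* comm)
    {
        return *(static_cast<MPI_Comm*>(comm));
    }

    void store_and_use(void*& stored)           // hypothetical caller
    {
        MPI_Comm* new_comm = new MPI_Comm;      // heap-allocated so the pointer
        MPI_Comm_dup(MPI_COMM_WORLD, new_comm); //   stays valid after return
        stored = new_comm;                      // keep only the erased pointer
        MPI_Barrier(to_mpi_comm(stored));       // convert back at each use
    }

This is also why the `#ifdef _openmpi` / `#elif _intelmpi` twins of `MPI_Intercomm_create_from_world` could be reduced to comments above: with the handle behind `void*`, one body serves both MPIs.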
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib.cpp
r1328 r1354 246 246 } 247 247 248 MPI_Comm to_mpi_comm( intcomm)248 MPI_Comm to_mpi_comm(void* comm) 249 249 { 250 return static_cast< MPI_Comm >(comm);250 return *(static_cast< MPI_Comm* >(comm)); 251 251 } 252 252 -
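The old `to_mpi_comm(int)` compiled only where an MPI handle is an integer (Intel MPI); the replacement dereferences the stored pointer and is implementation-neutral. Before/after, as a standalone sketch:

    #include <mpi.h>

    // Before (Intel MPI only): the handle itself was stored in an int.
    //   MPI_Comm to_mpi_comm(int comm) { return static_cast<MPI_Comm>(comm); }

    // After: the *address* of a real handle is stored, so a single
    // dereference works whether MPI_Comm is an int or a pointer underneath.
    MPI_Comm to_mpi_comm(void* comm)
    {
        return *(static_cast<MPI_Comm*>(comm));
    }

Call sites must now always pass the stored pointer, never a raw handle; on Open MPI a raw `::MPI_Comm` would still convert to `void*` silently, and the explicit dereference is what makes that misuse crash at first use rather than alias garbage quietly.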
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib.hpp
r1287 r1354 43 43 int MPI_Get_count(const MPI_Status *status, MPI_Datatype datatype, int *count); 44 44 45 #ifdef _openmpi46 int MPI_Comm_create_endpoints(void* mpi_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from MPI to create endpoints47 #elif _intelmpi48 int MPI_Comm_create_endpoints(int mpi_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from MPI to create endpoints49 #endif45 // #ifdef _openmpi 46 // int MPI_Comm_create_endpoints(void* mpi_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from MPI to create endpoints 47 // #elif _intelmpi 48 // int MPI_Comm_create_endpoints(int mpi_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from MPI to create endpoints 49 // #endif 50 50 51 int MPI_Comm_create_endpoints( MPI_Comm base_comm, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from EP to create endpoints51 int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from EP to create endpoints 52 52 53 53 int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm); -
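With the communicator erased to `void*`, the per-MPI prototypes of `MPI_Comm_create_endpoints` collapse into one, and the dead `#ifdef` pair is kept as comments. A hypothetical call pattern, assuming only the signature declared here (the wrapper and its locals are illustrative):

    // int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep,
    //                               MPI_Info info, MPI_Comm*& out_comm_hdls);
    void make_endpoints(int num_ep)
    {
        ::MPI_Comm* base = new ::MPI_Comm;      // heap handle, as elsewhere in r1354
        ::MPI_Comm_dup(MPI_COMM_WORLD, base);
        ep_lib::MPI_Comm* endpoints;            // allocated by the callee,
        ep_lib::MPI_Info  info;                 //   one entry per endpoint
        ep_lib::MPI_Comm_create_endpoints(base, num_ep, info, endpoints);
    }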
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib_intercomm.hpp
r1185 r1354 35 35 int MPI_Intercomm_create_kernel(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm); 36 36 37 #ifdef _intelmpi 38 int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, int peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm); 39 #elif _openmpi 37 // #ifdef _intelmpi 38 // int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, int peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm); 39 // #elif _openmpi 40 // int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm); 41 // #endif 42 40 43 int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm); 41 #endif42 44 43 45 int MPI_Intercomm_create_unique_leader(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm); -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_merge.cpp
r1295 r1354 2 2 #include <mpi.h> 3 3 #include "ep_declaration.hpp" 4 #include "ep_mpi.hpp" 4 5 5 6 using namespace std; … … 84 85 if(intra_ep_rank_loc == 0) 85 86 { 86 ::MPI_Bcast(reorder, intra_ep_size, static_cast< ::MPI_Datatype> (MPI_INT), 0, static_cast< ::MPI_Comm>(newintracomm->mpi_comm));87 ::MPI_Bcast(reorder, intra_ep_size, to_mpi_type(MPI_INT), 0, to_mpi_comm(newintracomm->mpi_comm)); 87 88 88 89 vector< pair<int, int> > tmp_rank_map(intra_ep_size); … … 142 143 143 144 144 ::MPI_Comm mpi_intracomm;145 ::MPI_Comm *mpi_intracomm = new ::MPI_Comm; 145 146 MPI_Comm *ep_intracomm; 146 147 … … 148 149 { 149 150 150 ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm>(inter_comm.ep_comm_ptr->intercomm->mpi_inter_comm);151 152 ::MPI_Intercomm_merge(mpi_comm, high, &mpi_intracomm);151 ::MPI_Comm mpi_comm = to_mpi_comm(inter_comm.ep_comm_ptr->intercomm->mpi_inter_comm); 152 153 ::MPI_Intercomm_merge(mpi_comm, high, mpi_intracomm); 153 154 MPI_Info info; 154 155 MPI_Comm_create_endpoints(mpi_intracomm, num_ep, info, ep_intracomm); -
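Same migration applied to the merge path: the merged intra-communicator must now live on the heap, because `MPI_Comm_create_endpoints` records its address inside every endpoint communicator it returns. The sequence, sketched with names from the diff (the free parameters stand in for the surrounding function's locals):

    void merge_to_endpoints(void* stored_inter, int high, int num_ep)
    {
        ::MPI_Comm* mpi_intracomm = new ::MPI_Comm;       // must outlive this call
        ::MPI_Intercomm_merge(to_mpi_comm(stored_inter),  // recover the inter-comm
                              high, mpi_intracomm);
        ep_lib::MPI_Comm* ep_intracomm;                   // filled by the callee
        ep_lib::MPI_Info  info;
        ep_lib::MPI_Comm_create_endpoints(mpi_intracomm, num_ep, info, ep_intracomm);
    }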
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_mpi.hpp
r1287 r1354 6 6 MPI_Datatype to_mpi_type(ep_lib::MPI_Datatype type); 7 7 MPI_Op to_mpi_op(ep_lib::MPI_Op op); 8 MPI_Comm to_mpi_comm( intcomm);8 MPI_Comm to_mpi_comm(void* comm); 9 9 10 10 #endif // EP_MPI_HPP_INCLUDED -
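After this changeset, these three adapters are the entire boundary between `ep_lib` types and real MPI types. A typical converted call site looks like the following (buffers, count, `datatype`, `op` and `comm` are placeholders for the surrounding function's arguments):

    ::MPI_Allreduce(sendbuf, recvbuf, count,
                    to_mpi_type(datatype),        // ep_lib::MPI_Datatype -> ::MPI_Datatype
                    to_mpi_op(op),                // ep_lib::MPI_Op       -> ::MPI_Op
                    to_mpi_comm(comm.mpi_comm));  // void*                -> ::MPI_Comm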
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_rank.cpp
r1134 r1354 2 2 #include <mpi.h> 3 3 #include "ep_declaration.hpp" 4 #include "ep_mpi.hpp" 4 5 5 6 namespace ep_lib … … 30 31 if(comm != MPI_COMM_NULL) 31 32 { 32 ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);33 ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm); 33 34 ::MPI_Comm_rank(mpi_comm, rank); 34 35 return 0; -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_size.cpp
r1287 r1354 2 2 #include <mpi.h> 3 3 #include "ep_declaration.hpp" 4 #include "ep_mpi.hpp" 4 5 5 6 namespace ep_lib … … 31 32 Debug("Calling EP_Comm_size\n"); 32 33 33 if(comm.mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))34 if(comm.mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm)) 34 35 { 35 ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);36 ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm); 36 37 int mpi_size; 37 38 … … 54 55 if(!comm.is_ep) 55 56 { 56 if(comm.mpi_comm != static_cast< ::MPI_Comm >(MPI_COMM_NULL.mpi_comm))57 if(comm.mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm)) 57 58 { 58 ::MPI_Comm mpi_comm = static_cast< ::MPI_Comm >(comm.mpi_comm);59 ::MPI_Comm mpi_comm = to_mpi_comm(comm.mpi_comm); 59 60 ::MPI_Comm_remote_size(mpi_comm, size); 60 61 return 0; -
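Note that the null check changes meaning here: it used to compare handle values and now compares the stored pointer against the one held by the global `MPI_COMM_NULL` object, i.e. pointer identity. Sketch of the new guard, with the caveat spelled out (an assumption about the library's invariants, not something this diff proves):

    if (comm.mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL.mpi_comm))
    {
        int size;
        ::MPI_Comm_size(to_mpi_comm(comm.mpi_comm), &size);   // non-null, safe
    }
    // Assumed invariant: every null EP communicator aliases MPI_COMM_NULL's
    // storage; a separate heap ::MPI_Comm that merely *equals* the null
    // handle would slip past this pointer comparison.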
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_split.cpp
r1338 r1354 2 2 #include <mpi.h> 3 3 #include "ep_declaration.hpp" 4 #include "ep_mpi.hpp" 4 5 5 6 using namespace std; … … 134 135 } 135 136 136 ::MPI_Comm split_mpi_comm[num_color]; 137 ::MPI_Comm **split_mpi_comm; 138 split_mpi_comm = new ::MPI_Comm* [num_color]; 139 for(int ii=0; ii<num_color; ii++) 140 split_mpi_comm[ii] = new ::MPI_Comm; 137 141 138 142 for(int j=0; j<num_color; j++) … … 143 147 if(matched_number_loc[j] == 0) master_color = MPI_UNDEFINED; 144 148 145 ::MPI_Comm_split( static_cast< ::MPI_Comm>(comm.mpi_comm), master_color, mpi_rank, &split_mpi_comm[j]);149 ::MPI_Comm_split(to_mpi_comm(comm.mpi_comm), master_color, mpi_rank, split_mpi_comm[j]); 146 150 147 151 comm.ep_comm_ptr->comm_list->mpi_bridge = split_mpi_comm[j]; -
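The variable-length array `::MPI_Comm split_mpi_comm[num_color]` (a compiler extension in C++) gives way to per-color heap allocations, again so each split communicator can be parked behind a `void*` (`mpi_bridge`). The diff's scattered hunks assemble to roughly this (with `num_color`, `master_color` and `mpi_rank` taken from the surrounding function):

    ::MPI_Comm** split_mpi_comm = new ::MPI_Comm*[num_color];
    for (int i = 0; i < num_color; ++i)
        split_mpi_comm[i] = new ::MPI_Comm;          // one heap handle per color

    for (int j = 0; j < num_color; ++j)
    {
        // master_color is MPI_UNDEFINED on processes not contributing to color j
        ::MPI_Comm_split(to_mpi_comm(comm.mpi_comm), master_color, mpi_rank,
                         split_mpi_comm[j]);
        comm.ep_comm_ptr->comm_list->mpi_bridge = split_mpi_comm[j];
    }
    // The outer array is never freed here; ownership of each handle passes to
    // the EP communicators built from it.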
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_type.hpp
r1328 r1354 153 153 154 154 155 #ifdef _intelmpi 156 int mpi_inter_comm; 157 #elif _openmpi 155 // #ifdef _intelmpi 156 // int mpi_inter_comm; 157 // #elif _openmpi 158 // void * mpi_inter_comm; 159 // #endif 160 158 161 void * mpi_inter_comm; 159 #endif160 162 161 163 RANK_MAP *intercomm_rank_map; … … 258 260 public: 259 261 260 #ifdef _intelmpi 261 int mpi_comm; 262 #elif _openmpi 262 // #ifdef _intelmpi 263 // int mpi_comm; 264 // #elif _openmpi 265 // void * mpi_comm; 266 // #endif 267 263 268 void * mpi_comm; 264 #endif265 269 266 270 bool is_ep; … … 276 280 277 281 278 #ifdef _intelmpi 279 int mpi_bridge; 280 #elif _openmpi 282 // #ifdef _intelmpi 283 // int mpi_bridge; 284 // #elif _openmpi 285 // void * mpi_bridge; 286 // #endif 287 281 288 void * mpi_bridge; 282 #endif283 289 284 290 MPI_Comm() … … 294 300 } 295 301 296 #ifdef _intelmpi 297 MPI_Comm(int comm) 302 // #ifdef _intelmpi 303 // MPI_Comm(int comm) 304 // { 305 // is_ep = false; 306 // is_intercomm = false; 307 // my_buffer = NULL; 308 // ep_barrier = NULL; 309 // rank_map = NULL; 310 // ep_comm_ptr = NULL; 311 // mem_bridge = NULL; 312 // mpi_bridge = NULL; 313 // mpi_comm = comm; 314 // } 315 316 // #elif _openmpi 317 318 // MPI_Comm(void* comm) 319 // { 320 // is_ep = false; 321 // is_intercomm = false; 322 // my_buffer = NULL; 323 // ep_barrier = NULL; 324 // rank_map = NULL; 325 // ep_comm_ptr = NULL; 326 // mem_bridge = NULL; 327 // mpi_bridge = NULL; 328 // mpi_comm = comm; 329 // } 330 // #endif 331 332 MPI_Comm(void* comm) 298 333 { 299 334 is_ep = false; … … 308 343 } 309 344 310 #elif _openmpi311 312 MPI_Comm(void* comm)313 {314 is_ep = false;315 is_intercomm = false;316 my_buffer = NULL;317 ep_barrier = NULL;318 rank_map = NULL;319 ep_comm_ptr = NULL;320 mem_bridge = NULL;321 mpi_bridge = NULL;322 mpi_comm = comm;323 }324 #endif325 326 327 345 bool operator == (MPI_Comm right) 328 346 {
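Net effect on the data model: `mpi_comm`, `mpi_inter_comm` and `mpi_bridge` are unconditionally `void*`, and the Intel-specific `int` constructor disappears. The invariant the rest of r1354 leans on can be stated as a round-trip (the `wrap` helper is hypothetical):

    // Every non-null mpi_comm is the address of a heap-allocated ::MPI_Comm.
    ep_lib::MPI_Comm wrap(::MPI_Comm raw)               // hypothetical helper
    {
        ::MPI_Comm* p = new ::MPI_Comm(raw);            // copy the handle to the heap
        return ep_lib::MPI_Comm(static_cast<void*>(p)); // void* ctor: is_ep = false
    }
    // For any valid handle c:  to_mpi_comm(wrap(c).mpi_comm) == c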