Changeset 1539
- Timestamp: 06/12/18 11:54:13
- Location: XIOS/dev/branch_openmp/extern/src_ep_dev
- Files: 1 added, 7 deleted, 39 edited
Legend (diffs below are rendered in unified style):
- unmodified lines carry no prefix
- "+" marks added lines
- "-" marks removed lines
- "…" marks elided unchanged regions
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allgather.cpp
r1520 → r1539
  {

-   if(!comm->is_ep && comm->mpi_comm)
-   {
-     return ::MPI_Allgather(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Allgather(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Allgather_intercomm(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);

    assert(sendcount == recvcount);
…
    MPI_Bcast_local(recvbuf, count*ep_size, datatype, 0, comm);

-   MPI_Barrier(comm);
-
    if(is_master)
    {
…
  }

+ int MPI_Allgather_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+ {
+   printf("MPI_Allgather_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
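The pattern introduced here recurs in every collective of this changeset: plain MPI communicators are forwarded straight to the MPI library, endpoint intercomms are routed to a *_intercomm variant (all stubs that abort for now), and only endpoint intracomms fall through to the endpoint algorithm. A minimal compilable sketch of that dispatch shape, assuming a simplified comm type; apart from the is_ep/is_intercomm flag names, everything below is hypothetical illustration, not ep_lib code:

#include <cstdio>
#include <cstdlib>

// Simplified stand-in for ep_lib's communicator; the two flags follow the diff,
// the rest of this file is hypothetical.
struct Comm
{
  bool is_ep;        // false: plain MPI communicator, forward to the MPI library
  bool is_intercomm; // true: endpoint intercomm, forward to the *_intercomm variant
};

static int allgather_mpi(Comm*)       { std::puts("::MPI_Allgather path"); return 0; }
static int allgather_intercomm(Comm*) { std::puts("intercomm path (stub: abort)"); std::abort(); }
static int allgather_endpoint(Comm*)  { std::puts("endpoint intracomm path"); return 0; }

// The r1539 prologue: two early returns, then the endpoint implementation.
int allgather(Comm* comm)
{
  if(!comm->is_ep)       return allgather_mpi(comm);
  if(comm->is_intercomm) return allgather_intercomm(comm);
  return allgather_endpoint(comm);
}

int main()
{
  Comm c{true, false}; // an endpoint intracomm takes the third branch
  return allgather(&c);
}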
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allgatherv.cpp
r1520 → r1539
  {

-   if(!comm->is_ep && comm->mpi_comm)
-   {
-     return ::MPI_Allgatherv(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcounts, displs, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
-   }
-
-   if(!comm->mpi_comm) return 0;
+   if(!comm->is_ep) return ::MPI_Allgatherv(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcounts, displs, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Allgatherv_intercomm(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);

…
      delete[] tmp_recvbuf;
    }
  }

+ int MPI_Allgatherv_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm)
+ {
+   printf("MPI_Allgatherv_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }

  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allocate.cpp
r1521 → r1539
  {
    ::MPI_Alloc_mem(to_mpi_aint(size), to_mpi_info(info), baseptr);
-   //::MPI_Alloc_mem(size.mpi_aint, MPI_INFO_NULL_STD, baseptr);
    return 0;
  }
…
  {
    ::MPI_Alloc_mem(size, *(static_cast< ::MPI_Info*>(info->mpi_info)), baseptr);
-   //::MPI_Alloc_mem(size, MPI_INFO_NULL_STD, baseptr);
    return 0;
  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_allreduce.cpp
r1520 → r1539
  {

  int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
-   if(!comm->is_ep && comm->mpi_comm)
-   {
-     return ::MPI_Allreduce(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Allreduce(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Allreduce_intercomm(sendbuf, recvbuf, count, datatype, op, comm);

…
    }
-
-   MPI_Barrier_local(comm);
  }

+ int MPI_Allreduce_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+ {
+   printf("MPI_Allreduce_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
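The unchanged middle of this function implements the usual hierarchy for endpoint collectives: thread contributions are combined inside each process, one thread per process runs the real MPI collective, and the result is published back to all threads. A self-contained sketch of that scheme in plain MPI + OpenMP; this is illustrative code, not the ep_lib implementation:

#include <mpi.h>
#include <omp.h>
#include <cstdio>

int main(int argc, char** argv)
{
  int provided;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);

  int world_rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);

  int process_sum = 0; // per-process partial result, shared by the threads
  int global_sum  = 0; // result every thread reads after the local "bcast" step

  #pragma omp parallel
  {
    int my_value = world_rank * 10 + omp_get_thread_num(); // one value per endpoint

    #pragma omp atomic
    process_sum += my_value;         // local reduction inside the process

    #pragma omp barrier
    #pragma omp master
    MPI_Allreduce(&process_sum, &global_sum, 1, MPI_INT, MPI_SUM,
                  MPI_COMM_WORLD);   // one thread per process crosses processes
    #pragma omp barrier              // local "bcast": all threads see global_sum

    #pragma omp critical
    std::printf("rank %d thread %d sees sum %d\n",
                world_rank, omp_get_thread_num(), global_sum);
  }

  MPI_Finalize();
  return 0;
}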
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_alltoall.cpp
r1520 → r1539
  int MPI_Alltoall(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
  {
-   if(!comm->is_ep)
-   {
-     return ::MPI_Alltoall(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Alltoall(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Alltoall_intercomm(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);

…
      delete[] tmp_sendbuf;
    }
-
-   MPI_Barrier(comm);
-
-   return 0;
  }

+ int MPI_Alltoall_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
+ {
+   printf("MPI_Alltoall_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_barrier.cpp
r1520 → r1539
  int MPI_Barrier(MPI_Comm comm)
  {
-   if(comm->is_intercomm) return MPI_Barrier_intercomm(comm);

    if(comm->is_ep)
    {
-     return MPI_Barrier_intracomm(comm);
+     return MPI_Barrier_endpoint(comm);
    }
…
  }

- int MPI_Barrier_intracomm(MPI_Comm comm)
+ int MPI_Barrier_endpoint(MPI_Comm comm)
  {
    int ep_rank_loc = comm->ep_comm_ptr->size_rank_info[1].first;
…

    MPI_Barrier_local(comm);
-
-   return 0;
  }

- int MPI_Barrier_intercomm(MPI_Comm comm)
- {
-   MPI_Barrier_local(comm);
-
-   if(comm->ep_comm_ptr->intercomm->size_rank_info[1].first == 0)
-     ::MPI_Barrier(to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm));
-
-   MPI_Barrier_local(comm);
- }

  int MPI_Barrier_mpi(MPI_Comm comm)
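The renamed MPI_Barrier_endpoint keeps the classic two-level shape (visible in the removed intercomm variant as well): synchronize the endpoints of a process locally, let one thread cross processes, then release the threads. A standalone sketch of that shape under that assumption, using standard MPI and OpenMP rather than ep_lib's internals:

#include <mpi.h>
#include <omp.h>

// Not ep_lib code: a hierarchical barrier in the style of MPI_Barrier_endpoint.
void endpoint_style_barrier(MPI_Comm comm)
{
  #pragma omp barrier      // local barrier: all endpoints of this process arrive
  #pragma omp master
  MPI_Barrier(comm);       // one thread per process does the inter-process barrier
  #pragma omp barrier      // hold everyone until the MPI barrier has completed
}

int main(int argc, char** argv)
{
  int provided;
  MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
  #pragma omp parallel
  endpoint_style_barrier(MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}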
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_bcast.cpp
r1520 → r1539
  {

-   if(!comm->is_ep)
-   {
-     #pragma omp single nowait
-     ::MPI_Bcast(buffer, count, to_mpi_type(datatype), root, to_mpi_comm(comm->mpi_comm));
-     return 0;
-   }
+   if(!comm->is_ep) return ::MPI_Bcast(buffer, count, to_mpi_type(datatype), root, to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Bcast_intercomm(buffer, count, datatype, root, comm);

…
    int root_mpi_rank = comm->ep_rank_map->at(root).second;
    int root_ep_rank_loc = comm->ep_rank_map->at(root).first;
+
+   //printf("ep_rank = %d, root_mpi_rank = %d, root_ep_rank_loc = %d\n", ep_rank, root_mpi_rank, root_ep_rank_loc);

…
    else MPI_Bcast_local(buffer, count, datatype, 0, comm);

-   return 0;
  }

+ int MPI_Bcast_intercomm(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
+ {
+   printf("MPI_Bcast_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
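The root translation above relies on ep_rank_map, which (per the typedef in ep_comm.hpp) maps a global endpoint rank to its <ep_rank_local, mpi_rank> pair: the MPI process that owns the root does the process-level broadcast, and the root's thread-local rank drives the local one. A tiny sketch of that lookup with a hypothetical 2-process x 2-endpoint layout:

#include <map>
#include <utility>
#include <cstdio>

// EP_RANK_MAP as typedef'd in ep_comm.hpp: key(ep_rank) = <ep_rank_local, mpi_rank>.
typedef std::map<int, std::pair<int, int> > EP_RANK_MAP;

int main()
{
  // Hypothetical layout: 2 MPI processes, 2 endpoints each.
  EP_RANK_MAP ep_rank_map;
  ep_rank_map[0] = std::make_pair(0, 0);
  ep_rank_map[1] = std::make_pair(1, 0);
  ep_rank_map[2] = std::make_pair(0, 1);
  ep_rank_map[3] = std::make_pair(1, 1);

  int root = 3; // EP-level root of the bcast
  int root_ep_rank_loc = ep_rank_map.at(root).first;  // thread-local rank on its process
  int root_mpi_rank    = ep_rank_map.at(root).second; // owning MPI process

  std::printf("root %d lives on mpi_rank %d as local endpoint %d\n",
              root, root_mpi_rank, root_ep_rank_loc);
  return 0;
}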
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_comm.hpp
r1521 → r1539
  #include "ep_message.hpp"
- #include "ep_intercomm.hpp"
  #include "ep_barrier.hpp"
  #include "ep_buffer.hpp"
…
  typedef std::vector< std::pair<int, int> > RANK_MAP;      // at(ep_rank) = <ep_rank_local, mpi_rank>
  typedef std::map<int, std::pair<int, int> > EP_RANK_MAP;  // key(ep_rank) = <ep_rank_local, mpi_rank>
- //typedef std::vector<std::pair< std::pair<int, int>, std::pair<int, int> > > INTERCOMM_RANK_MAP;
  typedef std::list<ep_lib::MPI_Message > Message_list;
+ typedef std::map<int, int > INTER_RANK_MAP;

  namespace ep_lib
…
    // 2: mpi_rank, mpi_size

-   // for intercomm : = size_rank_info of local_comm
-
    ep_comm **comm_list;

    Message_list *message_queue;

-   int comm_label;
-
-   ep_intercomm *intercomm;
-
  };
…
    ep_barrier *ep_barrier;

-   EP_RANK_MAP *ep_rank_map; // for intercomm : = ep_rank_map of newcomm
+   EP_RANK_MAP *ep_rank_map;
+
+   INTER_RANK_MAP *inter_rank_map;

    void* mpi_comm;
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_create.cpp
r1520 → r1539
    out_comm_hdls[idx]->mpi_comm = parent_comm;
    out_comm_hdls[idx]->ep_comm_ptr->comm_list = out_comm_hdls;
-   out_comm_hdls[idx]->ep_comm_ptr->comm_label = 0;
  }
…
  }

-   return 0;
-
  } //MPI_Comm_create_endpoints
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_dup.cpp
r1520 → r1539
  {

- int MPI_Comm_dup_mpi(MPI_Comm comm, MPI_Comm *newcomm)
+ int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
  {
-   newcomm = new MPI_Comm;
-   (*newcomm)->is_ep = false;
+   if(!comm->is_ep)
+   {
+     Debug("MPI_Comm_dup with MPI\n");
+     return MPI_Comm_dup_mpi(comm, newcomm);
+   }

-   ::MPI_Comm *output = new ::MPI_Comm;
-
-   ::MPI_Comm_dup(to_mpi_comm(comm->mpi_comm), output);
-
-   (*newcomm)->mpi_comm = output;
+   return MPI_Comm_dup_endpoint(comm, newcomm);
  }

- int MPI_Comm_dup_intracomm(MPI_Comm comm, MPI_Comm *newcomm)
+ int MPI_Comm_dup_endpoint(MPI_Comm comm, MPI_Comm *newcomm)
  {
    int ep_rank_loc = comm->ep_comm_ptr->size_rank_info[1].first;
…
    *newcomm = (comm->ep_comm_ptr->comm_list[0]->mem_bridge[ep_rank_loc]);
+
+   if(comm->is_intercomm)
+   {
+     (*newcomm)->is_intercomm = true;
+     (*newcomm)->ep_comm_ptr->size_rank_info[0] = comm->ep_comm_ptr->size_rank_info[0];
+     (*newcomm)->inter_rank_map = new INTER_RANK_MAP;
+
+     for(INTER_RANK_MAP::iterator it = comm->inter_rank_map->begin(); it != comm->inter_rank_map->end(); it++)
+     {
+       (*newcomm)->inter_rank_map->insert(std::make_pair(it->first, it->second));
+     }
+   }
  }

- int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm)
- {
-   if(!comm->is_ep)
-   {
-     Debug("MPI_Comm_dup with MPI\n");
-     return MPI_Comm_dup_mpi(comm, newcomm);
-   }
-
-   if(comm->is_intercomm) return MPI_Comm_dup_intercomm(comm, newcomm);
-
-   return MPI_Comm_dup_intracomm(comm, newcomm);
- }
-
- int MPI_Comm_dup_intercomm(MPI_Comm comm, MPI_Comm *newcomm)
- {
-   int newcomm_ep_rank = comm->ep_comm_ptr->intercomm->size_rank_info[0].first;
-   int newcomm_ep_rank_loc = comm->ep_comm_ptr->intercomm->size_rank_info[1].first;
-   int newcomm_num_ep = comm->ep_comm_ptr->intercomm->size_rank_info[1].second;
-
-   if(0 == newcomm_ep_rank_loc)
-   {
-     //printf("in dup , newcomm_ep_rank_loc = 0 : ep %d\n", comm->ep_comm_ptr->intercomm->size_rank_info[0].first);
-
-     MPI_Info info;
-     MPI_Comm *out_comm;
-
-     MPI_Comm_create_endpoints(comm->mpi_comm, newcomm_num_ep, info, out_comm);
-
-     ::MPI_Comm *mpi_inter_comm = new ::MPI_Comm;
-     ::MPI_Comm_dup(to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm), mpi_inter_comm);
-
-     for(int i=0; i<newcomm_num_ep; i++)
-     {
-       out_comm[i]->is_intercomm = true;
-       out_comm[i]->ep_comm_ptr->comm_label = comm->ep_comm_ptr->comm_list[i]->ep_comm_ptr->comm_label;
-       out_comm[i]->ep_comm_ptr->intercomm = new ep_lib::ep_intercomm;
- #ifdef _showinfo
-       printf("new out_comm[%d]->ep_comm_ptr->intercomm = %p\n", i, out_comm[i]->ep_comm_ptr->intercomm);
- #endif
-       out_comm[i]->ep_comm_ptr->intercomm->mpi_inter_comm = mpi_inter_comm;
-     }
-
-     comm->ep_comm_ptr->comm_list[0]->mem_bridge = out_comm;
-   }
-
-   MPI_Barrier_local(comm);
-
-   *newcomm = (comm->ep_comm_ptr->comm_list[0]->mem_bridge[newcomm_ep_rank_loc]);
-
-   (*newcomm)->ep_comm_ptr->size_rank_info[0] = comm->ep_comm_ptr->size_rank_info[0];
-   (*newcomm)->ep_comm_ptr->size_rank_info[1] = comm->ep_comm_ptr->size_rank_info[1];
-   (*newcomm)->ep_comm_ptr->size_rank_info[2] = comm->ep_comm_ptr->size_rank_info[2];
-
-   (*newcomm)->ep_comm_ptr->intercomm->size_rank_info[0] = comm->ep_comm_ptr->intercomm->size_rank_info[0];
-   (*newcomm)->ep_comm_ptr->intercomm->size_rank_info[1] = comm->ep_comm_ptr->intercomm->size_rank_info[1];
-   (*newcomm)->ep_comm_ptr->intercomm->size_rank_info[2] = comm->ep_comm_ptr->intercomm->size_rank_info[2];
-
-   (*newcomm)->ep_comm_ptr->intercomm->intercomm_tag = comm->ep_comm_ptr->intercomm->intercomm_tag;
-
-   int ep_rank_loc = (*newcomm)->ep_comm_ptr->size_rank_info[1].first;
-
-   if(ep_rank_loc == 0)
-   {
-     int world_rank;
-     MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
-
-     (*newcomm)->ep_comm_ptr->intercomm->intercomm_rank_map = new INTERCOMM_RANK_MAP;
-     (*newcomm)->ep_comm_ptr->intercomm->local_rank_map = new EP_RANK_MAP;
-
-     *(*newcomm)->ep_comm_ptr->intercomm->intercomm_rank_map = *comm->ep_comm_ptr->intercomm->intercomm_rank_map;
-     *(*newcomm)->ep_comm_ptr->intercomm->local_rank_map = *comm->ep_comm_ptr->intercomm->local_rank_map;
-   }
-
-   MPI_Barrier_local(comm);
-
-   if(ep_rank_loc != 0)
-   {
-     int target = (*newcomm)->ep_comm_ptr->intercomm->intercomm_tag;
-     (*newcomm)->ep_comm_ptr->intercomm->intercomm_rank_map = (*newcomm)->ep_comm_ptr->comm_list[target]->ep_comm_ptr->intercomm->intercomm_rank_map;
-     (*newcomm)->ep_comm_ptr->intercomm->local_rank_map = (*newcomm)->ep_comm_ptr->comm_list[target]->ep_comm_ptr->intercomm->local_rank_map;
-   }
- }
+ int MPI_Comm_dup_mpi(MPI_Comm comm, MPI_Comm *newcomm)
+ {
+   newcomm = new MPI_Comm;
+   (*newcomm)->is_ep = false;
+
+   ::MPI_Comm *output = new ::MPI_Comm;
+
+   ::MPI_Comm_dup(to_mpi_comm(comm->mpi_comm), output);
+
+   (*newcomm)->mpi_comm = output;
+ }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_exscan.cpp
r1520 → r1539
  int MPI_Exscan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
  {
-   if(!comm->is_ep)
-   {
-     return ::MPI_Scan(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Exscan(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Exscan_intercomm(sendbuf, recvbuf, count, datatype, op, comm);

    valid_type(datatype);
…
    if(ep_rank_loc == 0)
+   {
      ::MPI_Exscan(MPI_IN_PLACE, tmp_recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm));
-
-   // printf(" ID=%d : %d %d \n", ep_rank, static_cast<int*>(tmp_recvbuf)[0], static_cast<int*>(tmp_recvbuf)[1]);
+   }

    MPI_Exscan_local(tmp_sendbuf, tmp_recvbuf, count, datatype, op, comm);
-
-   // printf(" ID=%d : after local tmp_sendbuf = %d %d ; tmp_recvbuf = %d %d \n", ep_rank, static_cast<int*>(tmp_sendbuf)[0], static_cast<int*>(tmp_sendbuf)[1], static_cast<int*>(tmp_recvbuf)[0], static_cast<int*>(tmp_recvbuf)[1]);

…
    else memcpy(recvbuf, tmp_recvbuf, datasize*count);

    delete[] tmp_sendbuf;
…
  }

+ int MPI_Exscan_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
+ {
+   printf("MPI_Exscan_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }

  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_finalize.cpp
r1520 → r1539
  int MPI_Finalize()
  {
-   printf("calling EP Finalize\n");
-
-   int id = omp_get_thread_num();
-
-   if(id == 0)
+   #pragma omp master
    {
+     printf("calling EP Finalize\n");
      ::MPI_Finalize();
    }
-   return 0;
  }

  int MPI_Abort(MPI_Comm comm, int errorcode)
- {
-   if(!comm->is_ep)
-     return MPI_Abort_mpi(comm, errorcode);
-
-   else
-   {
-     if(comm->ep_comm_ptr->size_rank_info[1].first == 0)
-     {
-       ::MPI_Abort(to_mpi_comm(comm->mpi_comm), errorcode);
-     }
-   }
- }
-
- int MPI_Abort_mpi(MPI_Comm comm, int errorcode)
  {
    return ::MPI_Abort(to_mpi_comm(comm->mpi_comm), errorcode);
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_free.cpp
r1533 → r1539
  int MPI_Comm_free(MPI_Comm *comm)
  {
-   if(! (*comm)->is_ep)
-   {
-     return MPI_Comm_free_mpi(comm);
-   }
-
-   else
-   {
-     if((*comm)->is_intercomm)
-       return MPI_Comm_free_intercomm(comm);
-     else
-       return MPI_Comm_free_intracomm(comm);
-   }
+   if(! (*comm)->is_ep) return MPI_Comm_free_mpi(comm);
+   else return MPI_Comm_free_endpoint(comm);
  }
…
  {
    Debug("MPI_Comm_free with MPI\n");
-
    return ::MPI_Comm_free(to_mpi_comm_ptr((*comm)->mpi_comm));
-
  }

- int MPI_Comm_free_intracomm(MPI_Comm *comm)
+ int MPI_Comm_free_endpoint(MPI_Comm *comm)
  {
    Debug("MPI_Comm_free with EP_intracomm\n");
…
    num_ep = (*comm)->ep_comm_ptr->size_rank_info[1].second;

-   MPI_Barrier(*comm);
+   MPI_Barrier_local(*comm);

    if(ep_rank_loc == 0)
    {
+     if((*comm)->is_intercomm)
+     {
+       for(int i=0; i<num_ep; i++)
+       {
+         (*comm)->ep_comm_ptr->comm_list[i]->inter_rank_map->clear();
+ #ifdef _showinfo
+         printf("delete (*comm)->ep_comm_ptr->comm_list[%d]->inter_rank_map = %p\n", i, (*comm)->ep_comm_ptr->comm_list[i]->inter_rank_map);
+ #endif
+         delete (*comm)->ep_comm_ptr->comm_list[i]->inter_rank_map;
+       }
+     }

  #ifdef _showinfo
…
  }

- int MPI_Comm_free_intercomm(MPI_Comm *comm)
- {
-   int ep_rank;
-   MPI_Comm_rank(*comm, &ep_rank);
-   int ep_rank_loc = (*comm)->ep_comm_ptr->size_rank_info[1].first;
-   int num_ep = (*comm)->ep_comm_ptr->size_rank_info[1].second;
-
-   int newcomm_ep_rank = (*comm)->ep_comm_ptr->intercomm->size_rank_info[0].first;
-   int newcomm_ep_rank_loc = (*comm)->ep_comm_ptr->intercomm->size_rank_info[1].first;
-   int newcomm_num_ep = (*comm)->ep_comm_ptr->intercomm->size_rank_info[1].second;
-
-   return 0;
-   MPI_Barrier(*comm);
-
-   if(ep_rank_loc == 0)
-   {
-     (*comm)->ep_comm_ptr->intercomm->intercomm_rank_map->clear();
- #ifdef _showinfo
-     printf("delete (*comm)->ep_comm_ptr->intercomm->intercomm_rank_map = %p\n", (*comm)->ep_comm_ptr->intercomm->intercomm_rank_map);
- #endif
-     delete (*comm)->ep_comm_ptr->intercomm->intercomm_rank_map;
-
-     (*comm)->ep_comm_ptr->intercomm->local_rank_map->clear();
- #ifdef _showinfo
-     printf("delete (*comm)->ep_comm_ptr->intercomm->local_rank_map = %p\n", (*comm)->ep_comm_ptr->intercomm->local_rank_map);
- #endif
-     delete (*comm)->ep_comm_ptr->intercomm->local_rank_map;
-   }
-
-   if(newcomm_ep_rank_loc == 0)
-   {
- #ifdef _showinfo
-     printf("delete (*comm)->my_buffer = %p\n", (*comm)->my_buffer);
- #endif
-     delete (*comm)->my_buffer;
-
- #ifdef _showinfo
-     printf("delete (*comm)->ep_barrier = %p\n", (*comm)->ep_barrier);
- #endif
-     delete (*comm)->ep_barrier;
-
-     (*comm)->ep_rank_map->clear();
- #ifdef _showinfo
-     printf("delete (*comm)->ep_rank_map = %p\n", (*comm)->ep_rank_map);
- #endif
-     delete (*comm)->ep_rank_map;
-
- #ifdef _showinfo
-     printf("delete (*comm)->ep_comm_ptr->intercomm->mpi_inter_comm = %p\n", (*comm)->ep_comm_ptr->intercomm->mpi_inter_comm);
- #endif
-     ::MPI_Comm_free(to_mpi_comm_ptr((*comm)->ep_comm_ptr->intercomm->mpi_inter_comm));
-
-     for(int i=0; i<newcomm_num_ep; i++)
-     {
-       (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr->message_queue->clear();
- #ifdef _showinfo
-       printf("delete (*comm)->ep_comm_ptr->comm_list[%d]->ep_comm_ptr->message_queue = %p\n", i, (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr->message_queue);
- #endif
-       delete (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr->message_queue;
-
- #ifdef _showinfo
-       printf("delete (*comm)->ep_comm_ptr->comm_list[%d]->ep_comm_ptr->intercomm = %p\n", i, (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr->intercomm);
- #endif
-       delete (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr->intercomm;
-
- #ifdef _showinfo
-       printf("delete (*comm)->ep_comm_ptr->comm_list[%d]->ep_comm_ptr = %p\n", i, (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr);
- #endif
-       delete (*comm)->ep_comm_ptr->comm_list[i]->ep_comm_ptr;
-
- #ifdef _showinfo
-       printf("delete (*comm)->ep_comm_ptr->comm_list[%d] = %p\n", i, (*comm)->ep_comm_ptr->comm_list[i]);
- #endif
-       delete (*comm)->ep_comm_ptr->comm_list[i];
-     }
-
- #ifdef _showinfo
-     printf("delete (*comm)->mpi_comm = %p\n", (*comm)->mpi_comm);
- #endif
-     ::MPI_Comm_free(to_mpi_comm_ptr((*comm)->mpi_comm));
-
- #ifdef _showinfo
-     printf("delete (*comm)->ep_comm_ptr->comm_list = %p\n", (*comm)->ep_comm_ptr->comm_list);
- #endif
-     delete[] (*comm)->ep_comm_ptr->comm_list;
-   }
- }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_gather.cpp
r1520 → r1539
    for(int i=0; i<num_ep; i++)
      memcpy(recvbuf + datasize * i * count, comm->my_buffer->void_buffer[i], datasize * count);
-
-   //printf("local_recvbuf = %d %d \n", static_cast<int*>(recvbuf)[0], static_cast<int*>(recvbuf)[1] );
  }
…
  int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
  {
-   if(!comm->is_ep)
-   {
-     return ::MPI_Gather(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Gather(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Gather_intercomm(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);

    assert(sendcount == recvcount && sendtype == recvtype);
…
    if(is_root)
    {
-     // printf("tmp_recvbuf = %d %d %d %d %d %d %d %d\n", static_cast<int*>(tmp_recvbuf)[0], static_cast<int*>(tmp_recvbuf)[1],
-     //        static_cast<int*>(tmp_recvbuf)[2], static_cast<int*>(tmp_recvbuf)[3],
-     //        static_cast<int*>(tmp_recvbuf)[4], static_cast<int*>(tmp_recvbuf)[5],
-     //        static_cast<int*>(tmp_recvbuf)[6], static_cast<int*>(tmp_recvbuf)[7] );
-
      int offset;
      for(int i=0; i<ep_size; i++)
…
  }

+ int MPI_Gather_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
+ {
+   printf("MPI_Gather_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_gatherv.cpp
r1520 → r1539
  {

-   if(!comm->is_ep)
-   {
-     return ::MPI_Gatherv(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, const_cast<int*>(input_recvcounts), const_cast<int*>(input_displs),
-                          to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm));
-   }
+   if(!comm->is_ep) return ::MPI_Gatherv(const_cast<void*>(sendbuf), sendcount, to_mpi_type(sendtype), recvbuf, const_cast<int*>(input_recvcounts), const_cast<int*>(input_displs),
+                                         to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm));
+   if(comm->is_intercomm) return MPI_Gatherv_intercomm(sendbuf, sendcount, sendtype, recvbuf, input_recvcounts, input_displs, recvtype, root, comm);

    assert(sendtype == recvtype);
…
  }

+ int MPI_Gatherv_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int input_recvcounts[], const int input_displs[],
+                           MPI_Datatype recvtype, int root, MPI_Comm comm)
+ {
+   printf("MPI_Gatherv_intercomm not yet implemented\n");
+   MPI_Abort(comm, 0);
+ }
  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_init.cpp
r1533 → r1539
  #include <mpi.h>
  #include "ep_declaration.hpp"
+ #include "ep_mpi.hpp"

+ extern MPI_Group MPI_GROUP_WORLD;

  namespace ep_lib
…
    Debug("MPI_Init_thread with EP/MPI\n");

-   if(omp_get_thread_num() == 0)
+   #pragma omp master
    {
      ::MPI_Init_thread(argc, argv, required, provided);
+     ::MPI_Comm_group(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &MPI_GROUP_WORLD);
    }
  }
…
    Debug("MPI_Init with EP/MPI\n");

-   if(omp_get_thread_num() == 0)
+   #pragma omp master
    {
      ::MPI_Init(argc, argv);
    }
- }
-
- int EP_group_init()
- {
  }
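Switching from if(omp_get_thread_num() == 0) to #pragma omp master keeps initialization once-per-process while also behaving correctly outside a parallel region, where the calling thread is the master by definition. A minimal compilable sketch of the pattern; MPI_GROUP_WORLD mirrors the global this file now caches, everything else is illustrative rather than ep_lib code:

#include <mpi.h>
#include <cstdio>

// Cached world group, in the spirit of the MPI_GROUP_WORLD global added at r1539.
static MPI_Group MPI_GROUP_WORLD;

void ep_style_init(int* argc, char*** argv)
{
  // Only the master thread touches MPI; every thread may call this wrapper.
  #pragma omp master
  {
    int provided;
    MPI_Init_thread(argc, argv, MPI_THREAD_MULTIPLE, &provided);
    MPI_Comm_group(MPI_COMM_WORLD, &MPI_GROUP_WORLD); // capture the world group once
  }
}

int main(int argc, char** argv)
{
  ep_style_init(&argc, &argv); // works sequentially or inside a parallel region
  MPI_Finalize();
  return 0;
}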
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_intercomm.cpp
r1520 → r1539
  using namespace std;

+ extern std::map<std::pair<int, int>, MPI_Group* > * tag_group_map;
+
+ extern std::map<int, std::pair<ep_lib::MPI_Comm*, std::pair<int, int> > > * tag_comm_map;
+
+ extern MPI_Group MPI_GROUP_WORLD;

  namespace ep_lib
  {
  int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
-   assert(local_comm->is_ep);
+   if(!local_comm->is_ep) return MPI_Intercomm_create_mpi(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
+
+   int ep_rank = local_comm->ep_comm_ptr->size_rank_info[0].first;
+
+   // check if local leaders are in the same mpi proc
+   // by checking their mpi_rank in peer_comm
+
+   int mpi_rank_of_leader[2];
+
+   if(ep_rank == local_leader)
+   {
+     mpi_rank_of_leader[0] = peer_comm->ep_comm_ptr->size_rank_info[2].first;
+     mpi_rank_of_leader[1] = peer_comm->ep_rank_map->at(remote_leader).second;
+   }
+
+   MPI_Bcast(mpi_rank_of_leader, 2, MPI_INT, local_leader, local_comm);
+
+   if(mpi_rank_of_leader[0] != mpi_rank_of_leader[1])
+   {
+     Debug("calling MPI_Intercomm_create_kernel\n");
+     return MPI_Intercomm_create_endpoint(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
+   }
+   else
+   {
+     printf("local leaders are in the same MPI proc. Routine not yet implemented\n");
+     MPI_Abort(local_comm, 0);
+   }
+ }
+
+ int MPI_Intercomm_create_endpoint(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
+ {
    int ep_rank, ep_rank_loc, mpi_rank;
    int ep_size, num_ep, mpi_size;

    ep_rank     = local_comm->ep_comm_ptr->size_rank_info[0].first;
    ep_rank_loc = local_comm->ep_comm_ptr->size_rank_info[1].first;
    mpi_rank    = local_comm->ep_comm_ptr->size_rank_info[2].first;
    ep_size     = local_comm->ep_comm_ptr->size_rank_info[0].second;
    num_ep      = local_comm->ep_comm_ptr->size_rank_info[1].second;
    mpi_size    = local_comm->ep_comm_ptr->size_rank_info[2].second;

-   MPI_Barrier(local_comm);
-
-   int leader_ranks_in_peer[3]; // local_leader_rank_in_peer
-                                // remote_leader_rank_in_peer
-                                // size of peer
-
-   if(ep_rank == local_leader)
-   {
-     MPI_Comm_rank(peer_comm, &leader_ranks_in_peer[0]);
-     leader_ranks_in_peer[1] = remote_leader;
-     MPI_Comm_size(peer_comm, &leader_ranks_in_peer[2]);
-   }
-
-   MPI_Bcast(leader_ranks_in_peer, 3, MPI_INT, local_leader, local_comm);
-
-   if(leader_ranks_in_peer[0] != leader_ranks_in_peer[2])
-   {
-     Debug("calling MPI_Intercomm_create_kernel\n");
-     return MPI_Intercomm_create_kernel(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
-   }
-   else
-   {
-     if(leader_ranks_in_peer[2] == 1)
-     {
-       Debug("calling MPI_Intercomm_create_unique\n");
-       return MPI_Intercomm_create_unique_leader(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
-     }
-     else
-     {
-       Debug("calling MPI_Intercomm_create_world\n");
-       return MPI_Intercomm_create_from_world(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
-     }
-   }
- }
-
-   int leader_ranks[6]; //! 0: rank in world, 1: mpi_size, 2: rank_in_peer.
-                        //! 3, 4, 5 : remote
-
-   bool is_decider = false;
-
-   if(ep_rank == local_leader)
-   {
-     MPI_Comm_rank(MPI_COMM_WORLD, &leader_ranks[0]);
-
-     leader_ranks[1] = mpi_size;
-     MPI_Comm_rank(peer_comm, &leader_ranks[2]);
-
-     MPI_Request request[2];
-     MPI_Status status[2];
-
-     MPI_Isend(&leader_ranks[0], 3, MPI_INT, remote_leader, tag, peer_comm, &request[0]);
-     MPI_Irecv(&leader_ranks[3], 3, MPI_INT, remote_leader, tag, peer_comm, &request[1]);
-
-     MPI_Waitall(2, request, status);
-   }
-
-   MPI_Bcast(leader_ranks, 6, MPI_INT, local_leader, local_comm);
-
-   MPI_Barrier(local_comm);
-
-   if(leader_ranks[0] == leader_ranks[3])
-   {
-     if( leader_ranks[1] * leader_ranks[4] == 1)
-     {
-       if(ep_rank == local_leader) printf("calling MPI_Intercomm_create_unique_leader\n");
-       local_comm->ep_comm_ptr->comm_label = -99;
-
-       return MPI_Intercomm_create_unique_leader(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm);
-     }
-     else // leader_ranks[1] * leader_ranks[4] != 1
-     {
-       // change leader
-       int new_local_leader;
-
-       if(leader_ranks[2] < leader_ranks[5])
-       {
-         if(leader_ranks[1] > 1) //! change leader
-         {
-           // change leader
-           is_decider = true;
-           int target = local_comm->ep_rank_map->at(local_leader).second;
-           {
-             for(int i=0; i<ep_size; i++)
-             {
-               if(local_comm->ep_rank_map->at(i).second != target && local_comm->ep_rank_map->at(i).first == 0)
-               {
-                 new_local_leader = i;
-                 break;
-               }
-             }
-           }
-         }
-         else
-         {
-           new_local_leader = local_leader;
-         }
-       }
-       else
-       {
-         if(leader_ranks[4] == 1)
-         {
-           // change leader
-           is_decider = true;
-           int target = local_comm->ep_rank_map->at(local_leader).second;
-           {
-             for(int i=0; i<ep_size; i++)
-             {
-               if(local_comm->ep_rank_map->at(i).second != target && local_comm->ep_rank_map->at(i).first == 0)
-               {
-                 new_local_leader = i;
-                 break;
-               }
-             }
-           }
-         }
-         else
-         {
-           new_local_leader = local_leader;
-         }
-       }
-
-       int new_tag_in_world;
-
-       int leader_in_world[2];
-
-       if(is_decider)
-       {
-         if(ep_rank == new_local_leader)
-         {
-           new_tag_in_world = TAG++;
-         }
-         MPI_Bcast(&new_tag_in_world, 1, MPI_INT, new_local_leader, local_comm);
-         if(ep_rank == local_leader) MPI_Send(&new_tag_in_world, 1, MPI_INT, remote_leader, tag, peer_comm);
-       }
-       else
-       {
-         if(ep_rank == local_leader)
-         {
-           MPI_Status status;
-           MPI_Recv(&new_tag_in_world, 1, MPI_INT, remote_leader, tag, peer_comm, &status);
-         }
-         MPI_Bcast(&new_tag_in_world, 1, MPI_INT, new_local_leader, local_comm);
-       }
-
-       if(ep_rank == new_local_leader)
-       {
-         ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &leader_in_world[0]);
-       }
-
-       MPI_Bcast(&leader_in_world[0], 1, MPI_INT, new_local_leader, local_comm);
-
-       if(ep_rank == local_leader)
-       {
-         MPI_Request request[2];
-         MPI_Status status[2];
-
-         MPI_Isend(&leader_in_world[0], 1, MPI_INT, remote_leader, tag, peer_comm, &request[0]);
-         MPI_Irecv(&leader_in_world[1], 1, MPI_INT, remote_leader, tag, peer_comm, &request[1]);
-
-         MPI_Waitall(2, request, status);
-       }
-
-       MPI_Bcast(&leader_in_world[1], 1, MPI_INT, local_leader, local_comm);
-
-       local_comm->ep_comm_ptr->comm_label = tag;
-
-       if(ep_rank == local_leader) printf("calling MPI_Intercomm_create_from_world\n");
-
-       return MPI_Intercomm_create_from_world(local_comm, new_local_leader, MPI_COMM_WORLD->mpi_comm, leader_in_world[1], new_tag_in_world, newintercomm);
-     }
-   }
- }
+   //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+   // step 1 : local leaders exchange ep_size, leader_rank_in_peer, leader_rank_in_peer_mpi, leader_rank_in_world. //
+   //          local leaders bcast results to all ep in local_comm                                                 //
+   //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+   bool is_local_leader = ep_rank==local_leader? true: false;
+
+   int local_leader_rank_in_peer;
+   int local_leader_rank_in_peer_mpi;
+   int local_leader_rank_in_world;
+
+   int remote_ep_size;
+   int remote_leader_rank_in_peer;
+   int remote_leader_rank_in_peer_mpi;
+   int remote_leader_rank_in_world;
+
+   int send_quadruple[4];
+   int recv_quadruple[4];
+
+   if(is_local_leader)
+   {
+     MPI_Comm_rank(peer_comm, &local_leader_rank_in_peer);
+     ::MPI_Comm_rank(to_mpi_comm(peer_comm->mpi_comm), &local_leader_rank_in_peer_mpi);
+     ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &local_leader_rank_in_world);
+
+     send_quadruple[0] = ep_size;
+     send_quadruple[1] = local_leader_rank_in_peer;
+     send_quadruple[2] = local_leader_rank_in_peer_mpi;
+     send_quadruple[3] = local_leader_rank_in_world;
+
+     MPI_Request request;
+     MPI_Status status;
+
+     if(remote_leader > local_leader_rank_in_peer)
+     {
+       MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+     else
+     {
+       MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+
+     remote_ep_size = recv_quadruple[0];
+     remote_leader_rank_in_peer = recv_quadruple[1];
+     remote_leader_rank_in_peer_mpi = recv_quadruple[2];
+     remote_leader_rank_in_world = recv_quadruple[3];
+ #ifdef _showinfo
+     printf("peer_rank = %d, packed exchange OK\n", local_leader_rank_in_peer);
+ #endif
+   }
+
+   MPI_Bcast(send_quadruple, 4, MPI_INT, local_leader, local_comm);
+   MPI_Bcast(recv_quadruple, 4, MPI_INT, local_leader, local_comm);
+
+   if(!is_local_leader)
+   {
+     local_leader_rank_in_peer = send_quadruple[1];
+     local_leader_rank_in_peer_mpi = send_quadruple[2];
+     local_leader_rank_in_world = send_quadruple[3];
+
+     remote_ep_size = recv_quadruple[0];
+     remote_leader_rank_in_peer = recv_quadruple[1];
+     remote_leader_rank_in_peer_mpi = recv_quadruple[2];
+     remote_leader_rank_in_world = recv_quadruple[3];
+   }
+
+ #ifdef _showinfo
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, ep_size = %d, remote_ep_size = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_size, remote_ep_size);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+ #endif
+
+   ///////////////////////////////////////////////////////////////////
+   // step 2 : gather ranks in world for both local and remote comm //
+   ///////////////////////////////////////////////////////////////////
+
+   int rank_in_world;
+   ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &rank_in_world);
+
+   int *ranks_in_world_local  = new int[ep_size];
+   int *ranks_in_world_remote = new int[remote_ep_size];
+
+   MPI_Allgather(&rank_in_world, 1, MPI_INT, ranks_in_world_local, 1, MPI_INT, local_comm);
+
+   if(is_local_leader)
+   {
+     MPI_Request request;
+     MPI_Status status;
+
+     if(remote_leader > local_leader_rank_in_peer)
+     {
+       MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+     else
+     {
+       MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+ #ifdef _showinfo
+     printf("peer_rank = %d, ranks_in_world exchange OK\n", local_leader_rank_in_peer);
+ #endif
+   }
+
+   MPI_Bcast(ranks_in_world_remote, remote_ep_size, MPI_INT, local_leader, local_comm);
+
+ #ifdef _showinfo
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+   if(remote_leader == 4)
+   {
+     for(int i=0; i<ep_size; i++)
+     {
+       if(ep_rank == i)
+       {
+         printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
+         for(int i=0; i<ep_size; i++)
+         {
+           printf("%d\t", ranks_in_world_local[i]);
+         }
+
+         printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
+         for(int i=0; i<remote_ep_size; i++)
+         {
+           printf("%d\t", ranks_in_world_remote[i]);
+         }
+         printf("\n");
+       }
+
+       MPI_Barrier(local_comm);
+       MPI_Barrier(local_comm);
+       MPI_Barrier(local_comm);
+     }
+   }
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+   if(remote_leader == 13)
+   {
+     for(int i=0; i<ep_size; i++)
+     {
+       if(ep_rank == i)
+       {
+         printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
+         for(int i=0; i<ep_size; i++)
+         {
+           printf("%d\t", ranks_in_world_local[i]);
+         }
+
+         printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
+         for(int i=0; i<remote_ep_size; i++)
+         {
+           printf("%d\t", ranks_in_world_remote[i]);
+         }
+         printf("\n");
+       }
+
+       MPI_Barrier(local_comm);
+       MPI_Barrier(local_comm);
+       MPI_Barrier(local_comm);
+     }
+   }
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+ #endif
+
+   //////////////////////////////////////////////////////////////
+   // step 3 : determine the priority and ownership of each ep //
+   //////////////////////////////////////////////////////////////
+
+   bool priority = local_leader_rank_in_peer < remote_leader_rank_in_peer? true : false;
+
+   int ownership = priority;
+
+   if(rank_in_world == ranks_in_world_local[local_leader]) ownership = 1;
+   if(rank_in_world == remote_leader_rank_in_world) ownership = 0;
+
+ #ifdef _showinfo
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, priority = %d, local_leader_rank_in_peer = %d, remote_leader_rank_in_peer = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, local_leader_rank_in_peer, remote_leader_rank_in_peer);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+ #endif
+
+ #ifdef _showinfo
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, priority = %d, ownership = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, ownership);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+ #endif
+
+   //////////////////////////////////////////////////////
+   // step 4 : extract local_comm and create intercomm //
+   //////////////////////////////////////////////////////
+
+   bool is_involved = is_local_leader || (!is_local_leader && ep_rank_loc == 0 && rank_in_world != local_leader_rank_in_world);
+
+ #ifdef _showinfo
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, is_involved = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, is_involved);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+ #endif
+
+   if(is_involved)
+   {
+     ::MPI_Group local_group;
+     ::MPI_Group extracted_group;
+     ::MPI_Comm extracted_comm;
+
+     ::MPI_Comm_group(to_mpi_comm(local_comm->mpi_comm), &local_group);
+
+     int *ownership_list = new int[mpi_size];
+     int *mpi_rank_list = new int[mpi_size];
+
+     ::MPI_Allgather(&ownership, 1, to_mpi_type(MPI_INT), ownership_list, 1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
+     ::MPI_Allgather(&mpi_rank,  1, to_mpi_type(MPI_INT), mpi_rank_list,  1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
+
+     int n=0;
+     for(int i=0; i<mpi_size; i++)
+     {
+       n+=ownership_list[i];
+     }
+
+     int *new_mpi_rank_list = new int[n];
+     int j=0;
+     for(int i=0; i<mpi_size; i++)
+     {
+       if(ownership_list[i] !=0)
+       {
+         new_mpi_rank_list[j++] = mpi_rank_list[i];
+       }
+     }
+
+     ::MPI_Group_incl(local_group, n, new_mpi_rank_list, &extracted_group);
+
+     ::MPI_Comm_create(to_mpi_comm(local_comm->mpi_comm), extracted_group, &extracted_comm);
+
+     ::MPI_Comm mpi_inter_comm;
+
+     int local_leader_rank_in_extracted_comm;
+
+     if(is_local_leader)
+     {
+       ::MPI_Comm_rank(extracted_comm, &local_leader_rank_in_extracted_comm);
+     }
+
+     ::MPI_Bcast(&local_leader_rank_in_extracted_comm, 1, to_mpi_type(MPI_INT), local_comm->ep_rank_map->at(local_leader).second, to_mpi_comm(local_comm->mpi_comm));
+
+     ::MPI_Comm *intracomm = new ::MPI_Comm;
+     bool is_real_involved = ownership && extracted_comm != to_mpi_comm(MPI_COMM_NULL->mpi_comm);
+
+     if(is_real_involved)
+     {
+       ::MPI_Intercomm_create(extracted_comm, local_leader_rank_in_extracted_comm, to_mpi_comm(peer_comm->mpi_comm), remote_leader_rank_in_peer_mpi, tag, &mpi_inter_comm);
+       ::MPI_Intercomm_merge(mpi_inter_comm, !priority, intracomm);
+     }
+
+     ////////////////////////////////////
+     // step 5 :: determine new num_ep //
+     ////////////////////////////////////
+
+     int num_ep_count=0;
+
+     for(int i=0; i<ep_size; i++)
+     {
+       if(rank_in_world == ranks_in_world_local[i])
+         num_ep_count++;
+     }
+
+     for(int i=0; i<remote_ep_size; i++)
+     {
+       if(rank_in_world == ranks_in_world_remote[i])
+         num_ep_count++;
+     }
+
+     ///////////////////////////////////////////////////
+     // step 6 : create endpoints from extracted_comm //
+     ///////////////////////////////////////////////////
+
+     if(is_real_involved)
+     {
+       MPI_Comm *ep_comm;
+       MPI_Info info;
+       MPI_Comm_create_endpoints(intracomm, num_ep_count, info, ep_comm);
+
+ #ifdef _showinfo
+       printf("new ep_comm->ep_comm_ptr->intercomm->mpi_inter_comm = %p\n", mpi_inter_comm);
+ #endif
+
+ #pragma omp critical (write_to_tag_list)
+       intercomm_list.push_back(make_pair( make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)) , make_pair(ep_comm , make_pair(num_ep_count, 0))));
+ #pragma omp flush
+ #ifdef _showinfo
+       for(int i=0; i<num_ep_count; i++)
+         printf("peer_rank = %d, ep_comm = %p, ep_comm[%d] -> new_ep_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_comm, i, ep_comm[i]->ep_comm_ptr->size_rank_info[0].first);
+ #endif
+       ::MPI_Comm_free(intracomm);
+       delete intracomm;
+     }
+
+     delete ownership_list;
+     delete mpi_rank_list;
+     delete new_mpi_rank_list;
+   }
+
+   int repeated=0;
+   for(int i=0; i<remote_ep_size; i++)
+   {
+     if(rank_in_world == ranks_in_world_remote[i])
+       repeated++;
+   }
+
+   int new_ep_rank_loc = ownership==1? ep_rank_loc : ep_rank_loc+repeated;
+
+ #ifdef _showinfo
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, ep_rank_loc = %d, ownership = %d, repeated = %d, new_ep_rank_loc = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_rank_loc, ownership, repeated, new_ep_rank_loc);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+ #endif
+
+ #pragma omp flush
+ #pragma omp critical (read_from_intercomm_list)
+   {
+     bool flag=true;
+     while(flag)
+     {
+       for(std::list<std::pair<std::pair<int, int> , std::pair<MPI_Comm * , std::pair<int, int> > > >::iterator iter = intercomm_list.begin(); iter!=intercomm_list.end(); iter++)
+       {
+         if(iter->first == make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)))
+         {
+           *newintercomm = iter->second.first[new_ep_rank_loc];
+
+           iter->second.second.second++;
+
+           if(iter->second.second.first == iter->second.second.second)
+             intercomm_list.erase(iter);
+
+           flag = false;
+           break;
+         }
+       }
+     }
+   }
+
+ #ifdef _showinfo
+
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+   printf("peer_rank = %d, test_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, (*newintercomm)->ep_comm_ptr->size_rank_info[0].first);
+   MPI_Barrier(peer_comm);
+   MPI_Barrier(peer_comm);
+
+ #endif
+
+   //////////////////////////////////////////////////////////
+   // step 7 : create intercomm_rank_map for local leaders //
+   //////////////////////////////////////////////////////////
+
+   (*newintercomm)->is_intercomm = true;
+
+   (*newintercomm)->inter_rank_map = new INTER_RANK_MAP;
+
+   int rank_info[2];
+   rank_info[0] = ep_rank;
+   rank_info[1] = (*newintercomm)->ep_comm_ptr->size_rank_info[0].first;
+
+ #ifdef _showinfo
+   printf("priority = %d, ep_rank = %d, new_ep_rank = %d\n", priority, rank_info[0], rank_info[1]);
+ #endif
+
+   int *local_rank_info = new int[2*ep_size];
+   int *remote_rank_info = new int[2*remote_ep_size];
+
+   MPI_Allgather(rank_info, 2, MPI_INT, local_rank_info, 2, MPI_INT, local_comm);
+
+   if(is_local_leader)
+   {
+     MPI_Request request;
+     MPI_Status status;
+
+     if(priority)
+     {
+       MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+     else
+     {
+       MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+
+       MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
+       MPI_Wait(&request, &status);
+     }
+   }
+
+   MPI_Bcast(remote_rank_info, 2*remote_ep_size, MPI_INT, local_leader, local_comm);
+
+   for(int i=0; i<remote_ep_size; i++)
+   {
+     (*newintercomm)->inter_rank_map->insert(make_pair(remote_rank_info[2*i], remote_rank_info[2*i+1]));
+   }
+
+   (*newintercomm)->ep_comm_ptr->size_rank_info[0] = local_comm->ep_comm_ptr->size_rank_info[0];
+
+   delete[] local_rank_info;
+   delete[] remote_rank_info;
+   delete[] ranks_in_world_local;
+   delete[] ranks_in_world_remote;
+   /*
+   if((*newintercomm)->ep_comm_ptr->size_rank_info[0].second == 1)
+   {
+     for(INTER_RANK_MAP::iterator it = (*newintercomm)->inter_rank_map->begin(); it != (*newintercomm)->inter_rank_map->end(); it++)
+     {
+       printf("inter_rank_map[%d] = %d\n", it->first, it->second);
+     }
+   }
+   */
+ }

- int MPI_Comm_test_inter(MPI_Comm comm, int *flag)
- {
-   *flag = false;
-   if(comm->is_ep)
-   {
-     *flag = comm->is_intercomm;
-     return 0;
-   }
-   else if(comm->mpi_comm != static_cast< ::MPI_Comm*>(MPI_COMM_NULL->mpi_comm))
-   {
-     ::MPI_Comm mpi_comm = to_mpi_comm(comm->mpi_comm);
-
-     ::MPI_Comm_test_inter(mpi_comm, flag);
-     return 0;
-   }
-   return 0;
- }
+ int MPI_Intercomm_create_mpi(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
+ {
+   printf("MPI_Intercomm_create_mpi not yet implemented\n");
+   MPI_Abort(local_comm, 0);
+ }

  }
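At process level, what MPI_Intercomm_create_endpoint rebuilds for threads is the standard MPI pair MPI_Intercomm_create / MPI_Intercomm_merge: leaders of two disjoint groups meet through a peer communicator, and the merge orders the groups by the high flag, much as step 4 above and ep_merge.cpp do via the priority/ownership bits. A plain-MPI sketch of that baseline (run with at least two processes; tag 99 and the even split are arbitrary choices):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size); // needs >= 2 processes

  // Split the world into two disjoint local groups.
  int color = rank < size/2 ? 0 : 1;
  MPI_Comm local_comm;
  MPI_Comm_split(MPI_COMM_WORLD, color, rank, &local_comm);

  // World rank of the other group's leader, seen through the peer comm.
  int remote_leader = (color == 0) ? size/2 : 0;

  MPI_Comm inter_comm, merged;
  MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, remote_leader, 99, &inter_comm);
  MPI_Intercomm_merge(inter_comm, /*high=*/color == 1, &merged); // group 1 ranks after group 0

  int merged_rank;
  MPI_Comm_rank(merged, &merged_rank);
  std::printf("world rank %d -> merged rank %d\n", rank, merged_rank);

  MPI_Comm_free(&merged);
  MPI_Comm_free(&inter_comm);
  MPI_Comm_free(&local_comm);
  MPI_Finalize();
  return 0;
}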
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib.cpp
r1520 → r1539
+ std::map<std::pair<int, int>, MPI_Group* > * tag_group_map = 0;
+
+ std::map<int, std::pair<ep_lib::MPI_Comm*, std::pair<int, int> > > * tag_comm_map = 0;
+
+ MPI_Group MPI_GROUP_WORLD;

  namespace ep_lib
…
  int get_ep_rank(MPI_Comm comm, int ep_rank_loc, int mpi_rank)
  {
-   if(comm->is_intercomm)
-   {
-     for(std::map<int, std::pair< int, std::pair<int, int> > >::iterator it = comm->ep_comm_ptr->intercomm->intercomm_rank_map->begin(); it != comm->ep_comm_ptr->intercomm->intercomm_rank_map->end(); it++)
-     {
-       if( ( it->second.first == ep_rank_loc )
-        && ( it->second.second.first == mpi_rank ) )
-       {
-         return it->first;
-       }
-     }
-     printf("rank not find for EP_intercomm\n");
-     int err;
-     return MPI_Abort(comm, err);
-   }
-
    for(std::map<int, std::pair<int, int> >::iterator it = comm->ep_rank_map->begin(); it != comm->ep_rank_map->end(); it++)
    {
…
    }
    printf("rank not find for EP_intracomm\n");
-   int err;
-   return MPI_Abort(comm, err);
+   return MPI_Abort(comm, 0);
  }
…
  {
    return ::MPI_Wtime();
  }

+ int MPI_Comm_test_inter(MPI_Comm comm, int *flag)
+ {
+   if(comm->is_ep) return *flag = comm->is_intercomm;
+   else return ::MPI_Comm_test_inter(to_mpi_comm(comm->mpi_comm), flag);
+ }
…
  }

- int test_sendrecv(MPI_Comm comm)
- {
-   int myRank;
-   MPI_Comm_rank(comm, &myRank);
-   bool amClient = false;
-   bool amServer = false;
-   if(myRank<=3) amClient = true;
-   else amServer = true;
-
-   if(amServer)
-   {
-     int send_buf[4];
-     MPI_Request send_request[8];
-     MPI_Status send_status[8];
-
-     for(int j=0; j<4; j++) // 4 buffers
-     {
-       for(int i=0; i<2; i++)
-       {
-         send_buf[j] = (myRank+1)*100 + j;
-         MPI_Isend(&send_buf[j], 1, MPI_INT, i*2, 9999, comm, &send_request[i*4+j]);
-       }
-     }
-
-     MPI_Waitall(8, send_request, send_status);
-   }
-
-   if(amClient&&myRank%2==0) // Clients leaders
-   {
-     int recv_buf[8];
-     MPI_Request recv_request[8];
-     MPI_Status recv_status[8];
-
-     for(int i=0; i<2; i++) // 2 servers
-     {
-       for(int j=0; j<4; j++)
-       {
-         MPI_Irecv(&recv_buf[i*4+j], 1, MPI_INT, i+4, 9999, comm, &recv_request[i*4+j]);
-       }
-     }
-
-     MPI_Waitall(8, recv_request, recv_status);
-     printf("============ client %d, recv_buf = %d, %d, %d, %d, %d, %d, %d, %d ================\n",
-            myRank, recv_buf[0], recv_buf[1], recv_buf[2], recv_buf[3], recv_buf[4], recv_buf[5], recv_buf[6], recv_buf[7]);
-   }
-
-   MPI_Barrier(comm);
- }

  bool valid_type(MPI_Datatype datatype)
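The simplified get_ep_rank is now a single reverse lookup over ep_rank_map: given a thread-local rank and an MPI rank, find the global endpoint rank whose mapped pair matches. A self-contained sketch of that scan; the map layout follows the EP_RANK_MAP typedef from ep_comm.hpp, the sample values are hypothetical:

#include <cstdio>
#include <map>
#include <utility>

// key(ep_rank) = <ep_rank_local, mpi_rank>, as in ep_comm.hpp.
typedef std::map<int, std::pair<int, int> > EP_RANK_MAP;

// Linear reverse scan, as in the changeset; ep_lib prints an error and
// calls MPI_Abort on a miss instead of returning -1.
int get_ep_rank(const EP_RANK_MAP& m, int ep_rank_loc, int mpi_rank)
{
  for(EP_RANK_MAP::const_iterator it = m.begin(); it != m.end(); ++it)
    if(it->second.first == ep_rank_loc && it->second.second == mpi_rank)
      return it->first;
  return -1;
}

int main()
{
  EP_RANK_MAP m;
  m[0] = std::make_pair(0, 0);
  m[1] = std::make_pair(1, 0);
  m[2] = std::make_pair(0, 1);
  std::printf("ep_rank = %d\n", get_ep_rank(m, 0, 1)); // prints 2
  return 0;
}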
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib.hpp
r1533 → r1539
  #include "ep_type.hpp"
  #include "ep_lib_intercomm.hpp"
- #include "ep_lib_intracomm.hpp"
+ #include "ep_lib_endpoint.hpp"
  #include "ep_lib_local.hpp"
  #include "ep_lib_collective.hpp"
- #include "ep_tag.hpp"
  #include "ep_lib_fortran.hpp"
  #include "ep_lib_win.hpp"
  #include "ep_lib_mpi.hpp"
- //#include "ep_mpi.hpp"
…
  int MPI_Comm_remote_size(MPI_Comm comm, int *size);

  int MPI_Send(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
  int MPI_Ssend(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
…
  int MPI_Alloc_mem(MPI_Aint size, MPI_Info info, void *baseptr);
-
-
  int MPI_Alloc_mem(unsigned long size, MPI_Info info, void *baseptr);
…
  int test_sendrecv(MPI_Comm comm);
-
- int EP_group_init();

  }

- //MPI_Datatype to_mpi(ep_lib::MPI_Datatype type);
-
  #endif // EP_LIB_HPP_INCLUDED
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib_collective.hpp
r1520 → r1539
  int MPI_Barrier(MPI_Comm comm);

  int MPI_Comm_dup(MPI_Comm comm, MPI_Comm *newcomm);
…
  int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm);

- int MPI_Intercomm_create_kernel(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm);
-
- int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm);
-
- int MPI_Intercomm_create_unique_leader(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm);
+ //int MPI_Intercomm_create_from_world(MPI_Comm local_comm, int local_leader, void* peer_comm_ptr, int mpi_remote_leader, int tag, MPI_Comm *newintercomm);
+
+ //int MPI_Intercomm_create_unique_leader(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm);

  int MPI_Comm_create_endpoints(void* base_comm_ptr, int num_ep, MPI_Info info, MPI_Comm *& out_comm_hdls); // from EP to create endpoints
…
  int MPI_Intercomm_merge(MPI_Comm intercomm, bool high, MPI_Comm *newintracomm);

- int MPI_Intercomm_merge_unique_leader(MPI_Comm intercomm, bool high, MPI_Comm *newintracomm);
+ //int MPI_Intercomm_merge_unique_leader(MPI_Comm intercomm, bool high, MPI_Comm *newintracomm);

  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib_intercomm.hpp
r1520 → r1539
  typedef void* MPI_Op;

- int MPI_Send_intercomm(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
- int MPI_Ssend_intercomm(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm);
- int MPI_Isend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request);
- int MPI_Issend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request);
-
- int MPI_Comm_dup_intercomm(MPI_Comm comm, MPI_Comm *newcomm);
-
- int MPI_Comm_free_intercomm(MPI_Comm* comm);
-
- int MPI_Barrier_intercomm(MPI_Comm comm);
-
- int Message_Check_intercomm(MPI_Comm comm);
-
- int MPI_Isend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request);
- int MPI_Issend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request);
-
- int MPI_Iprobe_intercomm(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status);
- int MPI_Improbe_intercomm(int source, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status);
+ int MPI_Allgather_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
+ int MPI_Allgatherv_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, MPI_Comm comm);
+
+ int MPI_Allreduce_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ int MPI_Alltoall_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm);
+
+ int MPI_Bcast_intercomm(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm);
+
+ int MPI_Exscan_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+ int MPI_Scan_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ int MPI_Gather_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
+
+ int MPI_Gatherv_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int input_recvcounts[], const int input_displs[],
+                           MPI_Datatype recvtype, int root, MPI_Comm comm);
+
+ int MPI_Reduce_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm);
+
+ int MPI_Reduce_scatter_intercomm(const void *sendbuf, void *recvbuf, const int recvcounts[], MPI_Datatype datatype, MPI_Op op, MPI_Comm comm);
+
+ int MPI_Scatter_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm);
+ int MPI_Scatterv_intercomm(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount,
+                            MPI_Datatype recvtype, int root, MPI_Comm comm);
  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_lib_mpi.hpp
r1521 → r1539
  int MPI_Iprobe_mpi(int source, int tag, MPI_Comm comm, int *flag, MPI_Status *status);
+ int MPI_Improbe_mpi(int src, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status);

  int MPI_Comm_rank_mpi(MPI_Comm comm, int* rank);
…
  int MPI_Barrier_mpi(MPI_Comm comm);

- int MPI_Abort_mpi(MPI_Comm comm, int errorcode);
+ int MPI_Intercomm_create_mpi(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm);

  }
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_merge.cpp
r1520 r1539 10 10 { 11 11 12 int MPI_Intercomm_merge _unique_leader(MPI_Comm inter_comm, bool high, MPI_Comm *newintracomm)12 int MPI_Intercomm_merge(MPI_Comm inter_comm, bool high, MPI_Comm *newintracomm) 13 13 { 14 Debug("intercomm_merge with unique leader\n"); 14 15 int ep_rank = inter_comm->ep_comm_ptr->size_rank_info[0].first; 16 int ep_size = inter_comm->ep_comm_ptr->size_rank_info[0].second; 17 int ep_rank_loc = inter_comm->ep_comm_ptr->size_rank_info[1].first; 18 int num_ep = inter_comm->ep_comm_ptr->size_rank_info[1].second; 19 int mpi_rank = inter_comm->ep_comm_ptr->size_rank_info[2].first; 15 20 21 int remote_ep_size = inter_comm->inter_rank_map->size(); 16 22 17 18 int ep_rank, ep_rank_loc, mpi_rank; 19 int ep_size, num_ep, mpi_size; 20 21 ep_rank = inter_comm->ep_comm_ptr->size_rank_info[0].first; 22 ep_rank_loc = inter_comm->ep_comm_ptr->size_rank_info[1].first; 23 mpi_rank = inter_comm->ep_comm_ptr->size_rank_info[2].first; 24 ep_size = inter_comm->ep_comm_ptr->size_rank_info[0].second; 25 num_ep = inter_comm->ep_comm_ptr->size_rank_info[1].second; 26 mpi_size = inter_comm->ep_comm_ptr->size_rank_info[2].second; 27 28 int local_high = high; 29 int remote_high; 30 31 int remote_ep_size = inter_comm->ep_comm_ptr->intercomm->remote_rank_map->size(); 32 33 int local_ep_rank, local_ep_rank_loc, local_mpi_rank; 34 int local_ep_size, local_num_ep, local_mpi_size; 35 36 //local_ep_rank = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].first; 37 //local_ep_rank_loc = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].first; 38 //local_mpi_rank = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].first; 39 //local_ep_size = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[0].second; 40 //local_num_ep = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[1].second; 41 //local_mpi_size = inter_comm->ep_comm_ptr->intercomm->local_comm->ep_comm_ptr->size_rank_info[2].second; 42 43 44 if(local_ep_rank == 0) 45 { 46 MPI_Status status[2]; 47 MPI_Request request[2]; 48 MPI_Isend(&local_high, 1, MPI_INT, 0, inter_comm->ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &request[0]); 49 MPI_Irecv(&remote_high, 1, MPI_INT, 0, inter_comm->ep_comm_ptr->intercomm->intercomm_tag, inter_comm, &request[1]); 50 51 MPI_Waitall(2, request, status); 52 } 53 54 55 //MPI_Bcast(&remote_high, 1, MPI_INT, 0, inter_comm->ep_comm_ptr->intercomm->local_comm); 56 23 int new_ep_rank = high? remote_ep_size + ep_rank : ep_rank; 57 24 58 25 59 26 MPI_Comm_dup(inter_comm, newintracomm); 60 27 61 int my_ep_rank = local_high<remote_high? local_ep_rank: local_ep_rank+remote_ep_size; 28 (*newintracomm)->is_intercomm = false; 29 (*newintracomm)->inter_rank_map->clear(); 30 delete (*newintracomm)->inter_rank_map; 31 32 (*newintracomm)->ep_comm_ptr->size_rank_info[0].second = ep_size + remote_ep_size; 33 34 /////////////////////////////////// 35 int int_high = high? 
1 : 0; 36 int sum_high; 37 38 MPI_Allreduce(&int_high, &sum_high, 1, MPI_INT, MPI_SUM, *newintracomm); 39 40 if(sum_high==0 || sum_high==ep_size+remote_ep_size) 41 { 42 printf("MPI_Intercomm_merge error: please define high with different value...\n"); 43 MPI_Abort(inter_comm, 0); 44 } 45 46 47 48 /////////////////////////////////// 49 50 (*newintracomm)->ep_comm_ptr->size_rank_info[0].first = new_ep_rank; 51 52 // modif ep_rank_map 53 int my_triple[3] = {new_ep_rank, ep_rank_loc, mpi_rank}; 54 int *my_triple_list = new int[3*(ep_size+remote_ep_size)]; 55 56 57 MPI_Allgather(my_triple, 3, MPI_INT, my_triple_list, 3, MPI_INT, *newintracomm); 62 58 63 59 64 int intra_ep_rank, intra_ep_rank_loc, intra_mpi_rank; 65 int intra_ep_size, intra_num_ep, intra_mpi_size; 60 #ifdef _showinfo 61 for(int i=0; i<ep_size+remote_ep_size; i++) 62 { 63 if(new_ep_rank == i) 64 { 65 for(int j=0; j<ep_size+remote_ep_size; j++) 66 { 67 printf("rank %d : my_triple_list[%d] = %d %d %d\n", i, j, my_triple_list[3*j], my_triple_list[3*j+1], my_triple_list[3*j+2]); 68 } 69 printf("\n"); 70 } 71 MPI_Barrier(*newintracomm); 72 } 73 #endif 66 74 67 intra_ep_rank = (*newintracomm)->ep_comm_ptr->size_rank_info[0].first; 68 intra_ep_rank_loc = (*newintracomm)->ep_comm_ptr->size_rank_info[1].first; 69 intra_mpi_rank = (*newintracomm)->ep_comm_ptr->size_rank_info[2].first; 70 intra_ep_size = (*newintracomm)->ep_comm_ptr->size_rank_info[0].second; 71 intra_num_ep = (*newintracomm)->ep_comm_ptr->size_rank_info[1].second; 72 intra_mpi_size = (*newintracomm)->ep_comm_ptr->size_rank_info[2].second; 73 74 75 MPI_Barrier_local(*newintracomm); 76 77 78 int *reorder; 79 if(intra_ep_rank_loc == 0) 75 if((*newintracomm)->ep_comm_ptr->size_rank_info[1].first==0) 80 76 { 81 reorder = new int[intra_ep_size]; 82 } 83 84 85 MPI_Gather(&my_ep_rank, 1, MPI_INT, reorder, 1, MPI_INT, 0, *newintracomm); 86 if(intra_ep_rank_loc == 0) 87 { 88 ::MPI_Bcast(reorder, intra_ep_size, to_mpi_type(MPI_INT), 0, to_mpi_comm((*newintracomm)->mpi_comm)); 89 90 vector< pair<int, int> > tmp_rank_map(intra_ep_size); 91 92 93 for(int i=0; i<intra_ep_size; i++) 77 for(int i=0; i<ep_size+remote_ep_size; i++) 94 78 { 95 tmp_rank_map[reorder[i]] = (*newintracomm)->ep_rank_map->at(i);79 (*newintracomm)->ep_comm_ptr->comm_list[0]->ep_rank_map->at(my_triple_list[3*i]) = std::make_pair(my_triple_list[3*i+1], my_triple_list[3*i+2]); 96 80 } 97 98 //(*newintracomm)->rank_map->swap(tmp_rank_map);99 (*newintracomm)->ep_rank_map->clear();100 for(int i=0; i<tmp_rank_map.size(); i++)101 {102 (*newintracomm)->ep_rank_map->insert(std::pair< int, std::pair<int,int> >(i, tmp_rank_map[i].first, tmp_rank_map[i].second));103 }104 105 106 tmp_rank_map.clear();107 81 } 108 82 109 83 MPI_Barrier_local(*newintracomm); 110 84 111 (*newintracomm)->ep_comm_ptr->size_rank_info[0].first = my_ep_rank; 85 #ifdef _showinfo 112 86 113 if(intra_ep_rank_loc == 0)87 for(int i=0; i<ep_size+remote_ep_size; i++) 114 88 { 115 delete[] reorder; 116 } 117 118 return MPI_SUCCESS; 119 } 120 121 122 123 124 125 int MPI_Intercomm_merge(MPI_Comm inter_comm, bool high, MPI_Comm *newintracomm) 126 { 127 128 129 assert(inter_comm->is_intercomm); 130 131 // determine if only one MPI proc 132 133 // to be completed ...... 
134 135 // multiple MPI proc and high differs 136 137 int newcomm_ep_rank = inter_comm->ep_comm_ptr->intercomm->size_rank_info[0].first; 138 int newcomm_ep_rank_loc = inter_comm->ep_comm_ptr->intercomm->size_rank_info[1].first; 139 int newcomm_num_ep = inter_comm->ep_comm_ptr->intercomm->size_rank_info[1].second; 140 141 int ep_rank = inter_comm->ep_comm_ptr->size_rank_info[0].first; 142 int ep_rank_loc = inter_comm->ep_comm_ptr->size_rank_info[1].first; 143 int num_ep = inter_comm->ep_comm_ptr->size_rank_info[1].second; 144 145 if(newcomm_ep_rank_loc == 0) 146 { 147 ::MPI_Comm *mpi_intracomm = new ::MPI_Comm; 148 ::MPI_Intercomm_merge(to_mpi_comm(inter_comm->ep_comm_ptr->intercomm->mpi_inter_comm), high, mpi_intracomm); 149 150 MPI_Info info; 151 MPI_Comm *ep_comm; 152 MPI_Comm_create_endpoints(mpi_intracomm, newcomm_num_ep, info, ep_comm); 153 154 inter_comm->ep_comm_ptr->comm_list[0]->mem_bridge = ep_comm; 155 } 156 157 MPI_Barrier_local(inter_comm); 158 159 int remote_num_ep = newcomm_num_ep - num_ep; 160 161 *newintracomm = inter_comm->ep_comm_ptr->comm_list[0]->mem_bridge[high? remote_num_ep+ep_rank_loc : ep_rank_loc]; 162 163 int ep_size = inter_comm->ep_comm_ptr->size_rank_info[0].second; 164 int remote_ep_size = inter_comm->ep_comm_ptr->intercomm->intercomm_rank_map->size(); 165 166 //printf("ep_size = %d, remote_ep_size = %d\n", ep_size, remote_ep_size); 167 168 (*newintracomm)->ep_comm_ptr->size_rank_info[0].first = high? remote_ep_size+ep_rank : ep_rank; 169 170 int my_triple[3]; 171 my_triple[0] = (*newintracomm)->ep_comm_ptr->size_rank_info[0].first; 172 my_triple[1] = (*newintracomm)->ep_comm_ptr->size_rank_info[1].first; 173 my_triple[2] = (*newintracomm)->ep_comm_ptr->size_rank_info[2].first; 174 175 int *my_triple_list = new int[3 * (*newintracomm)->ep_comm_ptr->size_rank_info[0].second]; 176 177 178 MPI_Allgather(my_triple, 3, MPI_INT, my_triple_list, 3, MPI_INT, *newintracomm); 179 180 if((*newintracomm)->ep_comm_ptr->size_rank_info[1].first == 0) 181 { 182 (*newintracomm)->ep_rank_map->clear(); 183 for(int i=0; i<(*newintracomm)->ep_comm_ptr->size_rank_info[0].second; i++) 89 if(new_ep_rank == i) 184 90 { 185 (*newintracomm)->ep_rank_map->insert(std::pair< int, std::pair<int,int> >(my_triple_list[3*i], my_triple_list[3*i+1], my_triple_list[3*i+2])); 91 for(EP_RANK_MAP::iterator it = (*newintracomm)->ep_rank_map->begin(); it != (*newintracomm)->ep_rank_map->end(); it++) 92 { 93 printf("rank %d : ep_rank_map[%d] = %d %d\n", i, it->first, it->second.first, it->second.second); 94 } 95 printf("\n"); 186 96 } 187 } 188 189 #ifdef _showinfo 190 MPI_Barrier_local(inter_comm); 191 if((*newintracomm)->ep_comm_ptr->size_rank_info[0].first == 15) 192 { 193 for(std::map<int, std::pair<int, int> >::iterator it = (*newintracomm)->ep_rank_map->begin(); it != (*newintracomm)->ep_rank_map->end(); it++) 194 { 195 printf("(%d %d %d)\n", it->first, it->second.first, it->second.second); 196 } 97 MPI_Barrier(*newintracomm); 197 98 } 198 99 #endif 199 200 delete my_triple_list; 100 101 delete[] my_triple_list; 102 201 103 } 202 104 -
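The rewritten merge fixes the rank order first (the high side is appended after the other side's remote_ep_size endpoints, and an MPI_Allreduce on the 0/1-encoded high flag aborts if both groups passed the same value), then allgathers one (new_ep_rank, ep_rank_loc, mpi_rank) triple per endpoint and rebuilds ep_rank_map from the flat list. A standalone sketch of that rebuild step, assuming the triple list has already been gathered:

#include <map>
#include <utility>

// Mirrors the map used in the diff: merged rank -> (endpoint index, MPI rank).
typedef std::map<int, std::pair<int, int> > EP_RANK_MAP;

void rebuild_rank_map(const int* triple_list, int merged_size, EP_RANK_MAP& ep_rank_map)
{
  for (int i = 0; i < merged_size; i++)
  {
    int new_ep_rank = triple_list[3 * i];      // rank in the merged communicator
    int ep_rank_loc = triple_list[3 * i + 1];  // endpoint index on its process
    int mpi_rank    = triple_list[3 * i + 2];  // owning MPI process
    ep_rank_map[new_ep_rank] = std::make_pair(ep_rank_loc, mpi_rank);
  }
}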
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_message.cpp
r1533 r1539 32 32 std::list<MPI_Request* >::iterator it; 33 33 34 //show_EP_PendingRequests(EP_PendingRequests);34 show_EP_PendingRequests(EP_PendingRequests); 35 35 36 36 … … 81 81 } 82 82 83 if((*(*it))-> state == 0)83 if((*(*it))->probed == false) 84 84 { 85 85 #pragma omp critical (_query0) 86 86 { 87 MPI_Iprobe ((*(*it))->ep_src, (*(*it))->ep_tag, ((*(*it))->comm), &probed, &status);87 MPI_Iprobe_endpoint((*(*it))->ep_src, (*(*it))->ep_tag, ((*(*it))->comm), &probed, &status); 88 88 if(probed) 89 89 { … … 95 95 96 96 97 MPI_Improbe ((*(*it))->ep_src, (*(*it))->ep_tag, (*(*it))->comm, &probed, message, &status);97 MPI_Improbe_endpoint((*(*it))->ep_src, (*(*it))->ep_tag, (*(*it))->comm, &probed, message, &status); 98 98 99 99 } … … 107 107 MPI_Imrecv((*(*it))->buf, recv_count, (*(*it))->ep_datatype, message, *it); 108 108 (*(*it))->type = 3; 109 (*(*it))-> state = 1;109 (*(*it))->probed = true; 110 110 111 111 memcheck("delete "<< status.mpi_status <<" : in ep_lib::Request_Check, delete status.mpi_status"); … … 133 133 int Message_Check(MPI_Comm comm) 134 134 { 135 if(!comm->is_ep) return MPI_SUCCESS; 136 137 if(comm->is_intercomm) 138 { 139 Message_Check_intercomm(comm); 140 } 141 142 return Message_Check_intracomm(comm); 143 144 } 145 146 147 int Message_Check_intracomm(MPI_Comm comm) 135 if(comm->is_ep) return Message_Check_endpoint(comm); 136 } 137 138 139 int Message_Check_endpoint(MPI_Comm comm) 148 140 { 149 141 … … 204 196 } 205 197 206 207 int Message_Check_intercomm(MPI_Comm comm)208 {209 if(!comm->ep_comm_ptr->intercomm->mpi_inter_comm) return 0;210 211 Debug("Message probing for intercomm\n");212 213 int flag = true;214 ::MPI_Message message;215 ::MPI_Status status;216 int current_ep_rank;217 MPI_Comm_rank(comm, ¤t_ep_rank);218 219 while(flag) // loop until the end of global queue "comm->ep_comm_ptr->intercomm->mpi_inter_comm"220 {221 Debug("Message probing for intracomm\n");222 223 #pragma omp critical (_mpi_call)224 {225 ::MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm), &flag, &status);226 if(flag)227 {228 Debug("find message in mpi comm \n");229 ::MPI_Mprobe(status.MPI_SOURCE, status.MPI_TAG, to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm), &message, &status);230 }231 }232 233 234 if(flag)235 {236 237 MPI_Message msg = new ep_message;238 msg->mpi_message = new ::MPI_Message(message);239 240 memcheck("new "<< msg <<" : in ep_lib::Message_Check, msg = new ep_message");241 memcheck("new "<< msg->mpi_message <<" : in ep_lib::Message_Check, msg->mpi_message = new ::MPI_Message");242 243 244 msg->ep_tag = bitset<15>(status.MPI_TAG >> 16).to_ulong();245 int src_loc = bitset<8> (status.MPI_TAG >> 8) .to_ulong();246 int dest_loc = bitset<8> (status.MPI_TAG) .to_ulong();247 int src_mpi = status.MPI_SOURCE;248 249 msg->ep_src = get_ep_rank(comm, src_loc, src_mpi);250 #ifdef _showinfo251 printf("status.MPI_TAG = %d, src_loc = %d, dest_loc = %d, ep_tag = %d\n", status.MPI_TAG, src_loc, dest_loc, msg->ep_tag);252 #endif253 254 msg->mpi_status = new ::MPI_Status(status);255 memcheck("new "<< msg->mpi_status <<" : in ep_lib::Message_Check, msg->mpi_status = new ::MPI_Status");256 257 #pragma omp critical (_query)258 {259 #pragma omp flush260 comm->ep_comm_ptr->comm_list[dest_loc]->ep_comm_ptr->message_queue->push_back(msg);261 memcheck("comm->ep_comm_ptr->comm_list["<<dest_loc<<"]->ep_comm_ptr->message_queue->size = "<<comm->ep_comm_ptr->comm_list[dest_loc]->ep_comm_ptr->message_queue->size());262 #pragma omp flush263 }264 }265 
}266 267 Message_Check_intracomm(comm);268 269 return MPI_SUCCESS;270 }271 272 273 198 274 199 void show_EP_PendingRequests(std::list< ep_lib::MPI_Request* > * EP_PendingRequest) … … 310 235 311 236 } 237 -
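Message_Check is now a thin guard in front of a single Message_Check_endpoint; the separate intercomm scan, including its bitset decoding of the MPI tag, is gone (note the non-EP branch now falls through without returning a value, worth a trailing return MPI_SUCCESS). That decoding documents how the EP layer multiplexes endpoint traffic over one MPI rank: the user tag lives in bits 16 and up, the source endpoint index in bits 8-15, and the destination endpoint index in bits 0-7. A sketch of the packing consistent with that decode; tag_combine is a real library function, but this body is inferred from the decode side rather than copied from the library:

#include <cassert>

int tag_combine(int ep_tag, int src_loc, int dest_loc)
{
  assert(0 <= ep_tag   && ep_tag   < (1 << 15));  // 15-bit user tag
  assert(0 <= src_loc  && src_loc  < (1 << 8));   // 8-bit endpoint indices
  assert(0 <= dest_loc && dest_loc < (1 << 8));
  return (ep_tag << 16) | (src_loc << 8) | dest_loc;
}

void tag_split(int mpi_tag, int* ep_tag, int* src_loc, int* dest_loc)
{
  *ep_tag   = (mpi_tag >> 16) & 0x7FFF;
  *src_loc  = (mpi_tag >> 8)  & 0xFF;
  *dest_loc =  mpi_tag        & 0xFF;
}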
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_probe.cpp
r1520 r1539 8 8 int MPI_Iprobe_mpi(int src, int tag, MPI_Comm comm, int *flag, MPI_Status *status) 9 9 { 10 ::MPI_Status mpi_status; 11 12 ::MPI_Iprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, &mpi_status); 13 14 status->mpi_status = new ::MPI_Status(mpi_status); 10 15 status->ep_src = src; 11 16 status->ep_tag = tag; 12 return ::MPI_Iprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, to_mpi_status_ptr(*status));13 17 } 18 19 20 int MPI_Improbe_mpi(int src, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status) 21 { 22 ::MPI_Status mpi_status; 23 ::MPI_Message mpi_message; 24 25 #ifdef _openmpi 26 #pragma omp critical (_mpi_call) 27 { 28 ::MPI_Iprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, &mpi_status); 29 if(*flag) 30 { 31 ::MPI_Mprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), &mpi_message, &mpi_status); 32 } 33 } 34 #elif _intelmpi 35 ::MPI_Improbe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, &mpi_message, &mpi_status); 36 #endif 37 38 status->mpi_status = new ::MPI_Status(mpi_status); 39 status->ep_src = src; 40 status->ep_tag = tag; 41 42 (*message)->mpi_message = &message; 43 (*message)->ep_src = src; 44 (*message)->ep_tag = tag; 45 } 46 14 47 15 48 int MPI_Iprobe(int src, int tag, MPI_Comm comm, int *flag, MPI_Status *status) … … 20 53 return MPI_Iprobe_mpi(src, tag, comm, flag, status); 21 54 } 55 56 if(comm->is_intercomm) 57 { 58 if(src>=0) src = comm->inter_rank_map->at(src); 59 } 22 60 23 else 61 return MPI_Iprobe_endpoint(src, tag, comm, flag, status); 62 } 63 64 int MPI_Iprobe_endpoint(int src, int tag, MPI_Comm comm, int *flag, MPI_Status *status) 65 { 66 Debug("MPI_Iprobe with EP\n"); 67 68 *flag = false; 69 70 Message_Check(comm); 71 72 #pragma omp flush 73 74 #pragma omp critical (_query) 75 for(Message_list::iterator it = comm->ep_comm_ptr->message_queue->begin(); it!= comm->ep_comm_ptr->message_queue->end(); ++it) 24 76 { 25 Debug("MPI_Iprobe with EP\n"); 77 bool src_matched = src<0? true: (*it)->ep_src == src; 78 bool tag_matched = tag<0? true: (*it)->ep_tag == tag; 26 79 27 *flag = false; 28 29 Message_Check(comm); 80 if(src_matched && tag_matched) 81 { 82 Debug("find message\n"); 83 84 status->mpi_status = new ::MPI_Status(*static_cast< ::MPI_Status*>((*it)->mpi_status)); 85 status->ep_src = (*it)->ep_src; 86 status->ep_tag = (*it)->ep_tag; 87 88 if(comm->is_intercomm) 89 { 90 for(INTER_RANK_MAP::iterator iter = comm->inter_rank_map->begin(); iter != comm->inter_rank_map->end(); iter++) 91 { 92 if(iter->second == (*it)->ep_src) status->ep_src=iter->first; 93 } 94 } 30 95 31 #pragma omp flush 32 33 #pragma omp critical (_query) 34 for(Message_list::iterator it = comm->ep_comm_ptr->message_queue->begin(); it!= comm->ep_comm_ptr->message_queue->end(); ++it) 35 { 36 bool src_matched = src<0? true: (*it)->ep_src == src; 37 bool tag_matched = tag<0? 
true: (*it)->ep_tag == tag; 38 39 if(src_matched && tag_matched) 40 { 41 Debug("find message\n"); 42 43 44 status->mpi_status = new ::MPI_Status(*static_cast< ::MPI_Status*>((*it)->mpi_status)); 45 status->ep_src = (*it)->ep_src; 46 status->ep_tag = (*it)->ep_tag; 47 48 *flag = true; 49 break; 50 } 96 *flag = true; 97 break; 51 98 } 52 99 } 53 100 } 54 101 55 102 56 103 57 104 int MPI_Improbe(int src, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status) 58 105 { 106 if(!comm->is_ep) 107 { 108 Debug("MPI_Iprobe with MPI\n"); 109 return MPI_Improbe_mpi(src, tag, comm, flag, message, status); 110 } 111 112 if(comm->is_intercomm) 113 { 114 src = comm->inter_rank_map->at(src); 115 *message = new ep_message; 116 printf("============= new *message = %p\n", *message); 117 } 118 119 return MPI_Improbe_endpoint(src, tag, comm, flag, message, status); 120 } 121 122 123 124 int MPI_Improbe_endpoint(int src, int tag, MPI_Comm comm, int *flag, MPI_Message *message, MPI_Status *status) 125 { 59 126 int ep_rank_loc = comm->ep_comm_ptr->size_rank_info[1].first; 60 127 int mpi_rank = comm->ep_comm_ptr->size_rank_info[2].first; 128 61 129 *flag = false; 62 if(!comm->is_ep) 63 { 64 Debug("calling MPI_Improbe MPI\n"); 65 66 ::MPI_Status mpi_status; 67 ::MPI_Message mpi_message; 68 69 #ifdef _openmpi 70 #pragma omp critical (_mpi_call) 71 { 72 ::MPI_Iprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, &mpi_status); 73 if(*flag) 74 { 75 ::MPI_Mprobe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), &mpi_message, &mpi_status); 76 } 77 } 78 #elif _intelmpi 79 ::MPI_Improbe(src<0? MPI_ANY_SOURCE : src, tag<0? MPI_ANY_TAG: tag, to_mpi_comm(comm->mpi_comm), flag, &mpi_message, &mpi_status); 80 #endif 81 82 status->mpi_status = &mpi_status; 83 status->ep_src = src; 84 status->ep_tag = tag; 85 86 (*message)->mpi_message = &message; 87 (*message)->ep_src = src; 88 (*message)->ep_tag = tag; 89 90 91 return 0; 92 } 93 94 95 130 131 Message_Check(comm); 132 96 133 #pragma omp flush 97 134 … … 146 183 } 147 184 185 -
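MPI_Iprobe_endpoint first drains incoming MPI traffic via Message_Check, then scans the endpoint's local message queue for the first entry matching the requested source and tag, with negative values acting as wildcards; on an intercomm the matched source is mapped back through inter_rank_map before being reported. (One detail to watch in the new MPI_Improbe_mpi: it assigns &message, the address of the output parameter, to (*message)->mpi_message, where a copy of the probed ::MPI_Message appears intended.) A standalone sketch of the matching step, with ep_message reduced to the two fields the match needs:

#include <list>

struct ep_message { int ep_src; int ep_tag; };

// Returns true and sets `found` for the first queue entry matching (src, tag);
// src < 0 and tag < 0 behave like MPI_ANY_SOURCE / MPI_ANY_TAG.
bool probe_queue(const std::list<ep_message*>& queue, int src, int tag,
                 ep_message*& found)
{
  for (std::list<ep_message*>::const_iterator it = queue.begin();
       it != queue.end(); ++it)
  {
    bool src_matched = src < 0 ? true : (*it)->ep_src == src;
    bool tag_matched = tag < 0 ? true : (*it)->ep_tag == tag;
    if (src_matched && tag_matched) { found = *it; return true; }
  }
  return false;
}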
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_recv.cpp
r1533 r1539 40 40 { 41 41 if(!comm->is_ep) return MPI_Irecv_mpi(buf, count, datatype, src, tag, comm, request); 42 43 if(comm->is_intercomm) 44 { 45 if(src>=0) src = comm->inter_rank_map->at(src); 46 } 42 47 43 48 Debug("MPI_Irecv with EP"); … … 54 59 (*request)->comm = comm; 55 60 (*request)->type = 2; 61 (*request)->probed = false; 56 62 (*request)->state = 0; 57 58 63 59 64 (*request)->ep_src = src; … … 63 68 if(EP_PendingRequests == 0 ) EP_PendingRequests = new std::list< MPI_Request* >; 64 69 65 EP_PendingRequests->push_back(request); 70 EP_PendingRequests->push_back(request); 66 71 67 72 memcheck("EP_PendingRequests["<<ep_rank<<"]->size() = " << EP_PendingRequests->size()); … … 100 105 memcheck("delete " << (*message)->mpi_message << " : in ep_lib::MPI_Mrecv, delete (*message)->mpi_message"); 101 106 delete (*message)->mpi_message; 107 memcheck("delete " << *message << " : in ep_lib::MPI_Imrecv, delete *message"); 108 delete *message; 102 109 103 110 #ifdef _check_sum … … 118 125 (*request)->ep_src = (*message)->ep_src; 119 126 127 (*request)->probed = true; 120 128 (*request)->state = 1; 121 129 … … 125 133 delete (*message)->mpi_message; 126 134 135 127 136 #ifdef _check_sum 128 137 check_sum_recv(buf, count, datatype, message->ep_src, message->ep_tag); … … 130 139 131 140 132 return Request_Check();141 //return Request_Check(); 133 142 } 134 143 -
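The receive path stops overloading the request's state field to remember whether probing has happened: MPI_Irecv queues the request with probed = false, Request_Check flips it to true once an Improbe has matched a message and the MPI_Imrecv has been posted, and a direct MPI_Mrecv/MPI_Imrecv marks it the same way. A compact sketch of that life cycle, with ep_request reduced to the relevant fields and the MPI calls elided into comments:

struct ep_request
{
  int  type;    // 2: pending irecv, 3: imrecv posted (values used in the diff)
  bool probed;  // matched to an MPI message yet?
  int  state;   // 0: new, 1: imrecvd, 2: tested or waited
};

void post_irecv(ep_request& r)
{
  // ... fill buf/src/tag, push the request onto EP_PendingRequests ...
  r.type = 2; r.probed = false; r.state = 0;  // queued, not yet matched
}

void on_message_matched(ep_request& r)
{
  // ... MPI_Improbe_endpoint found the message, MPI_Imrecv was posted ...
  r.type = 3; r.probed = true;                // skip further probing
}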
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_reduce.cpp
r1520 r1539 288 288 { 289 289 290 if(!comm->is_ep && comm->mpi_comm) 291 { 292 return ::MPI_Reduce(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), root, to_mpi_comm(comm->mpi_comm)); 293 } 290 if(!comm->is_ep) return ::MPI_Reduce(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), root, to_mpi_comm(comm->mpi_comm)); 291 if(comm->is_intercomm) return MPI_Reduce_intercomm(sendbuf, recvbuf, count, datatype, op, root, comm); 294 292 295 293 … … 339 337 340 338 339 int MPI_Reduce_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, int root, MPI_Comm comm) 340 { 341 printf("MPI_Reduce_intercomm not yet implemented\n"); 342 MPI_Abort(comm, 0); 343 } 341 344 } 342 345 -
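All intercomm branches of the collectives added in this changeset share the same not-yet-implemented stub: print a message and MPI_Abort. Since the stubs are declared int but fall off the end of the function, a terminating return keeps the signature well formed. A self-contained sketch against plain MPI types; the stub name is illustrative:

#include <cstdio>
#include <mpi.h>

int Foo_intercomm_stub(MPI_Comm comm)
{
  std::printf("Foo_intercomm not yet implemented\n");
  MPI_Abort(comm, 0);       // does not return in a conforming MPI run
  return MPI_ERR_UNKNOWN;   // unreachable; silences missing-return warnings
}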
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_reduce_scatter.cpp
r1520 r1539 19 19 int MPI_Reduce_scatter(const void *sendbuf, void *recvbuf, const int recvcounts[], MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) 20 20 { 21 if(!comm->is_ep) 22 { 23 return ::MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm)); 24 } 21 if(!comm->is_ep) return ::MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm)); 22 if(comm->is_intercomm) return MPI_Reduce_scatter_intercomm(sendbuf, recvbuf, recvcounts, datatype, op, comm); 25 23 26 24 … … 81 79 } 82 80 81 int MPI_Reduce_scatter_intercomm(const void *sendbuf, void *recvbuf, const int recvcounts[], MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) 82 { 83 printf("MPI_Reduce_scatter_intercomm not yet implemented\n"); 84 MPI_Abort(comm, 0); 85 } 86 83 87 84 88 } -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_request.hpp
r1521 r1539 21 21 // 1: imrecvd 22 22 // 2: tested or waited 23 bool probed; 23 24 24 25 void* buf; -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_scan.cpp
r1520 r1539 350 350 int MPI_Scan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) 351 351 { 352 if(!comm->is_ep) 353 { 354 return ::MPI_Scan(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm)); 355 } 352 if(!comm->is_ep) return ::MPI_Scan(sendbuf, recvbuf, count, to_mpi_type(datatype), to_mpi_op(op), to_mpi_comm(comm->mpi_comm)); 353 if(comm->is_intercomm) return MPI_Scan_intercomm(sendbuf, recvbuf, count, datatype, op, comm); 356 354 357 355 valid_type(datatype); … … 446 444 } 447 445 446 int MPI_Scan_intercomm(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm) 447 { 448 printf("MPI_Scan_intercomm not yet implemented\n"); 449 MPI_Abort(comm, 0); 450 } 451 448 452 } -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_scatter.cpp
r1520 r1539 42 42 int MPI_Scatter(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) 43 43 { 44 if(!comm->is_ep) 45 { 46 return ::MPI_Scatter(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm)); 47 } 44 if(!comm->is_ep) return ::MPI_Scatter(sendbuf, sendcount, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm)); 45 if(comm->is_intercomm) return MPI_Scatter_intercomm(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm); 48 46 49 47 assert(sendcount == recvcount); … … 97 95 98 96 99 100 // if(is_root) printf("\nranks = %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", ranks[0], ranks[1], ranks[2], ranks[3], ranks[4], ranks[5], ranks[6], ranks[7],101 // ranks[8], ranks[9], ranks[10], ranks[11], ranks[12], ranks[13], ranks[14], ranks[15]);102 103 97 if(is_root) 104 98 for(int i=0; i<ep_size; i++) … … 106 100 memcpy(tmp_sendbuf + i*datasize*count, sendbuf + ranks[i]*datasize*count, count*datasize); 107 101 } 108 109 // if(is_root) printf("\ntmp_sendbuf = %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", static_cast<int*>(tmp_sendbuf)[0], static_cast<int*>(tmp_sendbuf)[2], static_cast<int*>(tmp_sendbuf)[4], static_cast<int*>(tmp_sendbuf)[6],110 // static_cast<int*>(tmp_sendbuf)[8], static_cast<int*>(tmp_sendbuf)[10], static_cast<int*>(tmp_sendbuf)[12], static_cast<int*>(tmp_sendbuf)[14],111 // static_cast<int*>(tmp_sendbuf)[16], static_cast<int*>(tmp_sendbuf)[18], static_cast<int*>(tmp_sendbuf)[20], static_cast<int*>(tmp_sendbuf)[22],112 // static_cast<int*>(tmp_sendbuf)[24], static_cast<int*>(tmp_sendbuf)[26], static_cast<int*>(tmp_sendbuf)[28], static_cast<int*>(tmp_sendbuf)[30] );113 114 102 115 103 // MPI_Scatterv from root to masters … … 127 115 ::MPI_Scatterv(tmp_sendbuf, recvcounts.data(), displs.data(), to_mpi_type(sendtype), local_recvbuf, num_ep*count, to_mpi_type(recvtype), root_mpi_rank, to_mpi_comm(comm->mpi_comm)); 128 116 129 // printf("local_recvbuf = %d %d %d %d\n", static_cast<int*>(local_recvbuf)[0], static_cast<int*>(local_recvbuf)[1], static_cast<int*>(local_recvbuf)[2], static_cast<int*>(local_recvbuf)[3]);130 // static_cast<int*>(local_recvbuf)[4], static_cast<int*>(local_recvbuf)[5], static_cast<int*>(local_recvbuf)[6], static_cast<int*>(local_recvbuf)[7]);131 117 } 132 118 … … 138 124 } 139 125 126 127 int MPI_Scatter_intercomm(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm) 128 { 129 printf("MPI_Scatter_intercomm not yet implemented\n"); 130 MPI_Abort(comm, 0); 131 } 132 140 133 } -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_scatterv.cpp
r1520 r1539 45 45 MPI_Datatype recvtype, int root, MPI_Comm comm) 46 46 { 47 if(!comm->is_ep) 48 { 49 return ::MPI_Scatterv(sendbuf, sendcounts, displs, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm)); 50 } 47 if(!comm->is_ep) return ::MPI_Scatterv(sendbuf, sendcounts, displs, to_mpi_type(sendtype), recvbuf, recvcount, to_mpi_type(recvtype), root, to_mpi_comm(comm->mpi_comm)); 48 if(comm->is_intercomm) return MPI_Scatterv_intercomm(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm); 51 49 52 50 assert(sendtype == recvtype); … … 105 103 for(int i=0; i<ep_size; i++) 106 104 { 107 //printf("i=%d : start from %d, src displs = %d, count = %d\n ", i, local_displs/datasize, displs[ranks[i]], sendcounts[ranks[i]]);108 105 memcpy(tmp_sendbuf+local_displs, sendbuf + displs[ranks[i]]*datasize, sendcounts[ranks[i]]*datasize); 109 106 local_displs += sendcounts[ranks[i]]*datasize; 110 107 } 111 112 //for(int i=0; i<ep_size*2; i++) printf("%d\t", static_cast<int*>(const_cast<void*>(tmp_sendbuf))[i]);113 108 } 114 109 … … 129 124 130 125 ::MPI_Scatterv(tmp_sendbuf, recvcounts.data(), my_displs.data(), to_mpi_type(sendtype), local_sendbuf, num_ep*count, to_mpi_type(recvtype), root_mpi_rank, to_mpi_comm(comm->mpi_comm)); 131 132 // printf("my_displs = %d %d %d %d\n", my_displs[0], my_displs[1], my_displs[2], my_displs[3] );133 134 // printf("%d %d %d %d %d %d %d %d\n", static_cast<int*>(local_sendbuf)[0], static_cast<int*>(local_sendbuf)[1], static_cast<int*>(local_sendbuf)[2], static_cast<int*>(local_sendbuf)[3],135 // static_cast<int*>(local_sendbuf)[4], static_cast<int*>(local_sendbuf)[5], static_cast<int*>(local_sendbuf)[6], static_cast<int*>(local_sendbuf)[7]);136 126 } 137 127 … … 154 144 155 145 146 int MPI_Scatterv_intercomm(const void *sendbuf, const int sendcounts[], const int displs[], MPI_Datatype sendtype, void *recvbuf, int recvcount, 147 MPI_Datatype recvtype, int root, MPI_Comm comm) 148 { 149 printf("MPI_Scatterv_intercomm not yet implemented\n"); 150 MPI_Abort(comm, 0); 151 } 152 153 156 154 } -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_send.cpp
r1520 r1539 18 18 { 19 19 if(!comm->is_ep) return MPI_Send_mpi(buf, count, datatype, dest, tag, comm); 20 if(comm->is_intercomm) return MPI_Send_intercomm(buf, count, datatype, dest, tag, comm);20 if(comm->is_intercomm) dest = comm->inter_rank_map->at(dest); 21 21 22 22 Debug("\nMPI_Send with EP\n"); … … 38 38 { 39 39 if(!comm->is_ep) return MPI_Ssend_mpi(buf, count, datatype, dest, tag, comm); 40 if(comm->is_intercomm) return MPI_Ssend_intercomm(buf, count, datatype, dest, tag, comm);40 if(comm->is_intercomm) dest = comm->inter_rank_map->at(dest); 41 41 42 42 Debug("\nMPI_Ssend with EP\n"); … … 61 61 { 62 62 if(!comm->is_ep) return MPI_Isend_mpi(buf, count, datatype, dest, tag, comm, request); 63 if(comm->is_intercomm) return MPI_Isend_intercomm(buf, count, datatype, dest, tag, comm, request);63 if(comm->is_intercomm) dest = comm->inter_rank_map->at(dest); 64 64 65 65 Debug("\nMPI_Isend with EP\n"); … … 104 104 105 105 if(!comm->is_ep) return MPI_Issend_mpi(buf, count, datatype, dest, tag, comm, request); 106 if(comm->is_intercomm) return MPI_Issend_intercomm(buf, count, datatype, dest, tag, comm, request);106 if(comm->is_intercomm) dest = comm->inter_rank_map->at(dest); 107 107 108 108 Debug("\nMPI_Issend with EP\n"); … … 144 144 return ::MPI_Send(buf, count, to_mpi_type(datatype), dest, tag, to_mpi_comm(comm->mpi_comm)); 145 145 } 146 147 int MPI_Send_intercomm(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) 148 { 149 Debug("\nMPI_Send_intercomm with EP\n"); 150 MPI_Request request; 151 MPI_Status status; 152 MPI_Isend(buf, count, datatype, dest, tag, comm, &request); 153 MPI_Wait(&request, &status); 154 return MPI_SUCCESS; 155 } 156 146 157 147 158 148 int MPI_Ssend_mpi(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm) … … 163 153 164 154 165 int MPI_Ssend_intercomm(void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)166 {167 Debug("\nMPI_Ssend_intercomm with EP\n");168 MPI_Request request;169 MPI_Status status;170 MPI_Issend(buf, count, datatype, dest, tag, comm, &request);171 MPI_Wait(&request, &status);172 return MPI_SUCCESS;173 }174 175 155 int MPI_Isend_mpi(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request) 176 156 { … … 195 175 } 196 176 197 int MPI_Isend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request) 198 { 199 Debug("\nMPI_Isend_intercomm with EP\n"); 200 201 #ifdef _check_sum 202 check_sum_send(buf, count, datatype, dest, tag, comm); 203 #endif 204 205 int src_comm_label = comm->ep_comm_ptr->comm_label; 206 int dest_comm_label = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).second.second; 207 208 int src_rank; 209 MPI_Comm_rank(comm, &src_rank); 210 211 212 *request = new ep_request; 213 memcheck("new "<< *request <<" : in ep_lib::MPI_Isend_intercomm, *request = new ep_request"); 214 215 (*request)->mpi_request = new ::MPI_Request; 216 memcheck("new "<< (*request)->mpi_request <<" : in ep_lib::MPI_Isend_intercomm, (*request)->mpi_request = new ::MPI_Request"); 217 218 219 int ep_src_loc = comm->ep_rank_map->at(src_rank).first; 220 int ep_dest_loc = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).first; 221 int mpi_tag = tag_combine(tag, ep_src_loc, ep_dest_loc); 222 int mpi_dest = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).second.first; 223 224 #ifdef _showinfo 225 printf("Send : ep_src_loc = %d, ep_dest_loc = %d, mpi_src = %d, 
mpi_dest = %d, mpi_tag = %d\n", ep_src_loc, ep_dest_loc, comm->ep_comm_ptr->size_rank_info[2].first, mpi_dest, mpi_tag); 226 #endif 227 228 229 (*request)->ep_src = src_rank; 230 (*request)->ep_tag = tag; 231 (*request)->ep_datatype = datatype; 232 233 (*request)->type = 1; // used in wait 234 (*request)->comm = comm; 235 (*request)->buf = const_cast<void*>(buf); 236 237 238 if(src_comm_label == dest_comm_label) 239 { 240 Debug("\nMPI_Isend_intercomm with EP_intracomm\n"); 241 return ::MPI_Isend(buf, count, to_mpi_type(datatype), mpi_dest, mpi_tag, to_mpi_comm(comm->mpi_comm), to_mpi_request_ptr(*request)); 242 } 243 244 else 245 { 246 Debug("\nMPI_Isend_intercomm with EP_intercomm\n"); 247 return ::MPI_Isend(buf, count, to_mpi_type(datatype), mpi_dest, mpi_tag, to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm), to_mpi_request_ptr(*request)); 248 } 249 } 250 251 177 252 178 253 179 … … 275 201 276 202 277 int MPI_Issend_intercomm(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request)278 {279 Debug("\nMPI_Issend_intercomm with EP\n");280 281 #ifdef _check_sum282 check_sum_send(buf, count, datatype, dest, tag, comm);283 #endif284 285 int src_comm_label = comm->ep_comm_ptr->comm_label;286 int dest_comm_label = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).second.second;287 288 int src_rank;289 MPI_Comm_rank(comm, &src_rank);290 291 292 *request = new ep_request;293 memcheck("new "<< *request <<" : in ep_lib::MPI_Issend_intercomm, *request = new ep_request");294 295 (*request)->mpi_request = new ::MPI_Request;296 memcheck("new "<< (*request)->mpi_request <<" : in ep_lib::MPI_Issend_intercomm, (*request)->mpi_request = new ::MPI_Request");297 298 299 int ep_src_loc = comm->ep_rank_map->at(src_rank).first;300 int ep_dest_loc = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).first;301 int mpi_tag = tag_combine(tag, ep_src_loc, ep_dest_loc);302 int mpi_dest = comm->ep_comm_ptr->intercomm->intercomm_rank_map->at(dest).second.first;303 304 #ifdef _showinfo305 printf("ep_src_loc = %d, ep_dest_loc = %d, mpi_src = %d, mpi_dest = %d, mpi_tag = %d\n", ep_src_loc, ep_dest_loc, comm->ep_comm_ptr->size_rank_info[2].first, mpi_dest, mpi_tag);306 #endif307 308 (*request)->ep_src = src_rank;309 (*request)->ep_tag = tag;310 (*request)->ep_datatype = datatype;311 312 (*request)->type = 1; // used in wait313 (*request)->comm = comm;314 (*request)->buf = const_cast<void*>(buf);315 316 317 if(src_comm_label == dest_comm_label)318 {319 Debug("\nMPI_Issend_intercomm with EP_intracomm\n");320 return ::MPI_Issend(buf, count, to_mpi_type(datatype), mpi_dest, mpi_tag, to_mpi_comm(comm->mpi_comm), to_mpi_request_ptr(*request));321 }322 323 else324 {325 Debug("\nMPI_Issend_intercomm with EP_intercomm\n");326 return ::MPI_Issend(buf, count, to_mpi_type(datatype), mpi_dest, mpi_tag, to_mpi_comm(comm->ep_comm_ptr->intercomm->mpi_inter_comm), to_mpi_request_ptr(*request));327 }328 }329 330 203 } 331 204 -
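With the merged numbering in place, the dedicated *_intercomm send variants become unnecessary: an intercomm send is an ordinary endpoint send once the destination has been translated through inter_rank_map, which is why the removed MPI_Isend_intercomm bookkeeping (comm labels, intercomm_rank_map, choosing between mpi_comm and mpi_inter_comm) could be dropped. A sketch of the addressing an EP send then needs; plan_send is a hypothetical helper, and the two map typedefs mirror the structures visible in the diff:

#include <map>
#include <utility>

typedef std::map<int, int> INTER_RANK_MAP;                // remote rank -> merged rank
typedef std::map<int, std::pair<int, int> > EP_RANK_MAP;  // rank -> (endpoint index, MPI rank)

int tag_combine(int ep_tag, int src_loc, int dest_loc);   // packing sketched earlier

// Computes the (mpi_dest, packed mpi_tag) pair a send would use.
std::pair<int, int> plan_send(int dest, int tag, int src_loc, bool is_intercomm,
                              const INTER_RANK_MAP& inter_rank_map,
                              const EP_RANK_MAP& ep_rank_map)
{
  if (is_intercomm) dest = inter_rank_map.at(dest);  // single translation step
  int dest_loc = ep_rank_map.at(dest).first;         // target endpoint index
  int mpi_dest = ep_rank_map.at(dest).second;        // owning MPI rank
  return std::make_pair(mpi_dest, tag_combine(tag, src_loc, dest_loc));
}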
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_size.cpp
r1520 r1539 22 22 if(comm->is_ep) 23 23 { 24 if(comm->is_intercomm) 25 { 26 Debug("MPI_Comm_remote_size with EP_intercomm"); 27 return *size = comm->ep_comm_ptr->intercomm->intercomm_rank_map->size(); 28 } 29 else 30 { 31 Debug("MPI_Comm_remote_size with EP_intracomm"); 32 return *size=0; 33 } 24 return *size = comm->is_intercomm? comm->inter_rank_map->size() : 0; 34 25 } 35 26 return MPI_Comm_remote_size_mpi(comm, size); … … 49 40 } 50 41 51 52 42 } 53 43 -
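MPI_Comm_remote_size now reads the remote group size directly off inter_rank_map. Note that "return *size = ..." makes the function return the size itself rather than a status code; a variant keeping the two separate (a sketch, assuming ep_lib follows the usual MPI convention of returning MPI_SUCCESS):

#include <map>

int remote_size(bool is_intercomm, const std::map<int, int>& inter_rank_map, int* size)
{
  *size = is_intercomm ? (int)inter_rank_map.size() : 0;  // 0 for an intracomm
  return 0;  // MPI_SUCCESS
}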
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_split.cpp
r1520 r1539 9 9 { 10 10 11 void vec_simplify(std::vector<int> *inout_vector)12 {13 std::vector<int> out_vec;14 int found=false;15 for(std::vector<int>::iterator it_in = inout_vector->begin() ; it_in != inout_vector->end(); ++it_in)16 {17 for(std::vector<int>::iterator it = out_vec.begin() ; it != out_vec.end(); ++it)18 {19 if(*it_in == *it)20 {21 found=true;22 break;23 }24 else found=false;25 }26 if(found == false)27 {28 out_vec.push_back(*it_in);29 }30 }31 inout_vector->swap(out_vec);32 }33 34 11 void vec_simplify(std::vector<int> *in_vector, std::vector<int> *out_vector) 35 12 { … … 274 251 *newcomm = comm->ep_comm_ptr->comm_list[0]->mem_bridge[new_ep_rank_loc]; 275 252 memcheck("in MPI_Split ep_rank="<< ep_rank <<" : *newcomm = "<< *newcomm); 276 277 (*newcomm)->ep_comm_ptr->comm_label = color;278 253 279 254 (*newcomm)->ep_comm_ptr->size_rank_info[0].first = new_ep_rank; -
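ep_split drops the one-argument vec_simplify overload, an O(n^2) first-occurrence deduplication that rescanned its output vector for every input element. An equivalent dedupe in O(n log n) with std::set, shown for comparison; this is a sketch, not the two-argument overload the file keeps:

#include <set>
#include <vector>

void vec_simplify(std::vector<int>* inout_vector)
{
  std::vector<int> out_vec;
  std::set<int> seen;
  for (std::vector<int>::iterator it = inout_vector->begin();
       it != inout_vector->end(); ++it)
  {
    if (seen.insert(*it).second)  // true only on the first occurrence
      out_vec.push_back(*it);
  }
  inout_vector->swap(out_vec);
}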
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_test.cpp
r1533 r1539 25 25 { 26 26 printf("MPI_Test : Error in request type\n"); 27 28 27 exit(1); 29 28 } … … 39 38 40 39 ::MPI_Status mpi_status; 41 42 43 40 ::MPI_Test(to_mpi_request_ptr(*request), flag, &mpi_status); 44 41 … … 61 58 62 59 return Request_Check(); 63 64 60 } 65 61 … … 109 105 110 106 } 111 112 107 } 113 108 114 109 return Request_Check(); 115 116 110 } 117 111 -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_type.cpp
r1520 r1539 33 33 } 34 34 35 /*36 37 MPI_Aint::MPI_Aint(void* aint)38 {39 mpi_aint = new ::MPI_Aint;40 *(static_cast< ::MPI_Aint*>(mpi_aint)) = *(static_cast< ::MPI_Aint*>(aint));41 }42 43 MPI_Aint::MPI_Aint(int aint)44 {45 mpi_aint = new ::MPI_Aint;46 *(static_cast< ::MPI_Aint*>(mpi_aint)) = aint;47 }48 49 MPI_Aint MPI_Aint::operator=(int a)50 {51 mpi_aint = new ::MPI_Aint;52 *(static_cast< int*>(mpi_aint)) = a;53 }54 55 MPI_Fint::MPI_Fint(void* fint)56 {57 mpi_fint = new ::MPI_Fint;58 *(static_cast< ::MPI_Fint*>(mpi_fint)) = *(static_cast< ::MPI_Fint*>(fint));59 }60 61 62 */63 64 65 66 35 } 67 36 -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_type.hpp
r1520 r1539 38 38 #include "ep_barrier.hpp" 39 39 #include "ep_comm.hpp" 40 #include "ep_intercomm.hpp"41 40 #include "ep_window.hpp" 42 41 … … 86 85 static std::map<std::pair<int, int>, MPI_Comm > fc_comm_map; 87 86 87 88 88 89 89 90 } -
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_wait.cpp
r1533 r1539 26 26 { 27 27 printf("MPI_Wait : Error in request type\n"); 28 29 28 exit(1); 30 29 }