#include "ep_lib.hpp" #include #include "ep_declaration.hpp" #include "ep_mpi.hpp" using namespace std; extern std::map, MPI_Group* > * tag_group_map; extern std::map > > * tag_comm_map; extern MPI_Group MPI_GROUP_WORLD; namespace ep_lib { int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm) { if(!local_comm->is_ep) return MPI_Intercomm_create_mpi(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm); int ep_rank = local_comm->ep_comm_ptr->size_rank_info[0].first; // check if local leaders are in the same mpi proc // by checking their mpi_rank in peer_comm int mpi_rank_of_leader[2]; if(ep_rank == local_leader) { mpi_rank_of_leader[0] = peer_comm->ep_comm_ptr->size_rank_info[2].first; mpi_rank_of_leader[1] = peer_comm->ep_rank_map->at(remote_leader).second; } MPI_Bcast(mpi_rank_of_leader, 2, MPI_INT, local_leader, local_comm); if(mpi_rank_of_leader[0] != mpi_rank_of_leader[1]) { Debug("calling MPI_Intercomm_create_kernel\n"); return MPI_Intercomm_create_endpoint(local_comm, local_leader, peer_comm, remote_leader, tag, newintercomm); } else { printf("local leaders are in the same MPI proc. Routine not yet implemented\n"); MPI_Abort(local_comm, 0); } } int MPI_Intercomm_create_endpoint(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm) { int ep_rank, ep_rank_loc, mpi_rank; int ep_size, num_ep, mpi_size; ep_rank = local_comm->ep_comm_ptr->size_rank_info[0].first; ep_rank_loc = local_comm->ep_comm_ptr->size_rank_info[1].first; mpi_rank = local_comm->ep_comm_ptr->size_rank_info[2].first; ep_size = local_comm->ep_comm_ptr->size_rank_info[0].second; num_ep = local_comm->ep_comm_ptr->size_rank_info[1].second; mpi_size = local_comm->ep_comm_ptr->size_rank_info[2].second; ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // step 1 : local leaders exchange ep_size, leader_rank_in_peer, leader_rank_in_peer_mpi, leader_rank_in_world. // // local leaders bcast results to all ep in local_comm // ////////////////////////////////////////////////////////////////////////////////////////////////////////////////// bool is_local_leader = ep_rank==local_leader? 

  int MPI_Intercomm_create_endpoint(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
    int ep_rank, ep_rank_loc, mpi_rank;
    int ep_size, num_ep, mpi_size;

    // size_rank_info[0] = (rank, size) among the endpoints of the communicator,
    // size_rank_info[1] = (local endpoint index, number of endpoints) inside this MPI process,
    // size_rank_info[2] = (rank, size) of the underlying MPI communicator.
    ep_rank     = local_comm->ep_comm_ptr->size_rank_info[0].first;
    ep_rank_loc = local_comm->ep_comm_ptr->size_rank_info[1].first;
    mpi_rank    = local_comm->ep_comm_ptr->size_rank_info[2].first;
    ep_size     = local_comm->ep_comm_ptr->size_rank_info[0].second;
    num_ep      = local_comm->ep_comm_ptr->size_rank_info[1].second;
    mpi_size    = local_comm->ep_comm_ptr->size_rank_info[2].second;

    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////
    // step 1 : local leaders exchange ep_size, leader_rank_in_peer, leader_rank_in_peer_mpi, leader_rank_in_world.  //
    //          local leaders bcast results to all ep in local_comm                                                  //
    //////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    bool is_local_leader = ep_rank==local_leader? true: false;

    int local_leader_rank_in_peer;
    int local_leader_rank_in_peer_mpi;
    int local_leader_rank_in_world;

    int remote_ep_size;
    int remote_leader_rank_in_peer;
    int remote_leader_rank_in_peer_mpi;
    int remote_leader_rank_in_world;

    int send_quadruple[4];
    int recv_quadruple[4];

    if(is_local_leader)
    {
      MPI_Comm_rank(peer_comm, &local_leader_rank_in_peer);
      ::MPI_Comm_rank(to_mpi_comm(peer_comm->mpi_comm), &local_leader_rank_in_peer_mpi);
      ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &local_leader_rank_in_world);

      send_quadruple[0] = ep_size;
      send_quadruple[1] = local_leader_rank_in_peer;
      send_quadruple[2] = local_leader_rank_in_peer_mpi;
      send_quadruple[3] = local_leader_rank_in_world;

      MPI_Request request;
      MPI_Status status;

      // the leader with the lower rank in peer_comm sends first, so that the two
      // leader-to-leader exchanges are matched in a deterministic order
      if(remote_leader > local_leader_rank_in_peer)
      {
        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(recv_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(send_quadruple, 4, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }

      remote_ep_size                 = recv_quadruple[0];
      remote_leader_rank_in_peer     = recv_quadruple[1];
      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
      remote_leader_rank_in_world    = recv_quadruple[3];

#ifdef _showinfo
      printf("peer_rank = %d, packed exchange OK\n", local_leader_rank_in_peer);
#endif
    }

    MPI_Bcast(send_quadruple, 4, MPI_INT, local_leader, local_comm);
    MPI_Bcast(recv_quadruple, 4, MPI_INT, local_leader, local_comm);

    if(!is_local_leader)
    {
      local_leader_rank_in_peer     = send_quadruple[1];
      local_leader_rank_in_peer_mpi = send_quadruple[2];
      local_leader_rank_in_world    = send_quadruple[3];

      remote_ep_size                 = recv_quadruple[0];
      remote_leader_rank_in_peer     = recv_quadruple[1];
      remote_leader_rank_in_peer_mpi = recv_quadruple[2];
      remote_leader_rank_in_world    = recv_quadruple[3];
    }

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, ep_size = %d, remote_ep_size = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_size, remote_ep_size);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    ///////////////////////////////////////////////////////////////////
    // step 2 : gather ranks in world for both local and remote comm //
    ///////////////////////////////////////////////////////////////////

    int rank_in_world;
    ::MPI_Comm_rank(to_mpi_comm(MPI_COMM_WORLD->mpi_comm), &rank_in_world);

    int *ranks_in_world_local  = new int[ep_size];
    int *ranks_in_world_remote = new int[remote_ep_size];

    MPI_Allgather(&rank_in_world, 1, MPI_INT, ranks_in_world_local, 1, MPI_INT, local_comm);

    if(is_local_leader)
    {
      MPI_Request request;
      MPI_Status status;

      if(remote_leader > local_leader_rank_in_peer)
      {
        MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(ranks_in_world_remote, remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(ranks_in_world_local, ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }

#ifdef _showinfo
      printf("peer_rank = %d, ranks_in_world exchange OK\n", local_leader_rank_in_peer);
#endif
    }

    MPI_Bcast(ranks_in_world_remote, remote_ep_size, MPI_INT, local_leader, local_comm);
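
    // After the broadcasts above, every endpoint of local_comm knows
    //   ranks_in_world_local[i]  : MPI_COMM_WORLD rank of the process hosting local endpoint i
    //   ranks_in_world_remote[i] : MPI_COMM_WORLD rank of the process hosting remote endpoint i
    // These two tables drive the ownership and slot computations below.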

#ifdef _showinfo

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

    if(remote_leader == 4 || remote_leader == 13)
    {
      for(int i=0; i<ep_size; i++)
      {
        if(ep_rank == i)
        {
          printf("peer_rank = %d, ranks_in_world_local = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<ep_size; j++)
          {
            printf("%d\t", ranks_in_world_local[j]);
          }

          printf("\npeer_rank = %d, ranks_in_world_remote = \n", peer_comm->ep_comm_ptr->size_rank_info[0].first);
          for(int j=0; j<remote_ep_size; j++)
          {
            printf("%d\t", ranks_in_world_remote[j]);
          }
          printf("\n");
        }

        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
        MPI_Barrier(local_comm);
      }
    }

    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);

#endif

    //////////////////////////////////////////////////////////////
    // step 3 : determine the priority and ownership of each ep //
    //////////////////////////////////////////////////////////////

    // priority is true on the side whose leader has the lower rank in peer_comm;
    // it fixes the ordering of the merge in step 4 and of the exchange in step 7
    bool priority = local_leader_rank_in_peer < remote_leader_rank_in_peer? true : false;

    // ownership == 1 means this MPI process is in charge of creating the endpoint
    // communicators : the process hosting the local leader always is, the process
    // hosting the remote leader never is, and a process shared by both groups is
    // the owner only on the priority side
    int ownership;

    if(rank_in_world == ranks_in_world_local[local_leader]) ownership = 1;
    else if(rank_in_world == remote_leader_rank_in_world)   ownership = 0;
    else
    {
      ownership = 1;
      for(int i=0; i<remote_ep_size; i++)
      {
        if(rank_in_world == ranks_in_world_remote[i])
        {
          ownership = priority? 1 : 0;
          break;
        }
      }
    }

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, priority = %d, local_leader_rank_in_peer = %d, remote_leader_rank_in_peer = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, local_leader_rank_in_peer, remote_leader_rank_in_peer);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, priority = %d, ownership = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, priority, ownership);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    //////////////////////////////////////////////////////
    // step 4 : extract local_comm and create intercomm //
    //////////////////////////////////////////////////////

    bool is_involved = is_local_leader || (!is_local_leader && ep_rank_loc == 0 && rank_in_world != local_leader_rank_in_world);

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, is_involved = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, is_involved);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    if(is_involved)
    {
      ::MPI_Group local_group;
      ::MPI_Group extracted_group;
      ::MPI_Comm extracted_comm;

      ::MPI_Comm_group(to_mpi_comm(local_comm->mpi_comm), &local_group);

      int *ownership_list = new int[mpi_size];
      int *mpi_rank_list  = new int[mpi_size];

      ::MPI_Allgather(&ownership, 1, to_mpi_type(MPI_INT), ownership_list, 1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));
      ::MPI_Allgather(&mpi_rank,  1, to_mpi_type(MPI_INT), mpi_rank_list,  1, to_mpi_type(MPI_INT), to_mpi_comm(local_comm->mpi_comm));

      // keep only the MPI ranks that own endpoints and build the extracted group from them
      int n=0;
      for(int i=0; i<mpi_size; i++)
      {
        n += ownership_list[i];
      }

      int *new_mpi_rank_list = new int[n];
      int j=0;
      for(int i=0; i<mpi_size; i++)
      {
        if(ownership_list[i] != 0)
        {
          new_mpi_rank_list[j++] = mpi_rank_list[i];
        }
      }

      ::MPI_Group_incl(local_group, n, new_mpi_rank_list, &extracted_group);

      ::MPI_Comm_create(to_mpi_comm(local_comm->mpi_comm), extracted_group, &extracted_comm);

      ::MPI_Comm mpi_inter_comm;

      int local_leader_rank_in_extracted_comm;

      if(is_local_leader)
      {
        ::MPI_Comm_rank(extracted_comm, &local_leader_rank_in_extracted_comm);
      }

      ::MPI_Bcast(&local_leader_rank_in_extracted_comm, 1, to_mpi_type(MPI_INT), local_comm->ep_rank_map->at(local_leader).second, to_mpi_comm(local_comm->mpi_comm));

      ::MPI_Comm *intracomm = new ::MPI_Comm;

      bool is_real_involved = ownership && extracted_comm != to_mpi_comm(MPI_COMM_NULL->mpi_comm);

      if(is_real_involved)
      {
        ::MPI_Intercomm_create(extracted_comm, local_leader_rank_in_extracted_comm, to_mpi_comm(peer_comm->mpi_comm), remote_leader_rank_in_peer_mpi, tag, &mpi_inter_comm);
        ::MPI_Intercomm_merge(mpi_inter_comm, !priority, intracomm);
      }

      ////////////////////////////////////
      // step 5 :: determine new num_ep //
      ////////////////////////////////////

      // number of endpoints this MPI process will host in the new intercomm :
      // its endpoints from local_comm plus those from the remote communicator
      int num_ep_count=0;

      for(int i=0; i<ep_size; i++)
      {
        if(rank_in_world == ranks_in_world_local[i])
          num_ep_count++;
      }

      for(int i=0; i<remote_ep_size; i++)
      {
        if(rank_in_world == ranks_in_world_remote[i])
          num_ep_count++;
      }

      ///////////////////////////////////////////////////
      // step 6 : create endpoints from extracted_comm //
      ///////////////////////////////////////////////////

      if(is_real_involved)
      {
        MPI_Comm *ep_comm;
        MPI_Info info;
        MPI_Comm_create_endpoints(intracomm, num_ep_count, info, ep_comm);

#ifdef _showinfo
        printf("new ep_comm->ep_comm_ptr->intercomm->mpi_inter_comm = %p\n", mpi_inter_comm);
#endif

        #pragma omp critical (write_to_tag_list)
        intercomm_list.push_back(make_pair( make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)) , make_pair(ep_comm , make_pair(num_ep_count, 0))));
        #pragma omp flush

#ifdef _showinfo
        for(int i=0; i<num_ep_count; i++)
          printf("peer_rank = %d, ep_comm = %p, ep_comm[%d] -> new_ep_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_comm, i, ep_comm[i]->ep_comm_ptr->size_rank_info[0].first);
#endif

        ::MPI_Comm_free(intracomm);
        delete intracomm;
      }

      delete[] ownership_list;      // allocated with new[], so delete[] is required
      delete[] mpi_rank_list;
      delete[] new_mpi_rank_list;
    }

    // slot of this endpoint in the ep_comm array created by the owning side :
    // the owner's endpoints come first, the other side's endpoints are appended
    // after the "repeated" entries the owner contributed on this process
    int repeated=0;
    for(int i=0; i<remote_ep_size; i++)
    {
      if(rank_in_world == ranks_in_world_remote[i])
        repeated++;
    }

    int my_turn = ownership? ep_rank_loc : ep_rank_loc + repeated;

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, ep_rank_loc = %d, ownership = %d, repeated = %d, my_turn = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, ep_rank_loc, ownership, repeated, my_turn);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif
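
    // Each endpoint now claims its entry from the shared intercomm_list : the entry
    // is keyed by (tag, min of the two leaders' world ranks), the endpoint takes the
    // communicator stored at index my_turn, and the last endpoint to be served
    // removes the entry from the list.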

    #pragma omp flush
    #pragma omp critical (read_from_intercomm_list)
    {
      bool flag=true;
      while(flag)
      {
        for(std::list<std::pair<std::pair<int, int> , std::pair<MPI_Comm* , std::pair<int, int> > > >::iterator iter = intercomm_list.begin(); iter!=intercomm_list.end(); iter++)
        {
          if(iter->first == make_pair(tag, min(local_leader_rank_in_world, remote_leader_rank_in_world)))
          {
            *newintercomm = iter->second.first[my_turn];

            iter->second.second.second++;

            if(iter->second.second.first == iter->second.second.second)
              intercomm_list.erase(iter);

            flag = false;
            break;
          }
        }
      }
    }

#ifdef _showinfo
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
    printf("peer_rank = %d, test_rank = %d\n", peer_comm->ep_comm_ptr->size_rank_info[0].first, (*newintercomm)->ep_comm_ptr->size_rank_info[0].first);
    MPI_Barrier(peer_comm);
    MPI_Barrier(peer_comm);
#endif

    //////////////////////////////////////////////////////////
    // step 7 : create intercomm_rank_map for local leaders //
    //////////////////////////////////////////////////////////

    (*newintercomm)->is_intercomm = true;

    (*newintercomm)->inter_rank_map = new INTER_RANK_MAP;

    // each endpoint publishes (rank in local_comm, rank in the new intercomm)
    int rank_info[2];
    rank_info[0] = ep_rank;
    rank_info[1] = (*newintercomm)->ep_comm_ptr->size_rank_info[0].first;

#ifdef _showinfo
    printf("priority = %d, ep_rank = %d, new_ep_rank = %d\n", priority, rank_info[0], rank_info[1]);
#endif

    int *local_rank_info  = new int[2*ep_size];
    int *remote_rank_info = new int[2*remote_ep_size];

    MPI_Allgather(rank_info, 2, MPI_INT, local_rank_info, 2, MPI_INT, local_comm);

    if(is_local_leader)
    {
      MPI_Request request;
      MPI_Status status;

      if(priority)
      {
        MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
      else
      {
        MPI_Irecv(remote_rank_info, 2*remote_ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);

        MPI_Isend(local_rank_info, 2*ep_size, MPI_INT, remote_leader, tag, peer_comm, &request);
        MPI_Wait(&request, &status);
      }
    }

    MPI_Bcast(remote_rank_info, 2*remote_ep_size, MPI_INT, local_leader, local_comm);

    for(int i=0; i<remote_ep_size; i++)
    {
      (*newintercomm)->inter_rank_map->insert(make_pair(remote_rank_info[2*i], remote_rank_info[2*i+1]));
    }

    (*newintercomm)->ep_comm_ptr->size_rank_info[0] = local_comm->ep_comm_ptr->size_rank_info[0];

    delete[] local_rank_info;
    delete[] remote_rank_info;
    delete[] ranks_in_world_local;
    delete[] ranks_in_world_remote;

/*
    if((*newintercomm)->ep_comm_ptr->size_rank_info[0].second == 1)
    {
      for(INTER_RANK_MAP::iterator it = (*newintercomm)->inter_rank_map->begin(); it != (*newintercomm)->inter_rank_map->end(); it++)
      {
        printf("inter_rank_map[%d] = %d\n", it->first, it->second);
      }
    }
*/

    return 0;
  }

  int MPI_Intercomm_create_mpi(MPI_Comm local_comm, int local_leader, MPI_Comm peer_comm, int remote_leader, int tag, MPI_Comm *newintercomm)
  {
    printf("MPI_Intercomm_create_mpi not yet implemented\n");
    MPI_Abort(local_comm, 0);
    return 0;
  }

}
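
/*
   Usage sketch (illustrative only, not compiled as part of this file) : how an
   application built on ep_lib might reach MPI_Intercomm_create. The splitting
   scheme, the tag value and the leader choices below are assumptions made for
   the example, not something mandated by this file; it also assumes ep_lib
   provides the usual MPI_Comm_rank / MPI_Comm_size / MPI_Comm_split wrappers.

   void example(ep_lib::MPI_Comm peer)   // an endpoint communicator spanning all endpoints
   {
     int rank, size;
     ep_lib::MPI_Comm_rank(peer, &rank);
     ep_lib::MPI_Comm_size(peer, &size);

     // split the endpoints into two halves; each half becomes one side of the intercomm
     int color = rank < size/2 ? 0 : 1;
     ep_lib::MPI_Comm sub_comm;
     ep_lib::MPI_Comm_split(peer, color, rank, &sub_comm);

     // local leader  : endpoint 0 of each half (rank 0 in sub_comm)
     // remote leader : rank, in "peer", of the other half's leader
     int remote_leader = (color == 0) ? size/2 : 0;

     ep_lib::MPI_Comm inter_comm;
     ep_lib::MPI_Intercomm_create(sub_comm, 0, peer, remote_leader, 99, &inter_comm);
   }
*/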