Changeset 1185
Timestamp: 06/26/17 18:36:56
Location: XIOS/dev/branch_yushan_merged
Files: 14 edited
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_barrier.cpp
(r1134 → r1185)

  int MPI_Barrier_local(MPI_Comm comm)
  {
-   Message_Check(comm);
+   //Message_Check(comm);
    comm.ep_barrier->wait();
  }
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_intercomm_kernel.cpp
(r1134 → r1185)

  ::MPI_Comm_rank(static_cast< ::MPI_Comm>(peer_comm.mpi_comm), &rank_in_peer_mpi[0]);

- MPI_Status status;
+
  send_buf[0] = size_info[0];
  …
  send_buf[2] = rank_in_peer_mpi[0];

- MPI_Send(send_buf.data(), 3, MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv(recv_buf.data(), 3, MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Request req_send, req_recv;
+ MPI_Status sta_send, sta_recv;
+
+ MPI_Isend(send_buf.data(), 3, MPI_INT_STD, remote_leader, tag, peer_comm, &req_send);
+ MPI_Irecv(recv_buf.data(), 3, MPI_INT_STD, remote_leader, tag, peer_comm, &req_recv);
+
+ MPI_Wait(&req_send, &sta_send);
+ MPI_Wait(&req_recv, &sta_recv);

  size_info[1] = recv_buf[0];
  remote_ep_size = recv_buf[1];
  …
  std::copy ( ep_info[0].data(), ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[0] );

- MPI_Send(send_buf.data(), 3*size_info[0], MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv(recv_buf.data(), 3*size_info[1], MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Send(send_buf.data(), 3*size_info[0], MPI_INT_STD, remote_leader, tag+1, peer_comm);
+ MPI_Recv(recv_buf.data(), 3*size_info[1], MPI_INT_STD, remote_leader, tag+1, peer_comm, &status);
  }
  …
  size_info[2] = new_ep_info[0].size();
  MPI_Status status;
- MPI_Send(&size_info[2], 1, MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv(&size_info[3], 1, MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Send(&size_info[2], 1, MPI_INT_STD, remote_leader, tag+2, peer_comm);
+ MPI_Recv(&size_info[3], 1, MPI_INT_STD, remote_leader, tag+2, peer_comm, &status);
  }
  …
  std::copy ( new_ep_info[0].data(), new_ep_info[0].data() + size_info[0], send_buf.begin() + 2*size_info[2] );

- MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Send(send_buf.data(), 3*size_info[2], MPI_INT_STD, remote_leader, tag+3, peer_comm);
+ MPI_Recv(recv_buf.data(), 3*size_info[3], MPI_INT_STD, remote_leader, tag+3, peer_comm, &status);
  }
  …
  {
  MPI_Status status;
- MPI_Send((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, MPI_INT_STD, remote_leader, tag, peer_comm, &status);
-
- MPI_Send(&local_intercomm_size, 1, MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv(&remote_intercomm_size, 1, MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Send((*newintercomm).ep_comm_ptr->intercomm->local_rank_map->data(), 2*local_ep_size, MPI_INT_STD, remote_leader, tag+4, peer_comm);
+ MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->remote_rank_map->data(), 2*remote_ep_size, MPI_INT_STD, remote_leader, tag+4, peer_comm, &status);
+
+ MPI_Send(&local_intercomm_size, 1, MPI_INT_STD, remote_leader, tag+5, peer_comm);
+ MPI_Recv(&remote_intercomm_size, 1, MPI_INT_STD, remote_leader, tag+5, peer_comm, &status);

  new_bcast_root_0 = intercomm_ep_rank;
  …
  {
  MPI_Status status;
- MPI_Send((*newintercomm).rank_map->data(), 2*local_intercomm_size, MPI_INT_STD, remote_leader, tag, peer_comm);
- MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, MPI_INT_STD, remote_leader, tag, peer_comm, &status);
+ MPI_Send((*newintercomm).rank_map->data(), 2*local_intercomm_size, MPI_INT_STD, remote_leader, tag+6, peer_comm);
+ MPI_Recv((*newintercomm).ep_comm_ptr->intercomm->intercomm_rank_map->data(), 2*remote_intercomm_size, MPI_INT_STD, remote_leader, tag+6, peer_comm, &status);
  }
  …
  // MPI_Comm_rank(*test_comm, &test_rank);
  // printf("=================test_rank = %d\n", test_rank);
+
+

  return MPI_SUCCESS;
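Taken together, the ep_intercomm_kernel.cpp edits change the leader-to-leader handshake over peer_comm in two ways: the first exchange is posted as a non-blocking MPI_Isend/MPI_Irecv pair completed with MPI_Wait, and every later exchange phase gets its own tag (tag, tag+1, … tag+6) so that messages from different phases cannot be matched against each other. Below is a minimal sketch of that pattern in plain MPI; the function name exchange_with_remote_leader and the parameters base_tag and phase are illustrative only, and MPI_INT stands in for the library's MPI_INT_STD wrapper.

    #include <mpi.h>

    /* Exchange a fixed-size integer buffer with the remote leader without the
       Send/Send deadlock risk of the blocking version; one distinct tag per
       exchange phase mirrors the tag, tag+1, ... tag+6 scheme above. */
    void exchange_with_remote_leader(int *send_buf, int *recv_buf, int count,
                                     int remote_leader, int base_tag, int phase,
                                     MPI_Comm peer_comm)
    {
      MPI_Request req_send, req_recv;
      MPI_Status  sta_send, sta_recv;
      int tag = base_tag + phase;

      MPI_Isend(send_buf, count, MPI_INT, remote_leader, tag, peer_comm, &req_send);
      MPI_Irecv(recv_buf, count, MPI_INT, remote_leader, tag, peer_comm, &req_recv);

      MPI_Wait(&req_send, &sta_send);
      MPI_Wait(&req_recv, &sta_recv);
    }

Posting both transfers before waiting avoids the case where both leaders sit in a blocking MPI_Send on the same communicator at the same time, which can deadlock once the messages are too large to be buffered eagerly.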
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_lib_intercomm.hpp
(r1134 → r1185)

  int MPI_Comm_test_inter(MPI_Comm comm, int *flag);
+
+ int test_sendrecv(MPI_Comm comm);
+
  }
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_message.cpp
(r1176 → r1185)

  msg_block->ep_src = get_ep_rank(comm, src_loc, src_mpi); //printf("myRank = %d, msg_block->ep_src = %d\n", myRank, msg_block->ep_src);
-
- //printf("myRank = %d, probed one message, ep_src = %d, ep_dest = %d, tag = %d, message = %d\n", myRank, msg_block->ep_src, msg_block->ep_dest, msg_block->ep_tag, msg_block->mpi_message);
+ int dest_mpi = comm.ep_comm_ptr->size_rank_info[2].first;
+ int ep_dest = get_ep_rank(comm, dest_loc, dest_mpi);
+ printf("myRank = %d, probed one message, ep_src = %d, ep_dest = %d, tag = %d, message = %d\n", myRank, msg_block->ep_src, ep_dest, msg_block->ep_tag, msg_block->mpi_message);
  msg_block->mpi_status = new ::MPI_Status(status);
  …
  #pragma omp flush
  ptr_comm_target->ep_comm_ptr->message_queue->push_back(*msg_block);
- // printf("myRank = %d, push_back OK, ep_src = %d, ep_tag = %d, mpi_status = %p (%p)\n", myRank,
- //        ptr_comm_target->ep_comm_ptr->message_queue->back().ep_src,
- //        ptr_comm_target->ep_comm_ptr->message_queue->back().ep_tag,
- //        msg_block->mpi_status, &status);
+ printf("myRank = %d, push_back OK, ep_src = %d, ep_tag = %d, dest = %d(%d)\n", myRank,
+        ptr_comm_target->ep_comm_ptr->message_queue->back().ep_src,
+        ptr_comm_target->ep_comm_ptr->message_queue->back().ep_tag,
+        ep_dest, dest_loc);

  #pragma omp flush
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_probe.cpp
(r1134 → r1185)

- Message_Check(comm);
+ //Message_Check(comm);

  #pragma omp flush
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_recv.cpp
(r1147 → r1185)

  namespace ep_lib
  {

  int MPI_Recv(void *buf, int count, MPI_Datatype datatype, int src, int tag, MPI_Comm comm, MPI_Status *status)
  …
  }

- Message_Check(comm);
-
-
  request->mpi_request = MPI_REQUEST_NULL_STD;
  request->buf = buf;
  …
  Message_Check(comm);

+ EP_PendingRequests->push_back(request);
+ printf("proc %d : EP_PendingRequests insert one request, add = %p(%p), buf = %p(%p)\n", dest_rank, EP_PendingRequests->back(), request, buf, request->buf);
+
+ // check all EP_PendingRequests
+
+ //if(EP_PendingRequests == 0 ) EP_PendingRequests = new std::list< MPI_Request* >;
+ if(!EP_PendingRequests->empty())
+ {
+   printf("proc %d have %d pending irecv request\n", dest_rank, EP_PendingRequests->size());
+
+   for(std::list<MPI_Request* >::iterator it = EP_PendingRequests->begin(); it!=EP_PendingRequests->end(); )
+   {
+     if((*it)->type != 2)
+     {
+       printf("proc %d : pending request type = %d, src= %d, tag = %d, add = %p skip\n", dest_rank, (*it)->type, (*it)->ep_src, (*it)->ep_tag, *it);
+       EP_PendingRequests->erase(it);
+       it = EP_PendingRequests->begin();
+       printf("proc %d : pending request processed, size = %d, it = %p\n", dest_rank, EP_PendingRequests->size(), *it);
+       continue;
+     }
+
+     printf("proc %d : pending irecv request src = %d, tag = %d, type = %d, add = %p\n", dest_rank, (*it)->ep_src, (*it)->ep_tag, (*it)->type, *it);
+     int probed = false;
+     MPI_Message pending_message;
+     MPI_Status pending_status;
+
+     MPI_Improbe((*it)->ep_src, (*it)->ep_tag, (*it)->comm, &probed, &pending_message, &pending_status);
+     printf("proc %d : pending irecv request probed to be %d, add = %p\n", dest_rank, probed, *it);
+
+     if(probed)
+     {
+       int count;
+       MPI_Get_count(&pending_status, (*it)->ep_datatype, &count);
+       MPI_Imrecv((*it)->buf, count, (*it)->ep_datatype, &pending_message, *it);
+       printf("proc %d : pending request is imrecving src = %d, tag = %d, add = %p, buf = %p, count = %d\n", dest_rank, (*it)->ep_src, (*it)->ep_tag, *it, (*it)->buf, count);
+       EP_PendingRequests->erase(it);
+       it = EP_PendingRequests->begin();
+       printf("proc %d : pending request processed, size = %d\n", dest_rank, EP_PendingRequests->size());
+       continue;
+     }
+     else it++;
+   }
+ }
+ /*
  int flag = false;
  MPI_Message message;
  …
  {
    MPI_Imrecv(buf, count, datatype, &message, request);
+   printf("proc %d : found message in local queue, src = %d, tag = %d\n", dest_rank, src, tag);
  }
+ */

  return 0;
  …
  request->ep_tag = message->ep_tag;
  request->ep_src = message->ep_src;
- request->buf = buf;
+ //request->buf = buf;

  return 0;
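The new code in MPI_Irecv queues every endpoint receive in EP_PendingRequests and then sweeps the list: entries that are not irecv requests (type != 2) are dropped, and for the rest MPI_Improbe checks whether a matching message has arrived, in which case MPI_Imrecv starts the actual transfer and the entry is removed. Below is a rough sketch of that sweep using the standard MPI-3 calls rather than the ep_lib wrappers; the PendingRecv struct and the process_pending name are illustrative only.

    #include <mpi.h>
    #include <list>

    // One queued receive that has been posted but not yet matched.
    struct PendingRecv
    {
      void*        buf;
      MPI_Datatype datatype;
      int          src;
      int          tag;
      MPI_Comm     comm;
      MPI_Request  req;
    };

    // Walk the pending list; complete every receive whose message has arrived.
    void process_pending(std::list<PendingRecv>& pending)
    {
      for (std::list<PendingRecv>::iterator it = pending.begin(); it != pending.end(); )
      {
        int probed = 0;
        MPI_Message msg;
        MPI_Status  sta;

        // Has a matching message arrived for this queued receive?
        MPI_Improbe(it->src, it->tag, it->comm, &probed, &msg, &sta);

        if (probed)
        {
          int count;
          MPI_Get_count(&sta, it->datatype, &count);
          // Start the actual receive for exactly the probed message.
          MPI_Imrecv(it->buf, count, it->datatype, &msg, &it->req);
          it = pending.erase(it);   // erase returns the next iterator
        }
        else
          ++it;
      }
    }

The changeset restarts its loop from begin() after every erase; the sketch relies on the iterator returned by list::erase instead, which visits each entry once per sweep and avoids re-scanning the head of the list.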
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_send.cpp
(r1147 → r1185)

  request->type = 1; // used in wait
  request->comm = comm;
+ request->buf = const_cast<void*>(buf);

  Message_Check(comm);
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_test.cpp
(r1149 → r1185)

  *flag = false;

  if(request->type == 1) // isend
  …
  if(*flag)
  {
    int count;
    MPI_Get_count(status, request->ep_datatype, &count);
    MPI_Imrecv(request->buf, count, request->ep_datatype, &message, request);
+   printf("in ep_test, found message src = %d, tag = %d, type = %d\n", request->ep_src, request->ep_tag, request->type);
    MPI_Test(request, flag, status);
  }
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_type.hpp
(r1134 → r1185)

  // <MPI_Fint,thread_num> EP_Comm

+ static std::list< MPI_Request* > * EP_PendingRequests = new std::list< MPI_Request* >;
+ #pragma omp threadprivate(EP_PendingRequests)
  }
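EP_PendingRequests is declared threadprivate, so each OpenMP thread — i.e. each endpoint — keeps its own pending-request list instead of sharing one global list. A small stand-alone sketch of that behaviour follows, with a plain std::list<int> standing in for the request list and all names chosen for illustration only.

    #include <cstdio>
    #include <list>
    #include <omp.h>

    // One pointer per thread: every thread sees (and allocates) its own list.
    static std::list<int> *pending = 0;
    #pragma omp threadprivate(pending)

    int main()
    {
      #pragma omp parallel
      {
        pending = new std::list<int>;           // private to this thread
        pending->push_back(omp_get_thread_num());
        printf("thread %d owns a list of size %zu\n",
               omp_get_thread_num(), pending->size());
        delete pending;
      }
      return 0;
    }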
XIOS/dev/branch_yushan_merged/extern/src_ep_dev/ep_wait.cpp
(r1176 → r1185)

  int count;
+ request->type = 3;
  MPI_Get_count(status, request->ep_datatype, &count);
  MPI_Mrecv(request->buf, count, request->ep_datatype, &message, status);
  …
  while(finished < count)
  {
    for(int i=0; i<count; i++)
    {
  …
      else // irecv
      {
+       // traverse pending list
+       // find request in waitall
+       Message_Check(array_of_requests[i].comm);
+       // improbe + mrecv
+       // erase element in pending list
+       // finished++;
+       // finished_index[i] = true;
+
        int flag = false;
        MPI_Message message;
XIOS/dev/branch_yushan_merged/inputs/REMAP/iodef.xml
(r1179 → r1185)

  <field field_ref="dst_field_regular" name="field_regular" enabled=".TRUE."/>
  <field field_ref="dst_field_regular_pole_0" name="field_regular_pole_0" enabled=".TRUE." />
- <field field_ref="dst_field_regular_pole_1" name="field_regular_pole_1" enabled=".FALSE." />
+ <field field_ref="dst_field_regular_pole_1" name="field_regular_pole_1" enabled=".TRUE." />
  </file>
  <file id="output_dst_curvilinear" name="output_dst_curvilinear" enabled=".TRUE." >
XIOS/dev/branch_yushan_merged/src/client.cpp
(r1179 → r1185)

  MPI_Comm_rank(intraComm,&intraCommRank) ;

- #pragma omp critical(_output)
+ /*#pragma omp critical(_output)
  {
    info(10)<<"intercommCreate::client "<<test_omp_rank<< " "<< &test_omp_rank <<" intraCommSize : "<<intraCommSize
            <<" intraCommRank :"<<intraCommRank<<" serverLeader "<< serverLeader
            <<" globalComm : "<< &(CXios::globalComm) << endl ;
- }
+ }*/

+ test_sendrecv(CXios::globalComm);
  MPI_Intercomm_create(intraComm,0,CXios::globalComm,serverLeader,0,&interComm) ;
XIOS/dev/branch_yushan_merged/src/client_client_dht_template_impl.hpp
(r1176 → r1185)

  #ifdef _usingEP
  #include "ep_declaration.hpp"
+ #include "ep_lib.hpp"
  #endif
XIOS/dev/branch_yushan_merged/src/server.cpp
(r1160 → r1185)

  MPI_Comm_size(intraComm,&intraCommSize) ;
  MPI_Comm_rank(intraComm,&intraCommRank) ;
- info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
-         <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;
+ /*info(50)<<"intercommCreate::server "<<rank<<" intraCommSize : "<<intraCommSize
+           <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ;*/

+ test_sendrecv(CXios::globalComm);
  MPI_Intercomm_create(intraComm,0,CXios::globalComm,clientLeader,0,&newComm) ;
  interComm.push_back(newComm) ;