Changeset 1639 for XIOS/trunk/extern/remap/src/mpi_routing.cpp
Timestamp:
    01/22/19 16:43:32
File:
    XIOS/trunk/extern/remap/src/mpi_routing.cpp (1 edited)
Legend:
    Unmodified lines have no prefix; added lines are prefixed with "+", removed lines with "-".
XIOS/trunk/extern/remap/src/mpi_routing.cpp
r1638 → r1639

  const int verbose = 0;
 
- CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
+ CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
  {
-   ep_lib::MPI_Comm_rank(comm, &mpiRank);
-   ep_lib::MPI_Comm_size(comm, &mpiSize);
+   MPI_Comm_rank(comm, &mpiRank);
+   MPI_Comm_size(comm, &mpiSize);
  }
…
     but message lengths are *known* to receiver */
  template <typename T>
- void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+ void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
  {
-   vector<ep_lib::MPI_Request> request(ranks.size() * 2);
-   vector<ep_lib::MPI_Status> status(ranks.size() * 2);
+   vector<MPI_Request> request(ranks.size() * 2);
+   vector<MPI_Status> status(ranks.size() * 2);
 
    // communicate data
…
    for (int i = 0; i < ranks.size(); i++)
      if (recv[i].size())
-       ep_lib::MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+       MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
    for (int i = 0; i < ranks.size(); i++)
      if (send[i].size())
-       ep_lib::MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
-   ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
+       MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+   MPI_Waitall(nbRequest, &request[0], &status[0]);
  }
…
     but message lengths are *unknown* to receiver */
  template <typename T>
- void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+ void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
  {
-   vector<ep_lib::MPI_Request> request(ranks.size() * 2);
-   vector<ep_lib::MPI_Status> status(ranks.size() * 2);
+   vector<MPI_Request> request(ranks.size() * 2);
+   vector<MPI_Status> status(ranks.size() * 2);
 
    // communicate sizes
…
      sendSizes[i] = send[i].size();
    for (int i = 0; i < ranks.size(); i++)
-     ep_lib::MPI_Irecv(&recvSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+     MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
    for (int i = 0; i < ranks.size(); i++)
-     ep_lib::MPI_Isend(&sendSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
-   ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
+     MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+   MPI_Waitall(nbRequest, &request[0], &status[0]);
 
    // allocate
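Both alltoalls_* templates above follow the same pattern: post all nonblocking receives, post all nonblocking sends, complete everything with a single MPI_Waitall. A minimal self-contained sketch of the unknown-length variant, combining the size exchange with the payload exchange (the function and variable names here are illustrative, not the file's API):

// Sketch: sizes travel first as MPI_INT, then payloads travel as raw
// MPI_CHAR bytes; each phase is completed by one MPI_Waitall.
#include <mpi.h>
#include <vector>

template <typename T>
void exchangeUnknownLengths(const std::vector<std::vector<T> >& send,
                            std::vector<std::vector<T> >& recv,
                            const std::vector<int>& ranks, MPI_Comm comm)
{
  std::vector<MPI_Request> request(ranks.size() * 2);
  std::vector<MPI_Status>  status(ranks.size() * 2);
  std::vector<int> sendSizes(ranks.size()), recvSizes(ranks.size());
  int nbRequest = 0;

  // phase 1: tell every peer how many elements to expect
  for (size_t i = 0; i < ranks.size(); i++) sendSizes[i] = send[i].size();
  for (size_t i = 0; i < ranks.size(); i++)
    MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, comm, &request[nbRequest++]);
  for (size_t i = 0; i < ranks.size(); i++)
    MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, comm, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);

  // phase 2: allocate receive buffers, then move the payloads as bytes
  nbRequest = 0;
  for (size_t i = 0; i < ranks.size(); i++) recv[i].resize(recvSizes[i]);
  for (size_t i = 0; i < ranks.size(); i++)
    if (recv[i].size())
      MPI_Irecv(&recv[i][0], recv[i].size() * sizeof(T), MPI_CHAR,
                ranks[i], 0, comm, &request[nbRequest++]);
  for (size_t i = 0; i < ranks.size(); i++)
    if (send[i].size())
      MPI_Isend((void*)&send[i][0], send[i].size() * sizeof(T), MPI_CHAR,
                ranks[i], 0, comm, &request[nbRequest++]);
  MPI_Waitall(nbRequest, &request[0], &status[0]);
}

Reusing tag 0 for both phases is safe here because the intervening MPI_Waitall separates them, as in the file itself.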
…
   CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
   CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
-  ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);
+  MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
   CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
   CTimer::get("CMPIRouting::init(reduce_scatter)").print();
 
-  ep_lib::MPI_Alloc_mem(nbTarget *sizeof(int), EP_INFO_NULL, &targetRank);
-  ep_lib::MPI_Alloc_mem(nbSource *sizeof(int), EP_INFO_NULL, &sourceRank);
+  MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
+  MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
 
   targetRankToIndex = new int[mpiSize];
…
   }
 
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::init(get_source)").reset();
   CTimer::get("CMPIRouting::init(get_source)").resume();
 
-  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
-  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
+  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
+  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
 
   int indexRequest = 0;
…
   for (int i = 0; i < nbSource; i++)
   {
-#ifdef _usingMPI
-    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
-#elif _usingEP
-    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
-#endif
+    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
   MPI_Barrier(communicator);
   for (int i = 0; i < nbTarget; i++)
   {
-    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
…
   for (int i = 0; i < nbSource; i++)
   {
-#ifdef _usingMPI
-    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
-#elif _usingEP
-    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
-#endif
+    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
   for (int i = 0; i < nbTarget; i++)
   {
-    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
…
   for (int i = 0; i < nbSource; i++)
   {
-    ep_lib::MPI_Irecv(&nbSourceElement[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(&nbSourceElement[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
…
   {
     totalTargetElement += nbTargetElement[i];
-    ep_lib::MPI_Isend(&nbTargetElement[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(&nbTargetElement[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
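The init() hunks above implement sender discovery: MPI_Reduce_scatter counts how many peers will send to each rank, and an MPI_ANY_SOURCE handshake then identifies them. A sketch of that pattern in isolation (discoverSources is a hypothetical helper, and it assumes targetRank lists each destination rank exactly once):

// Sketch of the rank-discovery step: each rank marks the targets it will
// send to, MPI_Reduce_scatter gives every rank its own sender count, and a
// short handshake reveals who the senders are.
#include <mpi.h>
#include <vector>

void discoverSources(const std::vector<int>& targetRank, MPI_Comm comm,
                     std::vector<int>& sourceRank)
{
  int mpiRank, mpiSize;
  MPI_Comm_rank(comm, &mpiRank);
  MPI_Comm_size(comm, &mpiSize);

  // toSend[r] == 1 iff this rank will send to r; recvCount requests one
  // reduced element per rank, so each rank receives its own sender count.
  std::vector<int> toSend(mpiSize, 0), recvCount(mpiSize, 1);
  for (size_t i = 0; i < targetRank.size(); i++) toSend[targetRank[i]] = 1;

  int nbSource = 0;
  MPI_Reduce_scatter(&toSend[0], &nbSource, &recvCount[0], MPI_INT, MPI_SUM, comm);

  // handshake: receive one rank id per incoming sender, send our id to targets
  sourceRank.resize(nbSource);
  std::vector<MPI_Request> request(nbSource + targetRank.size());
  int indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, comm, &request[indexRequest++]);
  for (size_t i = 0; i < targetRank.size(); i++)
    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, comm, &request[indexRequest++]);
  MPI_Waitall(indexRequest, &request[0], MPI_STATUSES_IGNORE);
}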
…
   }
 
-  ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
-  ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
+  MPI_Request* request=new MPI_Request[nbSource+nbTarget];
+  MPI_Status* status=new MPI_Status[nbSource+nbTarget];
   int indexRequest=0;
 
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget").reset();
   CTimer::get("CMPIRouting::transferToTarget").resume();
…
   for(int i=0; i<nbSource; i++)
   {
-    ep_lib::MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
   for(int i=0;i<nbTarget; i++)
   {
-    ep_lib::MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
-  ep_lib::MPI_Waitall(indexRequest,request,status);
+  MPI_Waitall(indexRequest,request,status);
 
   CTimer::get("CMPIRouting::transferToTarget").suspend();
   CTimer::get("CMPIRouting::transferToTarget").print();
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
 
   // unpack the data
…
   }
 
-  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
-  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
+  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
+  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
   int indexRequest = 0;
 
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();
…
   for(int i=0; i<nbSource; i++)
   {
-    ep_lib::MPI_Irecv(&sourceMessageSize[i],1,EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(&sourceMessageSize[i],1,MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
   for(int i=0; i<nbTarget; i++)
   {
-    ep_lib::MPI_Isend(&targetMessageSize[i],1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(&targetMessageSize[i],1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
-  ep_lib::MPI_Waitall(indexRequest,request,status);
-
-  ep_lib::MPI_Barrier(communicator);
+  MPI_Waitall(indexRequest,request,status);
+
+  MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();
…
   for(int i=0; i<nbSource; i++)
   {
-    ep_lib::MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
   for(int i=0;i<nbTarget; i++)
   {
-    ep_lib::MPI_Isend(targetBuffer[i],targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(targetBuffer[i],targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
…
   }
 
-  ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
-  ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
+  MPI_Request* request=new MPI_Request[nbSource+nbTarget];
+  MPI_Status* status=new MPI_Status[nbSource+nbTarget];
   int indexRequest=0;
 
   for(int i=0; i<nbSource; i++)
   {
-    ep_lib::MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
   for(int i=0;i<nbTarget; i++)
   {
-    ep_lib::MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
 
-  ep_lib::MPI_Waitall(indexRequest,request,status);
+  MPI_Waitall(indexRequest,request,status);
 
   // unpack the data
…
   }
 
-  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
-  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
+  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
+  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
   int indexRequest = 0;
   for (int i = 0; i < nbSource; i++)
   {
-    ep_lib::MPI_Isend(&sourceMessageSize[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
   for (int i = 0; i < nbTarget; i++)
   {
-    ep_lib::MPI_Irecv(&targetMessageSize[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
-  ep_lib::MPI_Waitall(indexRequest, request, status);
+  MPI_Waitall(indexRequest, request, status);
 
   for (int i = 0; i < nbTarget; i++)
…
   for (int i = 0; i < nbSource; i++)
   {
-    ep_lib::MPI_Isend(sourceBuffer[i], sourceMessageSize[i], EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Isend(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
   for (int i = 0; i < nbTarget; i++)
   {
-    ep_lib::MPI_Irecv(targetBuffer[i], targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    MPI_Irecv(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
-  ep_lib::MPI_Waitall(indexRequest, request, status);
+  MPI_Waitall(indexRequest, request, status);
 
   // unpack the data
…
 template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
-                                const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+                                const std::vector<int>& ranks, MPI_Comm communicator);
 
 template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
-                              const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+                              const std::vector<int>& ranks, MPI_Comm communicator);
 
 }
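For reference, a hypothetical driver for the alltoalls_known instantiation above. The signature comes from this file, but the header name mpi_routing.hpp and the enclosing namespace sphereRemap are assumptions. Message lengths are agreed by convention (rank r always receives r+1 ints per peer), so receive buffers can be presized, which is what the "known" variant requires:

#include "mpi_routing.hpp"   // assumed header exposing the templates
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // exchange with every other rank
  std::vector<int> ranks;
  for (int r = 0; r < size; r++)
    if (r != rank) ranks.push_back(r);

  std::vector<std::vector<int> > send(ranks.size()), recv(ranks.size());
  for (size_t i = 0; i < ranks.size(); i++)
  {
    send[i].assign(ranks[i] + 1, rank); // peer ranks[i] expects ranks[i]+1 ints
    recv[i].resize(rank + 1);           // every peer sends this rank rank+1 ints
  }

  // lengths are known on both sides, so the "known" variant applies
  sphereRemap::alltoalls_known(send, recv, ranks, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}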