Changeset 1638 for XIOS/trunk/extern/remap/src/mpi_routing.cpp
- Timestamp: 01/22/19 16:15:03
- File: XIOS/trunk/extern/remap/src/mpi_routing.cpp (1 edited)
XIOS/trunk/extern/remap/src/mpi_routing.cpp
--- XIOS/trunk/extern/remap/src/mpi_routing.cpp (revision 694)
+++ XIOS/trunk/extern/remap/src/mpi_routing.cpp (revision 1638)
@@ -10,7 +10,7 @@
 const int verbose = 0;
 
-CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
-{
-  MPI_Comm_rank(comm, &mpiRank);
-  MPI_Comm_size(comm, &mpiSize);
+CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
+{
+  ep_lib::MPI_Comm_rank(comm, &mpiRank);
+  ep_lib::MPI_Comm_size(comm, &mpiSize);
 }
@@ -19,8 +19,8 @@
    but message lengths are *known* to receiver */
 template <typename T>
-void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
-{
-  vector<MPI_Request> request(ranks.size() * 2);
-  vector<MPI_Status> status(ranks.size() * 2);
+void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+{
+  vector<ep_lib::MPI_Request> request(ranks.size() * 2);
+  vector<ep_lib::MPI_Status> status(ranks.size() * 2);
 
   // communicate data
@@ -28,8 +28,8 @@
   for (int i = 0; i < ranks.size(); i++)
     if (recv[i].size())
-      MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+      ep_lib::MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
   for (int i = 0; i < ranks.size(); i++)
     if (send[i].size())
-      MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
-  MPI_Waitall(nbRequest, &request[0], &status[0]);
+      ep_lib::MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+  ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
 }
@@ -38,8 +38,8 @@
    but message lengths are *unknown* to receiver */
 template <typename T>
-void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
-{
-  vector<MPI_Request> request(ranks.size() * 2);
-  vector<MPI_Status> status(ranks.size() * 2);
+void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+{
+  vector<ep_lib::MPI_Request> request(ranks.size() * 2);
+  vector<ep_lib::MPI_Status> status(ranks.size() * 2);
 
   // communicate sizes
@@ -50,8 +50,8 @@
     sendSizes[i] = send[i].size();
   for (int i = 0; i < ranks.size(); i++)
-    MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+    ep_lib::MPI_Irecv(&recvSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
   for (int i = 0; i < ranks.size(); i++)
-    MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
-  MPI_Waitall(nbRequest, &request[0], &status[0]);
+    ep_lib::MPI_Isend(&sendSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+  ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
 
   // allocate
@@ -118,10 +118,10 @@
   CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
   CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
-  MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
+  ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);
   CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
   CTimer::get("CMPIRouting::init(reduce_scatter)").print();
 
-  MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
-  MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
+  ep_lib::MPI_Alloc_mem(nbTarget *sizeof(int), EP_INFO_NULL, &targetRank);
+  ep_lib::MPI_Alloc_mem(nbSource *sizeof(int), EP_INFO_NULL, &sourceRank);
 
   targetRankToIndex = new int[mpiSize];
@@ -137,10 +137,10 @@
   }
 
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::init(get_source)").reset();
   CTimer::get("CMPIRouting::init(get_source)").resume();
 
-  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
-  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
 
   int indexRequest = 0;
@@ -150,11 +150,15 @@
   for (int i = 0; i < nbSource; i++)
   {
-    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
-    indexRequest++;
+#ifdef _usingMPI
+    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+#elif _usingEP
+    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
+#endif
+    indexRequest++;
   }
   MPI_Barrier(communicator);
   for (int i = 0; i < nbTarget; i++)
   {
-    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -170,11 +174,15 @@
   for (int i = 0; i < nbSource; i++)
   {
-    MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
-    indexRequest++;
+#ifdef _usingMPI
+    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+#elif _usingEP
+    ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
+#endif
+    indexRequest++;
   }
 
   for (int i = 0; i < nbTarget; i++)
   {
-    MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -201,5 +209,5 @@
   for (int i = 0; i < nbSource; i++)
   {
-    MPI_Irecv(&nbSourceElement[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Irecv(&nbSourceElement[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -208,5 +216,5 @@
   {
     totalTargetElement += nbTargetElement[i];
-    MPI_Isend(&nbTargetElement[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Isend(&nbTargetElement[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -276,9 +284,9 @@
 
 
-  MPI_Request* request=new MPI_Request[nbSource+nbTarget];
-  MPI_Status* status=new MPI_Status[nbSource+nbTarget];
+  ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
+  ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
   int indexRequest=0;
 
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget").reset();
   CTimer::get("CMPIRouting::transferToTarget").resume();
@@ -286,5 +294,5 @@
   for(int i=0; i<nbSource; i++)
   {
-    MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -292,13 +300,13 @@
   for(int i=0;i<nbTarget; i++)
   {
-    MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-
-  MPI_Waitall(indexRequest,request,status);
+    ep_lib::MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+
+  ep_lib::MPI_Waitall(indexRequest,request,status);
 
   CTimer::get("CMPIRouting::transferToTarget").suspend();
   CTimer::get("CMPIRouting::transferToTarget").print();
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Barrier(communicator);
 
   // unpack the data
@@ -340,9 +348,9 @@
   }
 
-  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
-  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
   int indexRequest = 0;
 
-  MPI_Barrier(communicator);
+  ep_lib::MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();
@@ -350,5 +358,5 @@
   for(int i=0; i<nbSource; i++)
   {
-    MPI_Irecv(&sourceMessageSize[i],1,MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Irecv(&sourceMessageSize[i],1,EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -356,11 +364,11 @@
   for(int i=0; i<nbTarget; i++)
   {
-    MPI_Isend(&targetMessageSize[i],1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-
-  MPI_Waitall(indexRequest,request,status);
-
-  MPI_Barrier(communicator);
+    ep_lib::MPI_Isend(&targetMessageSize[i],1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+
+  ep_lib::MPI_Waitall(indexRequest,request,status);
+
+  ep_lib::MPI_Barrier(communicator);
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
   CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();
@@ -395,5 +403,5 @@
   for(int i=0; i<nbSource; i++)
   {
-    MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -401,5 +409,5 @@
   for(int i=0;i<nbTarget; i++)
   {
-    MPI_Isend(targetBuffer[i],targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Isend(targetBuffer[i],targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -460,11 +468,11 @@
   }
 
-  MPI_Request* request=new MPI_Request[nbSource+nbTarget];
-  MPI_Status* status=new MPI_Status[nbSource+nbTarget];
+  ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
+  ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
   int indexRequest=0;
 
   for(int i=0; i<nbSource; i++)
   {
-    MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    ep_lib::MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
     indexRequest++;
   }
@@ -472,9 +480,9 @@
   for(int i=0;i<nbTarget; i++)
   {
-    MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-
-  MPI_Waitall(indexRequest,request,status);
+    ep_lib::MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+
+  ep_lib::MPI_Waitall(indexRequest,request,status);
 
   // unpack the data
@@ -516,18 +524,18 @@
   }
 
-  MPI_Request *request = new MPI_Request[nbSource + nbTarget];
-  MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+  ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+  ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
   int indexRequest = 0;
   for (int i = 0; i < nbSource; i++)
   {
-    MPI_Isend(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-  for (int i = 0; i < nbTarget; i++)
-  {
-    MPI_Irecv(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-  MPI_Waitall(indexRequest, request, status);
+    ep_lib::MPI_Isend(&sourceMessageSize[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+  for (int i = 0; i < nbTarget; i++)
+  {
+    ep_lib::MPI_Irecv(&targetMessageSize[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+  ep_lib::MPI_Waitall(indexRequest, request, status);
 
   for (int i = 0; i < nbTarget; i++)
@@ -557,13 +565,13 @@
   for (int i = 0; i < nbSource; i++)
   {
-    MPI_Isend(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-  for (int i = 0; i < nbTarget; i++)
-  {
-    MPI_Irecv(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
-    indexRequest++;
-  }
-  MPI_Waitall(indexRequest, request, status);
+    ep_lib::MPI_Isend(sourceBuffer[i], sourceMessageSize[i], EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+  for (int i = 0; i < nbTarget; i++)
+  {
+    ep_lib::MPI_Irecv(targetBuffer[i], targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+    indexRequest++;
+  }
+  ep_lib::MPI_Waitall(indexRequest, request, status);
 
   // unpack the data
@@ -605,8 +613,8 @@
 
 template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
-                                const std::vector<int>& ranks, MPI_Comm communicator);
+                                const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
 
 template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
-                              const std::vector<int>& ranks, MPI_Comm communicator);
+                              const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
 
 }
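For context, the pattern applied throughout this changeset is uniform: MPI handle types (MPI_Comm, MPI_Request, MPI_Status), point-to-point and collective calls, and Alloc_mem are routed through XIOS's endpoint wrapper namespace ep_lib, while the datatype, op, and info constants MPI_CHAR, MPI_INT, MPI_SUM, and MPI_INFO_NULL become EP_CHAR, EP_INT, EP_SUM, and EP_INFO_NULL. Below is a minimal sketch of the resulting calling convention, assuming ep_lib mirrors the standard MPI C API one-to-one (as the hunks above suggest); the function exchangeSizes() and the header name "ep_lib.hpp" are illustrative only and are not part of mpi_routing.cpp:

    #include <cstddef>
    #include <vector>
    #include "ep_lib.hpp"  // assumed header exposing ep_lib::MPI_* and the EP_* constants

    // Sketch of the post-changeset style: identical structure to
    // alltoalls_unknown()'s size exchange, with ep_lib-qualified calls.
    void exchangeSizes(const std::vector<int>& ranks,
                       std::vector<int>& sendSizes,
                       std::vector<int>& recvSizes,
                       ep_lib::MPI_Comm communicator)
    {
      std::vector<ep_lib::MPI_Request> request(ranks.size() * 2);
      std::vector<ep_lib::MPI_Status>  status(ranks.size() * 2);
      int nbRequest = 0;

      // Post all receives first, then all sends, then wait on everything;
      // EP_INT stands in for MPI_INT under the wrapper layer.
      for (std::size_t i = 0; i < ranks.size(); i++)
        ep_lib::MPI_Irecv(&recvSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
      for (std::size_t i = 0; i < ranks.size(); i++)
        ep_lib::MPI_Isend(&sendSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
      ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
    }

The one non-mechanical change is the receive wildcard: under plain MPI (_usingMPI) the code keeps MPI_ANY_SOURCE, whereas under the endpoint library (_usingEP) the changeset passes the literal -2 instead, guarded by the #ifdef blocks visible in the hunks at source lines 152 and 176.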