Changeset 1638 for XIOS/trunk/extern
- Timestamp: 01/22/19 16:15:03
- Location: XIOS/trunk/extern/remap/src
- Files: 9 edited
Legend: lines removed in r1638 are prefixed with "-", lines added in r1638 with "+"; unchanged context lines are indented, and "…" marks code elided between hunks.
XIOS/trunk/extern/remap/src/libmapper.cpp (r1614 → r1638)

  double* src_area=NULL ;
  double* dst_area=NULL ;
- mapper = new Mapper(MPI_COMM_WORLD);
+ mapper = new Mapper(EP_COMM_WORLD);
  mapper->setVerbosity(PROGRESS) ;
  mapper->setSourceMesh(src_bounds_lon, src_bounds_lat, src_area, n_vert_per_cell_src, n_cell_src, src_pole ) ;
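Every hunk in this changeset applies the same mechanical substitution: plain MPI symbols (MPI_Comm, MPI_Request, MPI_INT, MPI_COMM_WORLD, ...) are replaced by their endpoint-library counterparts (ep_lib::MPI_Comm, EP_INT, EP_COMM_WORLD, ...). The sketch below illustrates why such a drop-in swap is possible: a wrapper namespace can simply forward to plain MPI when the endpoint layer is not enabled. It is a minimal sketch only, guarded by the _usingMPI macro that appears later in this changeset; apart from the names visible in the diffs, everything in it is an assumption, not the actual XIOS ep_lib code.

  // Hypothetical forwarding layer -- NOT the real ep_lib implementation.
  // Compile with an MPI compiler wrapper, e.g.  mpic++ -D_usingMPI sketch.cpp
  #include <mpi.h>

  namespace ep_lib
  {
  #ifdef _usingMPI
    // In a plain-MPI build the wrapper types are just the MPI handle types.
    using MPI_Comm    = ::MPI_Comm;
    using MPI_Request = ::MPI_Request;
    using MPI_Status  = ::MPI_Status;

    inline int MPI_Comm_rank(MPI_Comm comm, int* rank) { return ::MPI_Comm_rank(comm, rank); }
    inline int MPI_Comm_size(MPI_Comm comm, int* size) { return ::MPI_Comm_size(comm, size); }
  #endif
  }

  // Assumed mapping of the EP_* constants onto MPI equivalents for a plain-MPI build.
  #define EP_COMM_WORLD MPI_COMM_WORLD
  #define EP_INT        MPI_INT
  #define EP_DOUBLE     MPI_DOUBLE
  #define EP_SUM        MPI_SUM

With a layer of this kind in place, the call sites in the files below keep their structure; only the spelling of the types, constants, and function names changes.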
XIOS/trunk/extern/remap/src/mapper.cpp (r1614 → r1638)

  int mpiRank, mpiSize;
- MPI_Comm_rank(communicator, &mpiRank);
- MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);

  sourceElements.reserve(nbCells);
…
  long int offset ;
  long int nb=nbCells ;
- MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+ ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
  offset=offset-nb ;
  for(int i=0;i<nbCells;i++) sourceGlobalId[i]=offset+i ;
…
  int mpiRank, mpiSize;
- MPI_Comm_rank(communicator, &mpiRank);
- MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);

  targetElements.reserve(nbCells);
…
  long int offset ;
  long int nb=nbCells ;
- MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
+ ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
  offset=offset-nb ;
  for(int i=0;i<nbCells;i++) targetGlobalId[i]=offset+i ;
…
  vector<double> timings;
  int mpiSize, mpiRank;
- MPI_Comm_size(communicator, &mpiSize);
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);

  this->buildSSTree(sourceMesh, targetMesh);
…
  {
  int mpiSize, mpiRank;
- MPI_Comm_size(communicator, &mpiSize);
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);

  /* create list of intersections (super mesh elements) for each rank */
…
  /* communicate sizes of source elements to be sent (index lists and later values and gradients) */
  int *nbRecvElement = new int[mpiSize];
- MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
+ ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);

  /* communicate indices of source elements on other ranks whoes value and gradient we need (since intersection) */
…
  Coord **sendGrad = new Coord*[mpiSize];
  GloId **sendNeighIds = new GloId*[mpiSize];
- MPI_Request *sendRequest = new MPI_Request[5*mpiSize];
- MPI_Request *recvRequest = new MPI_Request[5*mpiSize];
+ ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
+ ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];
  for (int rank = 0; rank < mpiSize; rank++)
  {
  if (nbSendElement[rank] > 0)
  {
- MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendElement[rank], nbSendElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  }
…
  sendNeighIds[rank] = new GloId[nbRecvElement[rank]];
  }
- MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvElement[rank], nbRecvElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  }
  }
- MPI_Status *status = new MPI_Status[5*mpiSize];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[5*mpiSize];

- MPI_Waitall(nbSendRequest, sendRequest, status);
- MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);

  /* for all indices that have been received from requesting ranks: pack values and gradients, then send */
…
  sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id;
  }
- MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendValue[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
- MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
- MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  if (order == 2)
  {
- MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
- MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
  //ym --> attention taille GloId
  nbSendRequest++;
…
  else
  {
- MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
  //ym --> attention taille GloId
  nbSendRequest++;
…
  if (nbSendElement[rank] > 0)
  {
- MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvValue[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
- MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
- MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  if (order == 2)
  {
- MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
- MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
+ EP_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
- MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  //ym --> attention taille GloId
  nbRecvRequest++;
…
  else
  {
- MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  //ym --> attention taille GloId
  nbRecvRequest++;
…
  }

- MPI_Waitall(nbSendRequest, sendRequest, status);
- MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);

…
  {
  int mpiSize, mpiRank;
- MPI_Comm_size(communicator, &mpiSize);
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);

  vector<Node> *routingList = new vector<Node>[mpiSize];
…
  }

- MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
- MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+ ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+ ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);

  char **sendBuffer = new char*[mpiSize];
…
  int nbSendRequest = 0;
  int nbRecvRequest = 0;
- MPI_Request *sendRequest = new MPI_Request[mpiSize];
- MPI_Request *recvRequest = new MPI_Request[mpiSize];
- MPI_Status *status = new MPI_Status[mpiSize];
+ ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+ ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
…
  if (nbSendNode[rank] > 0)
  {
- MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendBuffer[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  }
  if (nbRecvNode[rank] > 0)
  {
- MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  }
  }

- MPI_Waitall(nbRecvRequest, recvRequest, status);
- MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);

  for (int rank = 0; rank < mpiSize; rank++)
…

- MPI_Barrier(communicator);
- MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
- MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+ ep_lib::MPI_Barrier(communicator);
+ ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+ ep_lib::MPI_Alltoall(sendMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);

  for (int rank = 0; rank < mpiSize; rank++)
…
  if (nbSendNode[rank] > 0)
  {
- MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  }
  if (nbRecvNode[rank] > 0)
  {
- MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  }
  }

- MPI_Waitall(nbRecvRequest, recvRequest, status);
- MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);

  int nbNeighbourNodes = 0;
…
  {
  int mpiSize, mpiRank;
- MPI_Comm_size(communicator, &mpiSize);
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_size(communicator, &mpiSize);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);

- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);

  vector<Node> *routingList = new vector<Node>[mpiSize];
…
  cout << endl;
  }
- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);

  int *nbSendNode = new int[mpiSize];
…
  }

- MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator);
- MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+ ep_lib::MPI_Alltoall(nbSendNode, 1, EP_INT, nbRecvNode, 1, EP_INT, communicator);
+ ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);

  int total = 0;
…
  int nbSendRequest = 0;
  int nbRecvRequest = 0;
- MPI_Request *sendRequest = new MPI_Request[mpiSize];
- MPI_Request *recvRequest = new MPI_Request[mpiSize];
- MPI_Status *status = new MPI_Status[mpiSize];
+ ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[mpiSize];
+ ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[mpiSize];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[mpiSize];

  for (int rank = 0; rank < mpiSize; rank++)
…
  if (nbSendNode[rank] > 0)
  {
- MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendBuffer[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  }
  if (nbRecvNode[rank] > 0)
  {
- MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  }
  }

- MPI_Waitall(nbRecvRequest, recvRequest, status);
- MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
  char **sendBuffer2 = new char*[mpiSize];
  char **recvBuffer2 = new char*[mpiSize];
…

  if (verbose >= 2) cout << "Rank " << mpiRank << " Compute (internal) intersection " << cputime() - tic << " s" << endl;
- MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator);
+ ep_lib::MPI_Alltoall(sentMessageSize, 1, EP_INT, recvMessageSize, 1, EP_INT, communicator);

  for (int rank = 0; rank < mpiSize; rank++)
…
  if (sentMessageSize[rank] > 0)
  {
- MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+ ep_lib::MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
  nbSendRequest++;
  }
  if (recvMessageSize[rank] > 0)
  {
- MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+ ep_lib::MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], EP_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  nbRecvRequest++;
  }
  }

- MPI_Waitall(nbRecvRequest, recvRequest, status);
- MPI_Waitall(nbSendRequest, sendRequest, status);
+ ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+ ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);

  delete [] sendRequest;
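The recurring pattern in mapper.cpp above is: exchange per-rank element counts with MPI_Alltoall, post one MPI_Irecv and one MPI_Issend per peer that has data, then complete everything with MPI_Waitall. The sketch below reproduces that pattern with plain MPI names and a dummy integer payload; it is an illustration only, not code from the changeset, which performs the same calls through ep_lib:: with the EP_* datatypes.

  // Minimal sketch of the count-then-payload exchange (plain MPI, dummy payload).
  #include <mpi.h>
  #include <vector>

  void exchangeSketch(MPI_Comm comm, const std::vector<int>& nbSendElement) // one entry per rank
  {
    int mpiSize;
    MPI_Comm_size(comm, &mpiSize);

    // 1) every rank learns how many elements each peer will send to it
    std::vector<int> nbRecvElement(mpiSize);
    MPI_Alltoall(nbSendElement.data(), 1, MPI_INT, nbRecvElement.data(), 1, MPI_INT, comm);

    std::vector<std::vector<int>> sendBuf(mpiSize), recvBuf(mpiSize);
    std::vector<MPI_Request> request(2 * mpiSize);
    int nbRequest = 0;

    // 2) post a non-blocking receive and a synchronous-mode send per peer with data
    for (int rank = 0; rank < mpiSize; rank++)
    {
      if (nbRecvElement[rank] > 0)
      {
        recvBuf[rank].resize(nbRecvElement[rank]);
        MPI_Irecv(recvBuf[rank].data(), nbRecvElement[rank], MPI_INT, rank, 0, comm, &request[nbRequest++]);
      }
      if (nbSendElement[rank] > 0)
      {
        sendBuf[rank].assign(nbSendElement[rank], rank);   // dummy payload
        MPI_Issend(sendBuf[rank].data(), nbSendElement[rank], MPI_INT, rank, 0, comm, &request[nbRequest++]);
      }
    }

    // 3) wait for all posted operations before the buffers are reused
    MPI_Waitall(nbRequest, request.data(), MPI_STATUSES_IGNORE);
  }

A note on the design: MPI_Issend is the synchronous-mode non-blocking send, so it only completes once the matching receive has been posted; that keeps buffering bounded on large exchanges, which is presumably why the original code prefers it over plain MPI_Isend here.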
XIOS/trunk/extern/remap/src/mapper.hpp (r1614 → r1638)

  {
  public:
- Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
+ Mapper(ep_lib::MPI_Comm comm=EP_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
  ~Mapper();
  void setVerbosity(verbosity v) {verbose=v ;}
…

  CParallelTree sstree;
- MPI_Comm communicator ;
+ ep_lib::MPI_Comm communicator ;
  std::vector<Elt> sourceElements ;
  std::vector<Node> sourceMesh ;
XIOS/trunk/extern/remap/src/mpi_cascade.cpp (r688 → r1638)

  namespace sphereRemap {

- CMPICascade::CMPICascade(int nodes_per_level, MPI_Comm comm)
+ CMPICascade::CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm)
  {
  int remaining_levels;
- MPI_Comm intraComm;
+ ep_lib::MPI_Comm intraComm;
  int l = 0; // current level
  do {
…
  level[l].p_grp_size = level[l].size/level[l].group_size;

- MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
- MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
+ ep_lib::MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
+ ep_lib::MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
  comm = intraComm;
  l++;
XIOS/trunk/extern/remap/src/mpi_cascade.hpp (r694 → r1638)

  {
  public:
- CCascadeLevel(MPI_Comm comm) : comm(comm)
+ CCascadeLevel(ep_lib::MPI_Comm comm) : comm(comm)
  {
- MPI_Comm_size(comm, &size);
- MPI_Comm_rank(comm, &rank);
+ ep_lib::MPI_Comm_size(comm, &size);
+ ep_lib::MPI_Comm_rank(comm, &rank);
  }
  int colour() const { return rank % group_size; };
…
  int p_key() const { return colour() + rank/(p_grp_size*group_size)*group_size; }

- MPI_Comm comm, pg_comm;
+ ep_lib::MPI_Comm comm, pg_comm;
  int rank;
  int size;
…
  public:
  //
- CMPICascade(int nodes_per_level, MPI_Comm comm);
+ CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm);

  int num_levels;
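CMPICascade builds its levels by repeatedly splitting the parent communicator, using the colour()/key() accessors declared in mpi_cascade.hpp above. The sketch below shows a single such split with plain MPI (the changeset uses ep_lib::MPI_Comm_split). The colour formula is the one visible in CCascadeLevel::colour(); the key formula and the function name are assumptions made for illustration.

  // One cascade-level split: ranks with the same colour end up in the same sub-communicator.
  #include <mpi.h>

  void splitLevel(MPI_Comm comm, int group_size, MPI_Comm* intraComm)
  {
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    int colour = rank % group_size;   // as in CCascadeLevel::colour()
    int key    = rank / group_size;   // assumed ordering of ranks inside each sub-communicator
    MPI_Comm_split(comm, colour, key, intraComm);
    // CMPICascade then repeats the split on intraComm for the next, smaller level.
  }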
XIOS/trunk/extern/remap/src/mpi_routing.cpp (r694 → r1638)

  const int verbose = 0;

- CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
- {
- MPI_Comm_rank(comm, &mpiRank);
- MPI_Comm_size(comm, &mpiSize);
+ CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
+ {
+ ep_lib::MPI_Comm_rank(comm, &mpiRank);
+ ep_lib::MPI_Comm_size(comm, &mpiSize);
  }
…
  but message lengths are *known* to receiver */
  template <typename T>
- void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
- {
- vector<MPI_Request> request(ranks.size() * 2);
- vector<MPI_Status> status(ranks.size() * 2);
+ void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+ {
+ vector<ep_lib::MPI_Request> request(ranks.size() * 2);
+ vector<ep_lib::MPI_Status> status(ranks.size() * 2);

  // communicate data
…
  for (int i = 0; i < ranks.size(); i++)
  if (recv[i].size())
- MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+ ep_lib::MPI_Irecv(&recv[i][0], recv[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
  if (send[i].size())
- MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), MPI_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
- MPI_Waitall(nbRequest, &request[0], &status[0]);
+ ep_lib::MPI_Isend((void *) &send[i][0], send[i].size()*sizeof(T), EP_CHAR, ranks[i], 0, communicator, &request[nbRequest++]);
+ ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);
  }

…
  but message lengths are *unknown* to receiver */
  template <typename T>
- void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
- {
- vector<MPI_Request> request(ranks.size() * 2);
- vector<MPI_Status> status(ranks.size() * 2);
+ void alltoalls_unknown(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+ {
+ vector<ep_lib::MPI_Request> request(ranks.size() * 2);
+ vector<ep_lib::MPI_Status> status(ranks.size() * 2);

  // communicate sizes
…
  sendSizes[i] = send[i].size();
  for (int i = 0; i < ranks.size(); i++)
- MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+ ep_lib::MPI_Irecv(&recvSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
  for (int i = 0; i < ranks.size(); i++)
- MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
- MPI_Waitall(nbRequest, &request[0], &status[0]);
+ ep_lib::MPI_Isend(&sendSizes[i], 1, EP_INT, ranks[i], 0, communicator, &request[nbRequest++]);
+ ep_lib::MPI_Waitall(nbRequest, &request[0], &status[0]);

  // allocate
…
  CTimer::get("CMPIRouting::init(reduce_scatter)").reset();
  CTimer::get("CMPIRouting::init(reduce_scatter)").resume();
- MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
+ ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);
  CTimer::get("CMPIRouting::init(reduce_scatter)").suspend();
  CTimer::get("CMPIRouting::init(reduce_scatter)").print();

- MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
- MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
+ ep_lib::MPI_Alloc_mem(nbTarget *sizeof(int), EP_INFO_NULL, &targetRank);
+ ep_lib::MPI_Alloc_mem(nbSource *sizeof(int), EP_INFO_NULL, &sourceRank);

  targetRankToIndex = new int[mpiSize];
…
  }

- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::init(get_source)").reset();
  CTimer::get("CMPIRouting::init(get_source)").resume();

- MPI_Request *request = new MPI_Request[nbSource + nbTarget];
- MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+ ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];

  int indexRequest = 0;
…
  for (int i = 0; i < nbSource; i++)
  {
- MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
- indexRequest++;
+ #ifdef _usingMPI
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+ #elif _usingEP
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
+ #endif
+ indexRequest++;
  }
  MPI_Barrier(communicator);
  for (int i = 0; i < nbTarget; i++)
  {
- MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for (int i = 0; i < nbSource; i++)
  {
- MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
-
- for (int i = 0; i < nbTarget; i++)
- {
- MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+ #ifdef _usingMPI
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
+ #elif _usingEP
+ ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
+ #endif
+ indexRequest++;
+ }
+
+ for (int i = 0; i < nbTarget; i++)
+ {
+ ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for (int i = 0; i < nbSource; i++)
  {
- MPI_Irecv(&nbSourceElement[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Irecv(&nbSourceElement[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  {
  totalTargetElement += nbTargetElement[i];
- MPI_Isend(&nbTargetElement[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Isend(&nbTargetElement[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…

- MPI_Request* request=new MPI_Request[nbSource+nbTarget];
- MPI_Status* status=new MPI_Status[nbSource+nbTarget];
+ ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
+ ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
  int indexRequest=0;

- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget").reset();
  CTimer::get("CMPIRouting::transferToTarget").resume();
…
  for(int i=0; i<nbSource; i++)
  {
- MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Irecv(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for(int i=0;i<nbTarget; i++)
  {
- MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
-
- MPI_Waitall(indexRequest,request,status);
+ ep_lib::MPI_Isend(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+
+ ep_lib::MPI_Waitall(indexRequest,request,status);

  CTimer::get("CMPIRouting::transferToTarget").suspend();
  CTimer::get("CMPIRouting::transferToTarget").print();
- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);

  // unpack the data
…
  }

- MPI_Request *request = new MPI_Request[nbSource + nbTarget];
- MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+ ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;

- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").reset();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").resume();
…
  for(int i=0; i<nbSource; i++)
  {
- MPI_Irecv(&sourceMessageSize[i],1,MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Irecv(&sourceMessageSize[i],1,EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for(int i=0; i<nbTarget; i++)
  {
- MPI_Isend(&targetMessageSize[i],1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
-
- MPI_Waitall(indexRequest,request,status);
-
- MPI_Barrier(communicator);
+ ep_lib::MPI_Isend(&targetMessageSize[i],1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+
+ ep_lib::MPI_Waitall(indexRequest,request,status);
+
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").suspend();
  CTimer::get("CMPIRouting::transferToTarget(messageSize)").print();
…
  for(int i=0; i<nbSource; i++)
  {
- MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Irecv(sourceBuffer[i],sourceMessageSize[i],EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for(int i=0;i<nbTarget; i++)
  {
- MPI_Isend(targetBuffer[i],targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Isend(targetBuffer[i],targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  }

- MPI_Request* request=new MPI_Request[nbSource+nbTarget];
- MPI_Status* status=new MPI_Status[nbSource+nbTarget];
+ ep_lib::MPI_Request* request=new ep_lib::MPI_Request[nbSource+nbTarget];
+ ep_lib::MPI_Status* status=new ep_lib::MPI_Status[nbSource+nbTarget];
  int indexRequest=0;

  for(int i=0; i<nbSource; i++)
  {
- MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+ ep_lib::MPI_Isend(sourceBuffer[i],nbSourceElement[i]*sizeof(T),EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
  indexRequest++;
  }
…
  for(int i=0;i<nbTarget; i++)
  {
- MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
-
- MPI_Waitall(indexRequest,request,status);
+ ep_lib::MPI_Irecv(targetBuffer[i],nbTargetElement[i]*sizeof(T), EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+
+ ep_lib::MPI_Waitall(indexRequest,request,status);

  // unpack the data
…
  }

- MPI_Request *request = new MPI_Request[nbSource + nbTarget];
- MPI_Status *status = new MPI_Status[nbSource + nbTarget];
+ ep_lib::MPI_Request *request = new ep_lib::MPI_Request[nbSource + nbTarget];
+ ep_lib::MPI_Status *status = new ep_lib::MPI_Status[nbSource + nbTarget];
  int indexRequest = 0;
  for (int i = 0; i < nbSource; i++)
  {
- MPI_Isend(&sourceMessageSize[i], 1, MPI_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
- for (int i = 0; i < nbTarget; i++)
- {
- MPI_Irecv(&targetMessageSize[i], 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
- MPI_Waitall(indexRequest, request, status);
+ ep_lib::MPI_Isend(&sourceMessageSize[i], 1, EP_INT, sourceRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+ for (int i = 0; i < nbTarget; i++)
+ {
+ ep_lib::MPI_Irecv(&targetMessageSize[i], 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+ ep_lib::MPI_Waitall(indexRequest, request, status);

  for (int i = 0; i < nbTarget; i++)
…
  for (int i = 0; i < nbSource; i++)
  {
- MPI_Isend(sourceBuffer[i], sourceMessageSize[i], MPI_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
- for (int i = 0; i < nbTarget; i++)
- {
- MPI_Irecv(targetBuffer[i], targetMessageSize[i], MPI_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
- indexRequest++;
- }
- MPI_Waitall(indexRequest, request, status);
+ ep_lib::MPI_Isend(sourceBuffer[i], sourceMessageSize[i], EP_CHAR, sourceRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+ for (int i = 0; i < nbTarget; i++)
+ {
+ ep_lib::MPI_Irecv(targetBuffer[i], targetMessageSize[i], EP_CHAR, targetRank[i], 0, communicator, &request[indexRequest]);
+ indexRequest++;
+ }
+ ep_lib::MPI_Waitall(indexRequest, request, status);

  // unpack the data
…

  template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
- const std::vector<int>& ranks, MPI_Comm communicator);
+ const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

  template void alltoalls_known(const std::vector<std::vector<int> >& send, std::vector<std::vector<int> >& recv,
- const std::vector<int>& ranks, MPI_Comm communicator);
-
- }
+ const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+
+ }
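alltoalls_unknown (above) works in two phases: peers first exchange message sizes so that receivers can allocate, then the payloads are exchanged as raw bytes. The sketch below restates that flow with plain MPI names; the template requires T to be trivially copyable, exactly as the byte-wise MPI_CHAR transfer in the original does, and the function name is illustrative only.

  // Two-phase exchange sketch: sizes first, then payloads (plain MPI names).
  #include <mpi.h>
  #include <vector>

  template <typename T>
  void alltoallsUnknownSketch(const std::vector<std::vector<T>>& send,
                              std::vector<std::vector<T>>& recv,
                              const std::vector<int>& ranks, MPI_Comm comm)
  {
    const int n = static_cast<int>(ranks.size());
    std::vector<MPI_Request> request(2 * n);
    std::vector<int> sendSizes(n), recvSizes(n);
    int nbRequest = 0;

    // phase 1: exchange element counts with every listed peer
    for (int i = 0; i < n; i++) sendSizes[i] = static_cast<int>(send[i].size());
    for (int i = 0; i < n; i++)
      MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, comm, &request[nbRequest++]);
    for (int i = 0; i < n; i++)
      MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, comm, &request[nbRequest++]);
    MPI_Waitall(nbRequest, request.data(), MPI_STATUSES_IGNORE);

    // phase 2: now that lengths are known, allocate and exchange the payloads as bytes
    recv.resize(n);
    nbRequest = 0;
    for (int i = 0; i < n; i++)
      if (recvSizes[i] > 0)
      {
        recv[i].resize(recvSizes[i]);
        MPI_Irecv(recv[i].data(), recvSizes[i] * (int)sizeof(T), MPI_CHAR, ranks[i], 0, comm, &request[nbRequest++]);
      }
    for (int i = 0; i < n; i++)
      if (!send[i].empty())
        MPI_Isend((void*)send[i].data(), (int)(send[i].size() * sizeof(T)), MPI_CHAR, ranks[i], 0, comm, &request[nbRequest++]);
    MPI_Waitall(nbRequest, request.data(), MPI_STATUSES_IGNORE);
  }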
XIOS/trunk/extern/remap/src/mpi_routing.hpp (r694 → r1638)

  {

- MPI_Comm communicator;
+ ep_lib::MPI_Comm communicator;
  int mpiRank;
  int mpiSize;
…

  public:
- CMPIRouting(MPI_Comm comm);
+ CMPIRouting(ep_lib::MPI_Comm comm);
  ~CMPIRouting();
  template<typename T> void init(const std::vector<T>& route, CMPICascade *cascade = NULL);
…
  template <typename T>
  void alltoalls_known(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
- const std::vector<int>& ranks, MPI_Comm communicator);
+ const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

  template <typename T>
  void alltoalls_unknown(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
- const std::vector<int>& ranks, MPI_Comm communicator);
+ const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
  }
  #endif
XIOS/trunk/extern/remap/src/parallel_tree.cpp (r923 → r1638)

  //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
- CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
+ CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
  {
  treeCascade.reserve(cascade.num_levels);
…

  int nrecv; // global number of samples THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
- MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
+ ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
  double ratio = blocSize / (1.0 * nrecv);
  int nsend = ratio * n + 1; // nsend = n_local_samples / n_global_samples * blocksize + 1 = blocksize/comm.size
…

  int *counts = new int[comm.size];
- MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
+ ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);

  nrecv = 0;
…
  /* each process needs the sample elements from all processes */
  double *recvBuffer = new double[nrecv*4];
- MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
+ ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
  delete[] sendBuffer;
  delete[] counts;
…
  << " node size : "<<node.size()<<" bloc size : "<<blocSize<<" total number of leaf : "<<tree.leafs.size()<<endl ;
  /*
- MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
+ MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
  if (!allok) {
  MPI_Finalize();
…
  }
  */
- MPI_Abort(MPI_COMM_WORLD,-1) ;
+ ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;

  /*
…
  {
  CMPIRouting MPIRoute(communicator);
- MPI_Barrier(communicator);
+ ep_lib::MPI_Barrier(communicator);
  CTimer::get("buildLocalTree(initRoute)").resume();
  MPIRoute.init(route);
…

  int mpiRank;
- MPI_Comm_rank(communicator, &mpiRank);
+ ep_lib::MPI_Comm_rank(communicator, &mpiRank);
  localTree.leafs.reserve(nbLocalElements);
  for (int i = 0; i < nbLocalElements; i++)
…
  nb1=node.size() ; nb2=node2.size() ;
  nb=nb1+nb2 ;
- MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
+ ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
  int commSize ;
- MPI_Comm_size(communicator,&commSize) ;
+ ep_lib::MPI_Comm_size(communicator,&commSize) ;

  // make multiple of two
…
  // gather circles on this level of the cascade
  int pg_size;
- MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
+ ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
  vector<Coord> allRootCentres(pg_size);
  vector<double> allRootRadia(pg_size);
- MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
- MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0], 1, MPI_DOUBLE, cascade.level[level].pg_comm);
+ ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
+ ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0], 1, EP_DOUBLE, cascade.level[level].pg_comm);

  // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
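The sample-tree construction in parallel_tree.cpp gathers a global set of sample nodes: each rank announces its local count with MPI_Allgather, displacements are derived from those counts, and the flattened samples (4 doubles per node) are collected on every rank with MPI_Allgatherv. The sketch below condenses those calls using plain MPI names; the helper name and the wrapper around the buffer layout are illustrative assumptions, not code from the changeset.

  // Variable-size gather sketch: counts via Allgather, samples via Allgatherv.
  #include <mpi.h>
  #include <vector>

  std::vector<double> gatherSamples(const std::vector<double>& localSamples /* 4 doubles per node */,
                                    MPI_Comm comm)
  {
    int commSize;
    MPI_Comm_size(comm, &commSize);
    int nsend = static_cast<int>(localSamples.size() / 4);

    // every rank learns every other rank's sample count
    std::vector<int> counts(commSize);
    MPI_Allgather(&nsend, 1, MPI_INT, counts.data(), 1, MPI_INT, comm);

    // convert counts to doubles (4 per sample) and build displacements
    std::vector<int> displs(commSize);
    int nrecv = 0;
    for (int i = 0; i < commSize; i++) { displs[i] = 4 * nrecv; nrecv += counts[i]; counts[i] *= 4; }

    // gather the flattened samples from all ranks
    std::vector<double> recvBuffer(4 * nrecv);
    MPI_Allgatherv(const_cast<double*>(localSamples.data()), 4 * nsend, MPI_DOUBLE,
                   recvBuffer.data(), counts.data(), displs.data(), MPI_DOUBLE, comm);
    return recvBuffer;
  }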
XIOS/trunk/extern/remap/src/parallel_tree.hpp (r694 → r1638)

  {
  public:
- CParallelTree(MPI_Comm comm);
+ CParallelTree(ep_lib::MPI_Comm comm);
  ~CParallelTree();

…
  vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
  CMPICascade cascade;
- MPI_Comm communicator ;
+ ep_lib::MPI_Comm communicator ;

  };