Changeset 1639
- Timestamp: 01/22/19 16:43:32
- Location: XIOS/trunk
- Files: 2 deleted, 70 edited
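The substance of the changeset is the removal of the ep_lib endpoint-MPI wrapper from the files below: wrapped types, calls and EP_* constants go back to their standard MPI counterparts. A minimal sketch of the resulting calling convention (illustrative only, not code from the changeset; the old wrapper names are recalled in comments):

    // Minimal sketch of the r1639 calling convention: standard MPI types and
    // constants are used directly where r1638 went through the ep_lib wrapper.
    #include <mpi.h>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);

        MPI_Comm comm = MPI_COMM_WORLD;          // r1638: ep_lib::MPI_Comm comm = EP_COMM_WORLD;
        int rank, size;
        MPI_Comm_rank(comm, &rank);              // r1638: ep_lib::MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);              // r1638: ep_lib::MPI_Comm_size(comm, &size);

        long nb = 1, offset = 0;
        MPI_Scan(&nb, &offset, 1, MPI_LONG, MPI_SUM, comm);   // r1638: EP_LONG, EP_SUM

        MPI_Finalize();
        return 0;
    }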
XIOS/trunk/extern/remap/src/libmapper.cpp
r1638 → r1639
        double* src_area=NULL ;
        double* dst_area=NULL ;
-       mapper = new Mapper(EP_COMM_WORLD);
+       mapper = new Mapper(MPI_COMM_WORLD);
        mapper->setVerbosity(PROGRESS) ;
        mapper->setSourceMesh(src_bounds_lon, src_bounds_lat, src_area, n_vert_per_cell_src, n_cell_src, src_pole ) ;
XIOS/trunk/extern/remap/src/mapper.cpp
r1638 → r1639: throughout mapper.cpp, every ep_lib:: wrapper call and type (ep_lib::MPI_Comm_rank/size, MPI_Scan, MPI_Alltoall, MPI_Issend, MPI_Irecv, MPI_Waitall, MPI_Barrier, ep_lib::MPI_Request, ep_lib::MPI_Status) is replaced by its standard MPI counterpart, and the EP_INT, EP_LONG, EP_DOUBLE, EP_CHAR and EP_SUM constants become MPI_INT, MPI_LONG, MPI_DOUBLE, MPI_CHAR and MPI_SUM. The change is purely this renaming, repeated at every call site; for example:
-       ep_lib::MPI_Comm_rank(communicator, &mpiRank);
-       ep_lib::MPI_Comm_size(communicator, &mpiSize);
+       MPI_Comm_rank(communicator, &mpiRank);
+       MPI_Comm_size(communicator, &mpiSize);
…
-       ep_lib::MPI_Scan(&nb,&offset,1,EP_LONG,EP_SUM,communicator) ;
+       MPI_Scan(&nb,&offset,1,MPI_LONG,MPI_SUM,communicator) ;
…
-       ep_lib::MPI_Alltoall(nbSendElement, 1, EP_INT, nbRecvElement, 1, EP_INT, communicator);
+       MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator);
…
-       ep_lib::MPI_Request *sendRequest = new ep_lib::MPI_Request[5*mpiSize];
-       ep_lib::MPI_Request *recvRequest = new ep_lib::MPI_Request[5*mpiSize];
+       MPI_Request *sendRequest = new MPI_Request[5*mpiSize];
+       MPI_Request *recvRequest = new MPI_Request[5*mpiSize];
…
-       ep_lib::MPI_Issend(sendElement[rank], nbSendElement[rank], EP_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
+       MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
…
-       ep_lib::MPI_Irecv(recvElement[rank], nbRecvElement[rank], EP_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
+       MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
…
-       ep_lib::MPI_Status *status = new ep_lib::MPI_Status[5*mpiSize];
+       MPI_Status *status = new MPI_Status[5*mpiSize];
…
-       ep_lib::MPI_Waitall(nbSendRequest, sendRequest, status);
-       ep_lib::MPI_Waitall(nbRecvRequest, recvRequest, status);
+       MPI_Waitall(nbSendRequest, sendRequest, status);
+       MPI_Waitall(nbRecvRequest, recvRequest, status);
…
-       ep_lib::MPI_Issend(sendValue[rank], nbRecvElement[rank], EP_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
+       MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
…
-       ep_lib::MPI_Issend(sendBuffer[rank], sendMessageSize[rank], EP_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
+       MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]);
…
-       ep_lib::MPI_Barrier(communicator);
+       MPI_Barrier(communicator);
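For orientation, the communication skeleton these hunks run through is: exchange per-rank element counts with MPI_Alltoall, post matching MPI_Issend/MPI_Irecv pairs, and complete them with MPI_Waitall. A simplified, self-contained sketch of that skeleton with plain MPI (integer payloads stand in for the indices, values and gradients the mapper actually ships):

    // Simplified sketch of the exchange skeleton used in mapper.cpp:
    // 1) MPI_Alltoall distributes the per-rank message counts,
    // 2) MPI_Issend/MPI_Irecv pairs move the data, completed by MPI_Waitall.
    #include <mpi.h>
    #include <vector>

    void exchangeSketch(MPI_Comm communicator,
                        std::vector<int>& nbSendElement,               // one count per destination rank
                        std::vector<std::vector<int> >& sendElement)   // payload per destination rank
    {
        int mpiSize, mpiRank;
        MPI_Comm_size(communicator, &mpiSize);
        MPI_Comm_rank(communicator, &mpiRank);

        // every rank learns how many elements it will receive from every other rank
        std::vector<int> nbRecvElement(mpiSize);
        MPI_Alltoall(&nbSendElement[0], 1, MPI_INT, &nbRecvElement[0], 1, MPI_INT, communicator);

        // post the nonblocking transfers
        std::vector<MPI_Request> requests;
        std::vector<std::vector<int> > recvElement(mpiSize);
        for (int rank = 0; rank < mpiSize; rank++)
        {
            if (nbSendElement[rank] > 0)
            {
                requests.push_back(MPI_Request());
                MPI_Issend(&sendElement[rank][0], nbSendElement[rank], MPI_INT,
                           rank, 0, communicator, &requests.back());
            }
            if (nbRecvElement[rank] > 0)
            {
                recvElement[rank].resize(nbRecvElement[rank]);
                requests.push_back(MPI_Request());
                MPI_Irecv(&recvElement[rank][0], nbRecvElement[rank], MPI_INT,
                          rank, 0, communicator, &requests.back());
            }
        }

        // wait for every posted send and receive
        std::vector<MPI_Status> statuses(requests.size());
        if (!requests.empty())
            MPI_Waitall((int)requests.size(), &requests[0], &statuses[0]);
    }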
XIOS/trunk/extern/remap/src/mapper.hpp
r1638 → r1639
      {
      public:
-       Mapper(ep_lib::MPI_Comm comm=EP_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
+       Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
        ~Mapper();
        void setVerbosity(verbosity v) {verbose=v ;}
…
        CParallelTree sstree;
-       ep_lib::MPI_Comm communicator ;
+       MPI_Comm communicator ;
        std::vector<Elt> sourceElements ;
        std::vector<Node> sourceMesh ;
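A minimal usage sketch of the Mapper interface after this change; the sphereRemap namespace qualification and the parameter types are assumptions taken from the libmapper.cpp hunk above, and the argument values are placeholders:

    // Minimal usage sketch: the Mapper is now driven by a standard MPI communicator
    // (default MPI_COMM_WORLD instead of EP_COMM_WORLD).
    #include <mpi.h>
    #include "mapper.hpp"

    void buildWeights(double* src_bounds_lon, double* src_bounds_lat,
                      int n_vert_per_cell_src, int n_cell_src, double* src_pole)
    {
        double* src_area = NULL;                      // as in libmapper.cpp
        sphereRemap::Mapper mapper(MPI_COMM_WORLD);   // r1638 default: EP_COMM_WORLD
        mapper.setVerbosity(PROGRESS);
        mapper.setSourceMesh(src_bounds_lon, src_bounds_lat, src_area,
                             n_vert_per_cell_src, n_cell_src, src_pole);
    }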
XIOS/trunk/extern/remap/src/mpi_cascade.cpp
r1638 → r1639
      namespace sphereRemap {
-     CMPICascade::CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm)
+     CMPICascade::CMPICascade(int nodes_per_level, MPI_Comm comm)
      {
        int remaining_levels;
-       ep_lib::MPI_Comm intraComm;
+       MPI_Comm intraComm;
        int l = 0; // current level
        do {
…
        level[l].p_grp_size = level[l].size/level[l].group_size;
-       ep_lib::MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
-       ep_lib::MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
+       MPI_Comm_split(comm, level[l].colour(), level[l].key(), &intraComm);
+       MPI_Comm_split(comm, level[l].p_colour(), level[l].p_key(), &(level[l].pg_comm));
        comm = intraComm;
        l++;
XIOS/trunk/extern/remap/src/mpi_cascade.hpp
r1638 → r1639
      {
      public:
-       CCascadeLevel(ep_lib::MPI_Comm comm) : comm(comm)
+       CCascadeLevel(MPI_Comm comm) : comm(comm)
        {
-         ep_lib::MPI_Comm_size(comm, &size);
-         ep_lib::MPI_Comm_rank(comm, &rank);
+         MPI_Comm_size(comm, &size);
+         MPI_Comm_rank(comm, &rank);
        }
        int colour() const { return rank % group_size; };
…
        int p_key() const { return colour() + rank/(p_grp_size*group_size)*group_size; }
-       ep_lib::MPI_Comm comm, pg_comm;
+       MPI_Comm comm, pg_comm;
        int rank;
        int size;
…
      public:
        //
-       CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm);
+       CMPICascade(int nodes_per_level, MPI_Comm comm);
        int num_levels;
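CMPICascade builds each level with MPI_Comm_split, grouping ranks by colour and ordering them by key. A standalone illustration of that call (the group size and the key formula here are placeholders, not the cascade's own):

    // Standalone illustration of the MPI_Comm_split call CMPICascade relies on:
    // ranks sharing a colour land in the same sub-communicator, ordered by key.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
        MPI_Init(&argc, &argv);
        int rank, size;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);

        int group_size = 2;
        int colour = rank % group_size;     // cf. CCascadeLevel::colour()
        int key    = rank / group_size;     // illustrative ordering inside each group

        MPI_Comm intraComm;
        MPI_Comm_split(MPI_COMM_WORLD, colour, key, &intraComm);

        int subRank, subSize;
        MPI_Comm_rank(intraComm, &subRank);
        MPI_Comm_size(intraComm, &subSize);
        printf("world rank %d/%d -> colour %d, sub rank %d/%d\n", rank, size, colour, subRank, subSize);

        MPI_Comm_free(&intraComm);
        MPI_Finalize();
        return 0;
    }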
XIOS/trunk/extern/remap/src/mpi_routing.cpp
r1638 → r1639: as in mapper.cpp, the ep_lib:: wrapper types and calls and the EP_* constants are replaced by their standard MPI equivalents throughout the file (MPI_Comm, MPI_Request, MPI_Status, MPI_INT, MPI_CHAR, MPI_SUM, MPI_INFO_NULL, ...). For example:
-     CMPIRouting::CMPIRouting(ep_lib::MPI_Comm comm) : communicator(comm)
-     {
-       ep_lib::MPI_Comm_rank(comm, &mpiRank);
-       ep_lib::MPI_Comm_size(comm, &mpiSize);
+     CMPIRouting::CMPIRouting(MPI_Comm comm) : communicator(comm)
+     {
+       MPI_Comm_rank(comm, &mpiRank);
+       MPI_Comm_size(comm, &mpiSize);
      }
…
-     void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, ep_lib::MPI_Comm communicator)
+     void alltoalls_known(const vector<vector<T> >& send, vector<vector<T> >& recv, const vector<int>& ranks, MPI_Comm communicator)
…
-       ep_lib::MPI_Reduce_scatter(toSend, &nbSource, recvCount, EP_INT, EP_SUM, communicator);
+       MPI_Reduce_scatter(toSend, &nbSource, recvCount, MPI_INT, MPI_SUM, communicator);
…
-       ep_lib::MPI_Alloc_mem(nbTarget *sizeof(int), EP_INFO_NULL, &targetRank);
-       ep_lib::MPI_Alloc_mem(nbSource *sizeof(int), EP_INFO_NULL, &sourceRank);
+       MPI_Alloc_mem(nbTarget *sizeof(int), MPI_INFO_NULL, &targetRank);
+       MPI_Alloc_mem(nbSource *sizeof(int), MPI_INFO_NULL, &sourceRank);
…
In addition, the receive of the source ranks is no longer switched between an MPI and an endpoint variant at preprocessing time; the _usingMPI/_usingEP blocks collapse to a single MPI_Irecv with MPI_ANY_SOURCE (two occurrences):
        for (int i = 0; i < nbSource; i++)
        {
-     #ifdef _usingMPI
-         ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
-     #elif _usingEP
-         ep_lib::MPI_Irecv(&sourceRank[i], 1, EP_INT, -2, 0, communicator, &request[indexRequest]);
-     #endif
+         MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
          indexRequest++;
        }
        MPI_Barrier(communicator);
        for (int i = 0; i < nbTarget; i++)
        {
-         ep_lib::MPI_Isend(&mpiRank, 1, EP_INT, targetRank[i], 0, communicator, &request[indexRequest]);
+         MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
          indexRequest++;
        }
…
-     template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
-                                     const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+     template void alltoalls_unknown(const std::vector<std::vector<NES> >& send, std::vector<std::vector<NES> >& recv,
+                                     const std::vector<int>& ranks, MPI_Comm communicator);
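alltoalls_unknown above is a two-phase handshake: message sizes are exchanged first with nonblocking sends/receives, then the payload buffers are allocated and exchanged. A condensed plain-MPI sketch of that pattern, with int payloads instead of the raw MPI_CHAR byte streams the template uses:

    // Two-phase exchange sketched from alltoalls_unknown: sizes first, then data.
    #include <mpi.h>
    #include <vector>
    using std::vector;

    void alltoallsUnknownSketch(vector<vector<int> >& send, vector<vector<int> >& recv,
                                const vector<int>& ranks, MPI_Comm communicator)
    {
        vector<MPI_Request> request(ranks.size() * 2);
        vector<MPI_Status>  status(ranks.size() * 2);

        // phase 1: tell every peer how much it will receive
        int nbRequest = 0;
        vector<int> sendSizes(ranks.size()), recvSizes(ranks.size());
        for (size_t i = 0; i < ranks.size(); i++) sendSizes[i] = (int)send[i].size();
        for (size_t i = 0; i < ranks.size(); i++)
            MPI_Irecv(&recvSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
        for (size_t i = 0; i < ranks.size(); i++)
            MPI_Isend(&sendSizes[i], 1, MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
        if (nbRequest > 0) MPI_Waitall(nbRequest, &request[0], &status[0]);

        // phase 2: allocate the receive buffers (lengths now known) and exchange the payloads
        recv.resize(ranks.size());
        nbRequest = 0;
        for (size_t i = 0; i < ranks.size(); i++)
        {
            recv[i].resize(recvSizes[i]);
            if (recvSizes[i] > 0)
                MPI_Irecv(&recv[i][0], recvSizes[i], MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
        }
        for (size_t i = 0; i < ranks.size(); i++)
            if (!send[i].empty())
                MPI_Isend(&send[i][0], (int)send[i].size(), MPI_INT, ranks[i], 0, communicator, &request[nbRequest++]);
        if (nbRequest > 0) MPI_Waitall(nbRequest, &request[0], &status[0]);
    }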
XIOS/trunk/extern/remap/src/mpi_routing.hpp
r1638 → r1639
      {
-       ep_lib::MPI_Comm communicator;
+       MPI_Comm communicator;
        int mpiRank;
        int mpiSize;
…
      public:
-       CMPIRouting(ep_lib::MPI_Comm comm);
+       CMPIRouting(MPI_Comm comm);
        ~CMPIRouting();
        template<typename T> void init(const std::vector<T>& route, CMPICascade *cascade = NULL);
…
      template <typename T>
      void alltoalls_known(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
-                          const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+                          const std::vector<int>& ranks, MPI_Comm communicator);

      template <typename T>
      void alltoalls_unknown(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
-                            const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
+                            const std::vector<int>& ranks, MPI_Comm communicator);
      }
      #endif
XIOS/trunk/extern/remap/src/parallel_tree.cpp
r1638 → r1639
      //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
-     CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
+     CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
      {
        treeCascade.reserve(cascade.num_levels);
…
        int nrecv; // global number of samples THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
-       ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
+       MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
        double ratio = blocSize / (1.0 * nrecv);
        int nsend = ratio * n + 1; // nsend = n_local_samples / n_global_samples * blocksize + 1 = blocksize/comm.size
…
        int *counts = new int[comm.size];
-       ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);
+       MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
…
        /* each process needs the sample elements from all processes */
        double *recvBuffer = new double[nrecv*4];
-       ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
+       MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
        delete[] sendBuffer;
        delete[] counts;
…
        /*
-       MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
+       MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
        if (!allok) {
          MPI_Finalize();
        }
        */
-       ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;
+       MPI_Abort(MPI_COMM_WORLD,-1) ;
…
        CMPIRouting MPIRoute(communicator);
-       ep_lib::MPI_Barrier(communicator);
+       MPI_Barrier(communicator);
        CTimer::get("buildLocalTree(initRoute)").resume();
…
        int mpiRank;
-       ep_lib::MPI_Comm_rank(communicator, &mpiRank);
+       MPI_Comm_rank(communicator, &mpiRank);
        localTree.leafs.reserve(nbLocalElements);
…
        nb=nb1+nb2 ;
-       ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
+       MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
        int commSize ;
-       ep_lib::MPI_Comm_size(communicator,&commSize) ;
+       MPI_Comm_size(communicator,&commSize) ;
…
        // gather circles on this level of the cascade
        int pg_size;
-       ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
+       MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
        vector<Coord> allRootCentres(pg_size);
        vector<double> allRootRadia(pg_size);
-       ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
-       ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0], 1, EP_DOUBLE, cascade.level[level].pg_comm);
+       MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
+       MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0], 1, MPI_DOUBLE, cascade.level[level].pg_comm);

        // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
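The sampling code above collects per-rank sample counts with MPI_Allgather and the flattened 4-double records with MPI_Allgatherv. A simplified sketch of that pair of calls (function name and the vector-based packaging are illustrative, not the parallel_tree.cpp code):

    // Simplified sketch of the Allgather/Allgatherv pair used when gathering samples:
    // each rank contributes nsend records of 4 doubles; counts and displacements are
    // rebuilt on every rank before the variable-length gather.
    #include <mpi.h>
    #include <vector>

    std::vector<double> gatherSamples(std::vector<double>& localSamples /* 4*nsend doubles */,
                                      MPI_Comm comm)
    {
        int commSize;
        MPI_Comm_size(comm, &commSize);

        int nsend = (int)localSamples.size() / 4;
        std::vector<int> counts(commSize), displs(commSize);
        MPI_Allgather(&nsend, 1, MPI_INT, &counts[0], 1, MPI_INT, comm);

        // counts/displs are expressed in doubles (4 per sample record)
        int nrecv = 0;
        for (int i = 0; i < commSize; i++)
        {
            counts[i] *= 4;
            displs[i] = nrecv;
            nrecv += counts[i];
        }

        std::vector<double> recvBuffer(nrecv);
        MPI_Allgatherv(&localSamples[0], 4 * nsend, MPI_DOUBLE,
                       &recvBuffer[0], &counts[0], &displs[0], MPI_DOUBLE, comm);
        return recvBuffer;
    }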
XIOS/trunk/extern/remap/src/parallel_tree.hpp
r1638 → r1639
      {
      public:
-       CParallelTree(ep_lib::MPI_Comm comm);
+       CParallelTree(MPI_Comm comm);
        ~CParallelTree();
…
        vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
        CMPICascade cascade;
-       ep_lib::MPI_Comm communicator ;
+       MPI_Comm communicator ;
      };
XIOS/trunk/src/buffer_client.cpp
r1638 → r1639
      size_t CClientBuffer::maxRequestSize = 0;
-     CClientBuffer::CClientBuffer(ep_lib::MPI_Comm interComm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents)
+     CClientBuffer::CClientBuffer(MPI_Comm interComm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents)
        : interComm(interComm)
        , serverRank(serverRank)
…
      bool CClientBuffer::checkBuffer(void)
      {
-       ep_lib::MPI_Status status;
+       MPI_Status status;
        int flag;
…
        traceOff();
-       ep_lib::MPI_Test(&request, &flag, &status);
+       MPI_Test(&request, &flag, &status);
        traceOn();
        if (flag == true) pending = false;
…
        if (count > 0)
        {
-         ep_lib::MPI_Issend(buffer[current], count, EP_CHAR, serverRank, 20, interComm, &request);
+         MPI_Issend(buffer[current], count, MPI_CHAR, serverRank, 20, interComm, &request);
          pending = true;
          if (current == 1) current = 0;
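CClientBuffer::checkBuffer() polls its single outstanding request with MPI_Test and only posts a new synchronous-mode MPI_Issend once the previous one has completed. The same pattern in isolation, without the double buffering (struct and field names are illustrative):

    // Isolated sketch of the poll-then-send pattern from CClientBuffer::checkBuffer():
    // test the pending request, and start a new synchronous-mode send only when free.
    #include <mpi.h>

    struct SimpleBuffer
    {
        MPI_Comm    interComm;
        int         serverRank;
        char*       data;
        int         count;        // bytes ready to ship
        bool        pending;
        MPI_Request request;

        bool checkBuffer()
        {
            if (pending)
            {
                MPI_Status status;
                int flag = 0;
                MPI_Test(&request, &flag, &status);   // non-blocking completion check
                if (flag) pending = false;
            }

            if (!pending && count > 0)
            {
                MPI_Issend(data, count, MPI_CHAR, serverRank, 20, interComm, &request);
                pending = true;
                count = 0;
            }
            return pending;
        }
    };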
XIOS/trunk/src/buffer_client.hpp
r1638 → r1639
        static size_t maxRequestSize;
-       CClientBuffer(ep_lib::MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
+       CClientBuffer(MPI_Comm intercomm, int serverRank, StdSize bufferSize, StdSize estimatedMaxEventSize, StdSize maxBufferedEvents);
        ~CClientBuffer();
…
        bool pending;
-       ep_lib::MPI_Request request;
+       MPI_Request request;
        CBufferOut* retBuffer;
-       const ep_lib::MPI_Comm interComm;
+       const MPI_Comm interComm;
      };
      }
XIOS/trunk/src/client.cpp
r1638 → r1639: the commented-out include of "mpi_wrapper.hpp" is dropped, and every ep_lib:: type, call and EP_* constant (EP_COMM_NULL, EP_INT, EP_LONG, EP_CHAR, ...) is replaced by its standard MPI equivalent. For example:
        #include "mpi.hpp"
-       //#include "mpi_wrapper.hpp"
        #include "timer.hpp"
…
-     ep_lib::MPI_Comm CClient::intraComm ;
-     ep_lib::MPI_Comm CClient::interComm ;
-     std::list<ep_lib::MPI_Comm> CClient::contextInterComms;
+     MPI_Comm CClient::intraComm ;
+     MPI_Comm CClient::interComm ;
+     std::list<MPI_Comm> CClient::contextInterComms;
…
-     ep_lib::MPI_Comm& CClient::getInterComm(void) { return (interComm); }
+     MPI_Comm& CClient::getInterComm(void) { return (interComm); }
…
-     void CClient::initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm)
+     void CClient::initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm)
…
-       if (localComm == EP_COMM_NULL)
+       if (localComm == MPI_COMM_NULL)
…
-       ep_lib::MPI_Allgather(&hashClient,1,EP_LONG,hashAll,1,EP_LONG,CXios::globalComm) ;
+       MPI_Allgather(&hashClient,1,MPI_LONG,hashAll,1,MPI_LONG,CXios::globalComm) ;
…
-       ep_lib::MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
+       MPI_Comm_split(CXios::globalComm,myColor,rank_,&intraComm) ;
…
-       ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
+       MPI_Intercomm_create(intraComm, 0, CXios::globalComm, serverLeader, 0, &interComm) ;
…
-     void CClient::registerContext(const string& id, ep_lib::MPI_Comm contextComm)
+     void CClient::registerContext(const string& id, MPI_Comm contextComm)
…
-       for (std::list<ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
-         ep_lib::MPI_Comm_free(&(*it));
-       ep_lib::MPI_Comm_free(&interComm);
-       ep_lib::MPI_Comm_free(&intraComm);
+       for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)
+         MPI_Comm_free(&(*it));
+       MPI_Comm_free(&interComm);
+       MPI_Comm_free(&intraComm);
…
-       else ep_lib::MPI_Finalize() ;
+       else MPI_Finalize() ;
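CClient::initialize() splits the global communicator by a colour derived from the code id hash and then bridges the client group to the server leader with MPI_Intercomm_create. A reduced sketch of those two steps; globalComm, myColor and serverLeader stand for the values XIOS computes and are assumptions here:

    // Reduced sketch of the communicator setup in CClient::initialize():
    // split the global communicator by colour, then connect to the server leader.
    #include <mpi.h>

    void initClientComms(MPI_Comm globalComm, int myColor, int serverLeader,
                         MPI_Comm& intraComm, MPI_Comm& interComm)
    {
        int rank;
        MPI_Comm_rank(globalComm, &rank);

        // all ranks with the same colour (same client code) share an intraComm
        MPI_Comm_split(globalComm, myColor, rank, &intraComm);

        // local leader 0 of intraComm is bridged to the server leader in globalComm
        MPI_Intercomm_create(intraComm, 0, globalComm, serverLeader, 0, &interComm);
    }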
XIOS/trunk/src/client.hpp
r1638 → r1639
      {
      public:
-       static void initialize(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm);
+       static void initialize(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm);
        static void finalize(void);
-       static void registerContext(const string& id, ep_lib::MPI_Comm contextComm);
+       static void registerContext(const string& id, MPI_Comm contextComm);
        static void callOasisEnddef(void) ;

-       static ep_lib::MPI_Comm intraComm;
-       static ep_lib::MPI_Comm interComm;
-       static std::list<ep_lib::MPI_Comm> contextInterComms;
+       static MPI_Comm intraComm;
+       static MPI_Comm interComm;
+       static std::list<MPI_Comm> contextInterComms;
        static int serverLeader;
        static bool is_MPI_Initialized ;

-       static ep_lib::MPI_Comm& getInterComm();
+       static MPI_Comm& getInterComm();

        //! Get global rank without oasis and current rank in model intraComm in case of oasis
XIOS/trunk/src/client_client_dht_template.hpp
r1638 → r1639: every ep_lib::MPI_Comm in the constructors and helper declarations becomes MPI_Comm, the request containers become std::vector<MPI_Request>, and the duplicate overloads of sendInfoToClients / recvInfoFromClients / sendIndexToClients / recvIndexFromClients that took a raw ep_lib::MPI_Request* are removed. For example:
      public:
        CClientClientDHTTemplate(const Index2InfoTypeMap& indexInfoInitMap,
-                                const ep_lib::MPI_Comm& clientIntraComm);
+                                const MPI_Comm& clientIntraComm);

        CClientClientDHTTemplate(const Index2VectorInfoTypeMap& indexInfoInitMap,
-                                const ep_lib::MPI_Comm& clientIntraComm);
+                                const MPI_Comm& clientIntraComm);
…
        // Send information to clients
        void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                              const ep_lib::MPI_Comm& clientIntraComm,
-                              std::vector<ep_lib::MPI_Request>& requestSendInfo);
-       void sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                              const ep_lib::MPI_Comm& clientIntraComm,
-                              ep_lib::MPI_Request* requestSendInfo);
+                              const MPI_Comm& clientIntraComm,
+                              std::vector<MPI_Request>& requestSendInfo);
…
        void sendRecvOnReturn(const std::vector<int>& sendNbRank, std::vector<int>& sendNbElements,
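After this change the DHT helpers take only a std::vector<MPI_Request>&: each nonblocking call appends its request to the shared vector, which is completed with a single MPI_Waitall. A reduced sketch of that convention (the tag constant MPI_DHT_INDEX is replaced by a placeholder value here):

    // Reduced sketch of the request-vector convention used by the DHT helpers in r1639:
    // every nonblocking call appends its request to one vector, completed by MPI_Waitall.
    #include <mpi.h>
    #include <vector>

    void sendIndexSketch(int clientDestRank, size_t* indices, size_t indiceSize,
                         const MPI_Comm& clientIntraComm, std::vector<MPI_Request>& requestSendIndex)
    {
        MPI_Request request;
        requestSendIndex.push_back(request);
        MPI_Isend(indices, (int)indiceSize, MPI_UNSIGNED_LONG,
                  clientDestRank, /*tag=*/7, clientIntraComm, &(requestSendIndex.back()));
    }

    void waitAllSketch(std::vector<MPI_Request>& requests)
    {
        std::vector<MPI_Status> status(requests.size());
        if (!requests.empty())
            MPI_Waitall((int)requests.size(), &requests[0], &status[0]);
    }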
XIOS/trunk/src/client_client_dht_template_impl.hpp
r1638 → r1639: the same ep_lib-to-MPI substitution is applied (MPI_Comm, MPI_Request, MPI_Status, MPI_UNSIGNED_LONG, MPI_CHAR, ...). In addition, the code that pre-computed a request count and indexed into a fixed-size std::vector<ep_lib::MPI_Request> is replaced by a plain std::vector<MPI_Request> that the helpers push_back into, the duplicated helper overloads taking a raw MPI_Request* are deleted, and the commented-out debug printf lines are removed. For example:
-     CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const ep_lib::MPI_Comm& clientIntraComm)
+     CClientClientDHTTemplate<T,H>::CClientClientDHTTemplate(const MPI_Comm& clientIntraComm)
        : H(clientIntraComm), index2InfoMapping_(), indexToInfoMappingLevel_(), nbClient_(0)
      {
-       ep_lib::MPI_Comm_size(clientIntraComm, &nbClient_);
+       MPI_Comm_size(clientIntraComm, &nbClient_);
        this->computeMPICommLevel();
…
-       int request_size = 0;
-       for (int idx = 0; idx < recvRankClient.size(); ++idx)
-       {
-         if (0 != recvNbIndexClientCount[idx])
-           request_size ++;
-       }
-       request_size += client2ClientIndex.size();
-       std::vector<ep_lib::MPI_Request> request(request_size);
+       std::vector<MPI_Request> request;
…
-         if (0 != recvNbIndexClientCount[idx])
-           recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, &request[request_position++]);
+         if (0 != recvNbIndexClientCount[idx])
+           recvIndexFromClients(recvRankClient[idx], recvIndexBuff+currentIndex, recvNbIndexClientCount[idx], commLevel, request);
…
-       sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, &request[request_position++]);
+       sendIndexToClients(itIndex->first, (itIndex->second), sendNbIndexBuff[itIndex->first-groupRankBegin], commLevel, request);
-       std::vector<ep_lib::MPI_Status> status(request.size());
-       ep_lib::MPI_Waitall(request.size(), &request[0], &status[0]);
+       std::vector<MPI_Status> status(request.size());
+       MPI_Waitall(request.size(), &request[0], &status[0]);
…
      template<typename T, typename H>
      void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                            const ep_lib::MPI_Comm& clientIntraComm,
-                                                            std::vector<ep_lib::MPI_Request>& requestSendIndex)
+                                                            const MPI_Comm& clientIntraComm,
+                                                            std::vector<MPI_Request>& requestSendIndex)
      {
-       ep_lib::MPI_Request request;
+       MPI_Request request;
        requestSendIndex.push_back(request);
-       ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
+       MPI_Isend(indices, indiceSize, MPI_UNSIGNED_LONG,
                  clientDestRank, MPI_DHT_INDEX, clientIntraComm, &(requestSendIndex.back()));
      }
…
-     template<typename T, typename H>
-     void CClientClientDHTTemplate<T,H>::sendIndexToClients(int clientDestRank, size_t* indices, size_t indiceSize,
-                                                            const ep_lib::MPI_Comm& clientIntraComm,
-                                                            ep_lib::MPI_Request* requestSendIndex)
-     {
-       ep_lib::MPI_Isend(indices, indiceSize, EP_UNSIGNED_LONG,
-                 clientDestRank, MPI_DHT_INDEX, clientIntraComm, requestSendIndex);
-     }
(the corresponding raw-pointer overloads of recvIndexFromClients, sendInfoToClients and recvInfoFromClients are deleted in the same way)
…
      template<typename T, typename H>
      void CClientClientDHTTemplate<T,H>::sendInfoToClients(int clientDestRank, unsigned char* info, int infoSize,
-                                                           const ep_lib::MPI_Comm&
clientIntraComm,659 ep_lib::MPI_Request *requestSendInfo)660 {661 ep_lib::MPI_Isend(info, infoSize, EP_CHAR,662 clientDestRank, MPI_DHT_INFO, clientIntraComm, requestSendInfo);663 565 } 664 566 … … 673 575 template<typename T, typename H> 674 576 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize, 675 const ep_lib::MPI_Comm& clientIntraComm,676 std::vector< ep_lib::MPI_Request>& requestRecvInfo)677 { 678 ep_lib::MPI_Request request;577 const MPI_Comm& clientIntraComm, 578 std::vector<MPI_Request>& requestRecvInfo) 579 { 580 MPI_Request request; 679 581 requestRecvInfo.push_back(request); 680 582 681 ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,583 MPI_Irecv(info, infoSize, MPI_CHAR, 682 584 clientSrcRank, MPI_DHT_INFO, clientIntraComm, &(requestRecvInfo.back())); 683 }684 685 /*!686 Receive message containing information from other clients687 \param [in] clientDestRank rank of destination client688 \param [in] info info array to receive689 \param [in] infoSize info array size to receive690 \param [in] clientIntraComm communication group of client691 \param [in] requestRecvInfo list of receiving request692 */693 template<typename T, typename H>694 void CClientClientDHTTemplate<T,H>::recvInfoFromClients(int clientSrcRank, unsigned char* info, int infoSize,695 const ep_lib::MPI_Comm& clientIntraComm,696 ep_lib::MPI_Request* requestRecvInfo)697 {698 ep_lib::MPI_Irecv(info, infoSize, EP_CHAR,699 clientSrcRank, MPI_DHT_INFO, clientIntraComm, requestRecvInfo);700 585 } 701 586 … … 766 651 { 767 652 recvNbElements.resize(recvNbRank.size()); 768 std::vector< ep_lib::MPI_Request> request(sendNbRank.size()+recvNbRank.size());769 std::vector< ep_lib::MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size());653 std::vector<MPI_Request> request(sendNbRank.size()+recvNbRank.size()); 654 std::vector<MPI_Status> requestStatus(sendNbRank.size()+recvNbRank.size()); 770 655 771 656 int nRequest = 0; 772 657 for (int idx = 0; idx < recvNbRank.size(); ++idx) 773 658 { 774 ep_lib::MPI_Irecv(&recvNbElements[0]+idx, 1, EP_INT,659 MPI_Irecv(&recvNbElements[0]+idx, 1, MPI_INT, 775 660 recvNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 776 661 ++nRequest; … … 779 664 for (int idx = 0; idx < sendNbRank.size(); ++idx) 780 665 { 781 ep_lib::MPI_Isend(&sendNbElements[0]+idx, 1, EP_INT,666 MPI_Isend(&sendNbElements[0]+idx, 1, MPI_INT, 782 667 sendNbRank[idx], MPI_DHT_INDEX_1, this->internalComm_, &request[nRequest]); 783 668 ++nRequest; 784 669 } 785 670 786 ep_lib::MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]);671 MPI_Waitall(sendNbRank.size()+recvNbRank.size(), &request[0], &requestStatus[0]); 787 672 } 788 673 … … 811 696 std::vector<int> recvBuff(recvBuffSize*2,0); 812 697 813 std::vector< ep_lib::MPI_Request> request(sendBuffSize+recvBuffSize);814 std::vector< ep_lib::MPI_Status> requestStatus(sendBuffSize+recvBuffSize);698 std::vector<MPI_Request> request(sendBuffSize+recvBuffSize); 699 std::vector<MPI_Status> requestStatus(sendBuffSize+recvBuffSize); 815 700 816 701 int nRequest = 0; 817 702 for (int idx = 0; idx < recvBuffSize; ++idx) 818 703 { 819 ep_lib::MPI_Irecv(&recvBuff[0]+2*idx, 2, EP_INT,704 MPI_Irecv(&recvBuff[0]+2*idx, 2, MPI_INT, 820 705 recvRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 821 706 ++nRequest; … … 831 716 for (int idx = 0; idx < sendBuffSize; ++idx) 832 717 { 833 ep_lib::MPI_Isend(&sendBuff[idx*2], 2, EP_INT,718 MPI_Isend(&sendBuff[idx*2], 2, 
MPI_INT, 834 719 sendRank[idx], MPI_DHT_INDEX_0, this->internalComm_, &request[nRequest]); 835 720 ++nRequest; 836 721 } 837 722 838 ep_lib::MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]);723 MPI_Waitall(sendBuffSize+recvBuffSize, &request[0], &requestStatus[0]); 839 724 int nbRecvRank = 0, nbRecvElements = 0; 840 725 recvNbRank.clear(); -
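The hunks above replace the pre-counted `ep_lib` request arrays with a plain `std::vector<MPI_Request>` that the send/receive helpers grow on demand: each helper pushes a fresh handle and lets MPI fill it in through `&requests.back()`, and a single `MPI_Waitall` completes everything. A minimal standalone sketch of that pattern (helper names, tag and the ring exchange are illustrative, not XIOS code); storing handles by value is safe because later reallocations only copy handle values that MPI has already written.

```cpp
#include <mpi.h>
#include <vector>

// Post a non-blocking send and remember its request handle in the shared vector.
static void postSend(int dest, unsigned long* buf, int count,
                     MPI_Comm comm, std::vector<MPI_Request>& requests)
{
  requests.push_back(MPI_Request());
  MPI_Isend(buf, count, MPI_UNSIGNED_LONG, dest, 0, comm, &requests.back());
}

// Post a non-blocking receive the same way.
static void postRecv(int src, unsigned long* buf, int count,
                     MPI_Comm comm, std::vector<MPI_Request>& requests)
{
  requests.push_back(MPI_Request());
  MPI_Irecv(buf, count, MPI_UNSIGNED_LONG, src, 0, comm, &requests.back());
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  unsigned long sendVal = rank, recvVal = 0;
  std::vector<MPI_Request> requests;                 // grows as messages are posted
  postRecv((rank + size - 1) % size, &recvVal, 1, MPI_COMM_WORLD, requests);
  postSend((rank + 1) % size, &sendVal, 1, MPI_COMM_WORLD, requests);

  // One Waitall completes every request collected above, as in the hunks.
  std::vector<MPI_Status> status(requests.size());
  MPI_Waitall((int)requests.size(), &requests[0], &status[0]);

  MPI_Finalize();
  return 0;
}
```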
XIOS/trunk/src/client_server_mapping.cpp
r1638 r1639 30 30 */ 31 31 std::map<int,int> CClientServerMapping::computeConnectedClients(int nbServer, int nbClient, 32 ep_lib::MPI_Comm& clientIntraComm,32 MPI_Comm& clientIntraComm, 33 33 const std::vector<int>& connectedServerRank) 34 34 { … … 62 62 63 63 // get connected server for everybody 64 ep_lib::MPI_Allgather(&nbConnectedServer,1,EP_INT,recvCount,1,EP_INT,clientIntraComm) ;64 MPI_Allgather(&nbConnectedServer,1,MPI_INT,recvCount,1,MPI_INT,clientIntraComm) ; 65 65 66 66 displ[0]=0 ; … … 70 70 71 71 72 ep_lib::MPI_Allgatherv(sendBuff,nbConnectedServer,EP_INT,recvBuff,recvCount,displ,EP_INT,clientIntraComm) ;72 MPI_Allgatherv(sendBuff,nbConnectedServer,MPI_INT,recvBuff,recvCount,displ,MPI_INT,clientIntraComm) ; 73 73 for(int n=0;n<recvSize;n++) clientRes[recvBuff[n]]++ ; 74 74 -
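client_server_mapping.cpp keeps its two-phase collective: `MPI_Allgather` exchanges per-rank counts, then `MPI_Allgatherv` collects the variable-length lists of connected servers. A small self-contained sketch of that idiom (the list contents and sizes are invented for illustration):

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  // Invented variable-length local list (rank+1 entries) to exercise Allgatherv.
  std::vector<int> mine(rank + 1, rank);
  int localCount = (int)mine.size();

  // Phase 1: every rank learns every other rank's count.
  std::vector<int> counts(size), displs(size, 0);
  MPI_Allgather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, MPI_COMM_WORLD);

  // Phase 2: build displacements and gather the lists themselves.
  for (int i = 1; i < size; ++i) displs[i] = displs[i - 1] + counts[i - 1];
  std::vector<int> all(displs[size - 1] + counts[size - 1]);
  MPI_Allgatherv(&mine[0], localCount, MPI_INT,
                 &all[0], &counts[0], &displs[0], MPI_INT, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```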
XIOS/trunk/src/client_server_mapping.hpp
r1638 r1639 37 37 38 38 static std::map<int,int> computeConnectedClients(int nbServer, int nbClient, 39 ep_lib::MPI_Comm& clientIntraComm,39 MPI_Comm& clientIntraComm, 40 40 const std::vector<int>& connectedServerRank); 41 41 -
XIOS/trunk/src/client_server_mapping_distributed.cpp
r1638 r1639 20 20 21 21 CClientServerMappingDistributed::CClientServerMappingDistributed(const std::unordered_map<size_t,int>& globalIndexOfServer, 22 const ep_lib::MPI_Comm& clientIntraComm, bool isDataDistributed)22 const MPI_Comm& clientIntraComm, bool isDataDistributed) 23 23 : CClientServerMapping(), ccDHT_(0) 24 24 { -
XIOS/trunk/src/client_server_mapping_distributed.hpp
r1638 r1639 35 35 /** Default constructor */ 36 36 CClientServerMappingDistributed(const std::unordered_map<size_t,int>& globalIndexOfServer, 37 const ep_lib::MPI_Comm& clientIntraComm,37 const MPI_Comm& clientIntraComm, 38 38 bool isDataDistributed = true); 39 39 -
XIOS/trunk/src/context_client.cpp
r1638 r1639 21 21 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used in case of attached mode). 22 22 */ 23 CContextClient::CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm_, ep_lib::MPI_Comm interComm_, CContext* cxtSer)23 CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer) 24 24 : mapBufferSize_(), parentServer(cxtSer), maxBufferedEvents(4) 25 25 { … … 27 27 intraComm = intraComm_; 28 28 interComm = interComm_; 29 ep_lib::MPI_Comm_rank(intraComm, &clientRank);30 ep_lib::MPI_Comm_size(intraComm, &clientSize);29 MPI_Comm_rank(intraComm, &clientRank); 30 MPI_Comm_size(intraComm, &clientSize); 31 31 32 32 int flag; 33 ep_lib::MPI_Comm_test_inter(interComm, &flag);34 if (flag) ep_lib::MPI_Comm_remote_size(interComm, &serverSize);35 else ep_lib::MPI_Comm_size(interComm, &serverSize);33 MPI_Comm_test_inter(interComm, &flag); 34 if (flag) MPI_Comm_remote_size(interComm, &serverSize); 35 else MPI_Comm_size(interComm, &serverSize); 36 36 37 37 computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader); … … 102 102 classId_in=event.getClassId() ; 103 103 // MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ; // MPI_UINT64_T standardized by MPI 3 104 ep_lib::MPI_Allreduce(&timeLine,&timeLine_out, 1, EP_LONG_LONG_INT, EP_SUM, intraComm) ;105 ep_lib::MPI_Allreduce(&typeId_in,&typeId, 1, EP_INT, EP_SUM, intraComm) ;106 ep_lib::MPI_Allreduce(&classId_in,&classId, 1, EP_INT, EP_SUM, intraComm) ;104 MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_LONG_LONG_INT, MPI_SUM, intraComm) ; 105 MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm) ; 106 MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm) ; 107 107 if (typeId/clientSize!=event.getTypeId() || classId/clientSize!=event.getClassId() || timeLine_out/clientSize!=timeLine) 108 108 { … … 343 343 if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio; 344 344 } 345 #ifdef _usingMPI 346 ep_lib::MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm); 347 #elif _usingEP 348 ep_lib::MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, EP_DOUBLE, EP_MIN, intraComm); 349 #endif 345 MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 350 346 351 347 if (minBufferSizeEventSizeRatio < 1.0) -
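The context_client.cpp hunk drops the `_usingEP` branch and keeps only the standard in-place reduction. A short sketch of the `MPI_IN_PLACE` form, with a made-up value standing in for the buffer-size/event-size ratio:

```cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Stand-in for the per-rank buffer-size / event-size ratio.
  double minRatio = 1.0 + rank;

  // MPI_IN_PLACE: each rank reduces into its own buffer, no separate send copy.
  MPI_Allreduce(MPI_IN_PLACE, &minRatio, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);

  if (rank == 0) std::printf("global minimum ratio = %g\n", minRatio);
  MPI_Finalize();
  return 0;
}
```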
XIOS/trunk/src/context_client.hpp
r1638 r1639 27 27 public: 28 28 // Contructor 29 CContextClient(CContext* parent, ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* parentServer = 0);29 CContextClient(CContext* parent, MPI_Comm intraComm, MPI_Comm interComm, CContext* parentServer = 0); 30 30 31 31 // Send event to server … … 71 71 int serverSize; //!< Size of server group 72 72 73 ep_lib::MPI_Comm interComm; //!< Communicator of server group73 MPI_Comm interComm; //!< Communicator of server group 74 74 75 ep_lib::MPI_Comm intraComm; //!< Communicator of client group75 MPI_Comm intraComm; //!< Communicator of client group 76 76 77 77 map<int,CClientBuffer*> buffers; //!< Buffers for connection to servers -
XIOS/trunk/src/context_server.cpp
r1638 r1639 23 23 { 24 24 25 CContextServer::CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm_,ep_lib::MPI_Comm interComm_)25 CContextServer::CContextServer(CContext* parent,MPI_Comm intraComm_,MPI_Comm interComm_) 26 26 { 27 27 context=parent; 28 28 intraComm=intraComm_; 29 ep_lib::MPI_Comm_size(intraComm,&intraCommSize);30 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank);29 MPI_Comm_size(intraComm,&intraCommSize); 30 MPI_Comm_rank(intraComm,&intraCommRank); 31 31 32 32 interComm=interComm_; 33 33 int flag; 34 ep_lib::MPI_Comm_test_inter(interComm,&flag);35 if (flag) ep_lib::MPI_Comm_remote_size(interComm,&commSize);36 else ep_lib::MPI_Comm_size(interComm,&commSize);34 MPI_Comm_test_inter(interComm,&flag); 35 if (flag) MPI_Comm_remote_size(interComm,&commSize); 36 else MPI_Comm_size(interComm,&commSize); 37 37 38 38 currentTimeLine=0; … … 76 76 int count; 77 77 char * addr; 78 ep_lib::MPI_Status status;78 MPI_Status status; 79 79 map<int,CServerBuffer*>::iterator it; 80 80 bool okLoop; 81 81 82 82 traceOff(); 83 #ifdef _usingMPI84 83 MPI_Iprobe(MPI_ANY_SOURCE, 20,interComm,&flag,&status); 85 #elif _usingEP86 ep_lib::MPI_Iprobe(-2, 20,interComm,&flag,&status);87 #endif88 84 traceOn(); 89 85 90 86 if (flag==true) 91 87 { 92 #ifdef _usingMPI93 88 rank=status.MPI_SOURCE ; 94 #elif _usingEP95 rank=status.ep_src ;96 #endif97 89 okLoop = true; 98 90 if (pendingRequest.find(rank)==pendingRequest.end()) … … 106 98 107 99 traceOff(); 108 ep_lib::MPI_Iprobe(rank, 20,interComm,&flag,&status);100 MPI_Iprobe(rank, 20,interComm,&flag,&status); 109 101 traceOn(); 110 102 if (flag==true) listenPendingRequest(status) ; … … 115 107 } 116 108 117 bool CContextServer::listenPendingRequest( ep_lib::MPI_Status& status)109 bool CContextServer::listenPendingRequest(MPI_Status& status) 118 110 { 119 111 int count; 120 112 char * addr; 121 113 map<int,CServerBuffer*>::iterator it; 122 #ifdef _usingMPI123 114 int rank=status.MPI_SOURCE ; 124 #elif _usingEP125 int rank=status.ep_src ;126 #endif127 115 128 116 it=buffers.find(rank); … … 130 118 { 131 119 StdSize buffSize = 0; 132 ep_lib::MPI_Recv(&buffSize, 1, EP_LONG, rank, 20, interComm, &status);120 MPI_Recv(&buffSize, 1, MPI_LONG, rank, 20, interComm, &status); 133 121 mapBufferSize_.insert(std::make_pair(rank, buffSize)); 134 122 it=(buffers.insert(pair<int,CServerBuffer*>(rank,new CServerBuffer(buffSize)))).first; … … 137 125 else 138 126 { 139 ep_lib::MPI_Get_count(&status,EP_CHAR,&count);127 MPI_Get_count(&status,MPI_CHAR,&count); 140 128 if (it->second->isBufferFree(count)) 141 129 { 142 130 addr=(char*)it->second->getBuffer(count); 143 ep_lib::MPI_Irecv(addr,count,EP_CHAR,rank,20,interComm,&pendingRequest[rank]);131 MPI_Irecv(addr,count,MPI_CHAR,rank,20,interComm,&pendingRequest[rank]); 144 132 bufferRequest[rank]=addr; 145 133 return true; … … 153 141 void CContextServer::checkPendingRequest(void) 154 142 { 155 map<int, ep_lib::MPI_Request>::iterator it;143 map<int,MPI_Request>::iterator it; 156 144 list<int> recvRequest; 157 145 list<int>::iterator itRecv; … … 159 147 int flag; 160 148 int count; 161 ep_lib::MPI_Status status;149 MPI_Status status; 162 150 163 151 for(it=pendingRequest.begin();it!=pendingRequest.end();it++) … … 165 153 rank=it->first; 166 154 traceOff(); 167 ep_lib::MPI_Test(& it->second, &flag, &status);155 MPI_Test(& it->second, &flag, &status); 168 156 traceOn(); 169 157 if (flag==true) 170 158 { 171 159 recvRequest.push_back(rank); 172 ep_lib::MPI_Get_count(&status,EP_CHAR,&count);160 MPI_Get_count(&status,MPI_CHAR,&count); 
173 161 processRequest(rank,bufferRequest[rank],count); 174 162 } … … 230 218 // The best way to properly solve this problem will be to use the event scheduler also in attached mode 231 219 // for now just set up a MPI barrier 232 if (!CServer::eventScheduler && CXios::isServer) ep_lib::MPI_Barrier(intraComm) ;220 if (!CServer::eventScheduler && CXios::isServer) MPI_Barrier(intraComm) ; 233 221 234 222 CTimer::get("Process events").resume(); -
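context_server.cpp returns to the stock probe-and-receive loop: `MPI_Iprobe` spots a pending message, `MPI_Get_count` sizes the buffer, and `MPI_Irecv` posts the matching receive from `status.MPI_SOURCE`. A simplified standalone version follows (tag and payload sizes are illustrative; the real server also manages per-rank buffers and pending-request maps):

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  const int tag = 20;                            // arbitrary tag for this sketch

  if (rank != 0)                                 // clients: send a variable-sized message
  {
    std::vector<char> msg(100 * rank, 'x');
    MPI_Send(&msg[0], (int)msg.size(), MPI_CHAR, 0, tag, MPI_COMM_WORLD);
  }
  else                                           // server: probe, size, then receive
  {
    int received = 0;
    while (received < size - 1)
    {
      int flag = 0;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, tag, MPI_COMM_WORLD, &flag, &status);
      if (!flag) continue;                       // nothing pending, poll again

      int count = 0;
      MPI_Get_count(&status, MPI_CHAR, &count);  // size the buffer from the probe
      std::vector<char> buffer(count);
      MPI_Request request;
      MPI_Irecv(&buffer[0], count, MPI_CHAR, status.MPI_SOURCE, tag,
                MPI_COMM_WORLD, &request);
      MPI_Wait(&request, MPI_STATUS_IGNORE);
      ++received;
    }
  }

  MPI_Finalize();
  return 0;
}
```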
XIOS/trunk/src/context_server.hpp
r1638 r1639 14 14 public: 15 15 16 CContextServer(CContext* parent, ep_lib::MPI_Comm intraComm,ep_lib::MPI_Comm interComm) ;16 CContextServer(CContext* parent,MPI_Comm intraComm,MPI_Comm interComm) ; 17 17 bool eventLoop(bool enableEventsProcessing = true); 18 18 void listen(void) ; 19 bool listenPendingRequest( ep_lib::MPI_Status& status) ;19 bool listenPendingRequest(MPI_Status& status) ; 20 20 void checkPendingRequest(void) ; 21 21 void processRequest(int rank, char* buff,int count) ; … … 26 26 bool hasPendingEvent(void) ; 27 27 28 ep_lib::MPI_Comm intraComm ;28 MPI_Comm intraComm ; 29 29 int intraCommSize ; 30 30 int intraCommRank ; 31 31 32 ep_lib::MPI_Comm interComm ;32 MPI_Comm interComm ; 33 33 int commSize ; 34 34 35 35 map<int,CServerBuffer*> buffers ; 36 map<int, ep_lib::MPI_Request> pendingRequest ;36 map<int,MPI_Request> pendingRequest ; 37 37 map<int,char*> bufferRequest ; 38 38 -
XIOS/trunk/src/cxios.cpp
r1638 r1639 26 26 bool CXios::isClient ; 27 27 bool CXios::isServer ; 28 ep_lib::MPI_Comm CXios::globalComm ;28 MPI_Comm CXios::globalComm ; 29 29 bool CXios::usingOasis ; 30 30 bool CXios::usingServer = false; … … 90 90 91 91 checkEventSync = getin<bool>("check_event_sync", checkEventSync); 92 #ifdef _usingMPI 92 93 93 globalComm=MPI_COMM_WORLD ; 94 #elif _usingEP95 ep_lib::MPI_Comm *ep_comm;96 ep_lib::MPI_Info info;97 ep_lib::MPI_Comm_create_endpoints(EP_COMM_WORLD->mpi_comm, 1, info, ep_comm);98 ep_lib::passage = ep_comm;99 globalComm=ep_lib::passage[0] ;100 #endif101 94 } 102 95 … … 107 100 \param [in/out] returnComm communicator corresponding to group of client with same codeId 108 101 */ 109 void CXios::initClientSide(const string& codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm)102 void CXios::initClientSide(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm) 110 103 TRY 111 104 { 105 initialize() ; 106 112 107 isClient = true; 113 isServer = false;114 115 initialize() ;116 117 108 118 109 CClient::initialize(codeId,localComm,returnComm) ; 119 110 if (CClient::getRank()==0) globalRegistry = new CRegistry(returnComm) ; 120 121 111 122 112 // If there are no server processes then we are in attached mode … … 177 167 void CXios::initServerSide(void) 178 168 { 179 isClient = false;180 isServer = true;181 182 169 initServer(); 183 170 isClient = false; 184 171 isServer = true; 172 185 173 // Initialize all aspects MPI 186 174 CServer::initialize(); … … 235 223 int firstPoolGlobalRank = secondaryServerGlobalRanks[0]; 236 224 int rankGlobal; 237 ep_lib::MPI_Comm_rank(globalComm, &rankGlobal);225 MPI_Comm_rank(globalComm, &rankGlobal); 238 226 239 227 // Merge registries defined on each pools … … 247 235 globalRegistrySndServers.mergeRegistry(*globalRegistry) ; 248 236 int registrySize = globalRegistrySndServers.size(); 249 ep_lib::MPI_Send(®istrySize,1,EP_LONG,firstPoolGlobalRank,15,CXios::globalComm) ;237 MPI_Send(®istrySize,1,MPI_LONG,firstPoolGlobalRank,15,CXios::globalComm) ; 250 238 CBufferOut buffer(registrySize) ; 251 239 globalRegistrySndServers.toBuffer(buffer) ; 252 ep_lib::MPI_Send(buffer.start(),registrySize,EP_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ;240 MPI_Send(buffer.start(),registrySize,MPI_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ; 253 241 } 254 242 } … … 257 245 if (rankGlobal == firstPoolGlobalRank) 258 246 { 259 ep_lib::MPI_Status status;247 MPI_Status status; 260 248 char* recvBuff; 261 249 … … 266 254 int rank = secondaryServerGlobalRanks[i]; 267 255 int registrySize = 0; 268 ep_lib::MPI_Recv(®istrySize, 1, EP_LONG, rank, 15, CXios::globalComm, &status);256 MPI_Recv(®istrySize, 1, MPI_LONG, rank, 15, CXios::globalComm, &status); 269 257 recvBuff = new char[registrySize]; 270 ep_lib::MPI_Recv(recvBuff, registrySize, EP_CHAR, rank, 15, CXios::globalComm, &status);258 MPI_Recv(recvBuff, registrySize, MPI_CHAR, rank, 15, CXios::globalComm, &status); 271 259 CBufferIn buffer(recvBuff, registrySize) ; 272 260 CRegistry recvRegistry; -
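cxios.cpp's registry merge keeps its two-message protocol: the serialized size is sent first (tag 15), then the byte buffer, so the receiver can allocate before posting the second receive. An illustrative sketch with a dummy payload standing in for the serialized CRegistry:

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  const int tag = 15;

  if (rank == 1)
  {
    char payload[] = "serialized registry";               // stand-in for CBufferOut
    long size = (long)sizeof(payload);
    MPI_Send(&size, 1, MPI_LONG, 0, tag, MPI_COMM_WORLD);             // step 1: size
    MPI_Send(payload, (int)size, MPI_CHAR, 0, tag, MPI_COMM_WORLD);   // step 2: bytes
  }
  else if (rank == 0)
  {
    MPI_Status status;
    long size = 0;
    MPI_Recv(&size, 1, MPI_LONG, 1, tag, MPI_COMM_WORLD, &status);
    std::vector<char> recvBuff(size);                     // allocate, then receive
    MPI_Recv(&recvBuff[0], (int)size, MPI_CHAR, 1, tag, MPI_COMM_WORLD, &status);
  }

  MPI_Finalize();
  return 0;
}
```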
XIOS/trunk/src/cxios.hpp
r1638 r1639 15 15 public: 16 16 static void initialize(void) ; 17 static void initClientSide(const string & codeId, ep_lib::MPI_Comm& localComm, ep_lib::MPI_Comm& returnComm) ;17 static void initClientSide(const string & codeId, MPI_Comm& localComm, MPI_Comm& returnComm) ; 18 18 static void initServerSide(void) ; 19 19 static void clientFinalize(void) ; … … 40 40 static bool isServer ; //!< Check if xios is server 41 41 42 static ep_lib::MPI_Comm globalComm ; //!< Global communicator42 static MPI_Comm globalComm ; //!< Global communicator 43 43 44 44 static bool printLogs2Files; //!< Printing out logs into files -
XIOS/trunk/src/dht_auto_indexing.cpp
r1638 r1639 22 22 23 23 CDHTAutoIndexing::CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 24 const ep_lib::MPI_Comm& clientIntraComm)24 const MPI_Comm& clientIntraComm) 25 25 : CClientClientDHTTemplate<size_t>(clientIntraComm) 26 26 { … … 28 28 nbIndexOnProc_ = hashValue.size(); 29 29 size_t nbIndexAccum; 30 ep_lib::MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, EP_UNSIGNED_LONG, EP_SUM, clientIntraComm);30 MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, clientIntraComm); 31 31 32 32 // Broadcasting the total number of indexes 33 33 int rank, size; 34 ep_lib::MPI_Comm_rank(clientIntraComm, &rank);35 ep_lib::MPI_Comm_size(clientIntraComm, &size);34 MPI_Comm_rank(clientIntraComm, &rank); 35 MPI_Comm_size(clientIntraComm, &size); 36 36 if (rank == (size-1)) nbIndexesGlobal_ = nbIndexAccum; 37 ep_lib::MPI_Bcast(&nbIndexesGlobal_, 1, EP_UNSIGNED_LONG, size-1, clientIntraComm);37 MPI_Bcast(&nbIndexesGlobal_, 1, MPI_UNSIGNED_LONG, size-1, clientIntraComm); 38 38 39 39 CArray<size_t,1>::const_iterator itbIdx = hashValue.begin(), itIdx, … … 58 58 */ 59 59 CDHTAutoIndexing::CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 60 const ep_lib::MPI_Comm& clientIntraComm)60 const MPI_Comm& clientIntraComm) 61 61 : CClientClientDHTTemplate<size_t>(clientIntraComm) 62 62 { … … 64 64 nbIndexOnProc_ = hashInitMap.size(); 65 65 size_t nbIndexAccum; 66 ep_lib::MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, EP_UNSIGNED_LONG, EP_SUM, clientIntraComm);66 MPI_Scan(&nbIndexOnProc_, &nbIndexAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, clientIntraComm); 67 67 68 68 int rank, size; 69 ep_lib::MPI_Comm_rank(clientIntraComm, &rank);70 ep_lib::MPI_Comm_size(clientIntraComm, &size);69 MPI_Comm_rank(clientIntraComm, &rank); 70 MPI_Comm_size(clientIntraComm, &size); 71 71 if (rank == (size-1)) nbIndexesGlobal_ = nbIndexAccum; 72 ep_lib::MPI_Bcast(&nbIndexesGlobal_, 1, EP_UNSIGNED_LONG, size-1, clientIntraComm);72 MPI_Bcast(&nbIndexesGlobal_, 1, MPI_UNSIGNED_LONG, size-1, clientIntraComm); 73 73 74 74 Index2VectorInfoTypeMap::iterator itbIdx = hashInitMap.begin(), itIdx, -
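dht_auto_indexing.cpp relies on the usual prefix-sum numbering: an inclusive `MPI_Scan`, subtraction of the local count to get the starting offset, and an `MPI_Bcast` of the grand total from the last rank. A minimal sketch with made-up counts:

```cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  unsigned long nbLocal = 10 + rank;          // stand-in for the local index count
  unsigned long accum = 0;
  MPI_Scan(&nbLocal, &accum, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);

  unsigned long offset = accum - nbLocal;     // exclusive prefix sum = starting offset
  unsigned long nbGlobal = 0;
  if (rank == size - 1) nbGlobal = accum;     // last rank holds the grand total
  MPI_Bcast(&nbGlobal, 1, MPI_UNSIGNED_LONG, size - 1, MPI_COMM_WORLD);

  std::printf("rank %d: offset=%lu global=%lu\n", rank, offset, nbGlobal);
  MPI_Finalize();
  return 0;
}
```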
XIOS/trunk/src/dht_auto_indexing.hpp
r1638 r1639 25 25 26 26 CDHTAutoIndexing(const CArray<size_t,1>& hashValue, 27 const ep_lib::MPI_Comm& clientIntraComm);27 const MPI_Comm& clientIntraComm); 28 28 29 29 CDHTAutoIndexing(Index2VectorInfoTypeMap& hashInitMap, 30 const ep_lib::MPI_Comm& clientIntraComm);30 const MPI_Comm& clientIntraComm); 31 31 32 32 size_t getNbIndexesGlobal() const; -
XIOS/trunk/src/event_scheduler.cpp
r1638 r1639 8 8 9 9 10 CEventScheduler::CEventScheduler(const ep_lib::MPI_Comm& comm)11 { 12 ep_lib::MPI_Comm_dup(comm, &communicator) ;13 ep_lib::MPI_Comm_size(communicator,&mpiSize) ;14 ep_lib::MPI_Comm_rank(communicator,&mpiRank);10 CEventScheduler::CEventScheduler(const MPI_Comm& comm) 11 { 12 MPI_Comm_dup(comm, &communicator) ; 13 MPI_Comm_size(communicator,&mpiSize) ; 14 MPI_Comm_rank(communicator,&mpiRank); 15 15 16 16 … … 88 88 89 89 pendingSentParentRequest.push(sentRequest) ; 90 ep_lib::MPI_Isend(sentRequest->buffer,3, EP_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ;90 MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ; 91 91 traceOn() ; 92 92 } … … 115 115 { 116 116 int completed ; 117 ep_lib::MPI_Status status ;117 MPI_Status status ; 118 118 int received ; 119 119 SPendingRequest* recvRequest ; … … 135 135 while(received) 136 136 { 137 #ifdef _usingMPI138 137 MPI_Iprobe(MPI_ANY_SOURCE,1,communicator,&received, &status) ; 139 #elif _usingEP140 ep_lib::MPI_Iprobe(-2,1,communicator,&received, &status) ;141 #endif142 138 if (received) 143 139 { 144 140 recvRequest=new SPendingRequest ; 145 #ifdef _usingMPI146 141 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 1, communicator, &(recvRequest->request)) ; 147 #elif _usingEP148 ep_lib::MPI_Irecv(recvRequest->buffer, 3, EP_UNSIGNED_LONG, -2, 1, communicator, &(recvRequest->request)) ;149 #endif150 142 pendingRecvParentRequest.push(recvRequest) ; 151 143 } … … 157 149 { 158 150 recvRequest=pendingRecvParentRequest.front() ; 159 ep_lib::MPI_Test( &(recvRequest->request), &completed, &status) ;151 MPI_Test( &(recvRequest->request), &completed, &status) ; 160 152 if (completed) 161 153 { … … 177 169 // function call only by parent mpi process 178 170 179 ep_lib::MPI_Status status ;171 MPI_Status status ; 180 172 int received ; 181 173 received=true ; … … 185 177 while(received) 186 178 { 187 #ifdef _usingMPI188 179 MPI_Iprobe(MPI_ANY_SOURCE,0,communicator,&received, &status) ; 189 #elif _usingEP190 ep_lib::MPI_Iprobe(-2,0,communicator,&received, &status) ;191 #endif192 180 if (received) 193 181 { 194 182 recvRequest=new SPendingRequest ; 195 #ifdef _usingMPI196 183 MPI_Irecv(recvRequest->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0, communicator, &recvRequest->request) ; 197 #elif _usingEP198 ep_lib::MPI_Irecv(recvRequest->buffer, 3, EP_UNSIGNED_LONG, -2, 0, communicator, &recvRequest->request) ;199 #endif200 184 pendingRecvChildRequest.push_back(recvRequest) ; 201 185 } … … 206 190 for(list<SPendingRequest*>::iterator it=pendingRecvChildRequest.begin(); it!=pendingRecvChildRequest.end() ; ) 207 191 { 208 ep_lib::MPI_Test(&((*it)->request),&received,&status) ;192 MPI_Test(&((*it)->request),&received,&status) ; 209 193 if (received) 210 194 { … … 244 228 for(list<SPendingRequest*>::iterator it=pendingSentChildRequest.begin(); it!=pendingSentChildRequest.end() ; ) 245 229 { 246 ep_lib::MPI_Test(&(*it)->request,&received,&status) ;230 MPI_Test(&(*it)->request,&received,&status) ; 247 231 if (received) 248 232 { … … 267 251 sentRequest->buffer[1]=contextHashId ; 268 252 sentRequest->buffer[2]=lev+1 ; 269 ep_lib::MPI_Isend(sentRequest->buffer,3, EP_UNSIGNED_LONG, child[lev][i], 1, communicator, & sentRequest->request) ;253 MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, child[lev][i], 1, communicator, & sentRequest->request) ; 270 254 pendingSentChildRequest.push_back(sentRequest) ; 271 255 } -
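event_scheduler.cpp keeps polling its pending requests non-blockingly with `MPI_Test`. The sketch below mimics that loop with a simplified record type (the struct name and the three-word message are illustrative, not the scheduler's real protocol):

```cpp
#include <mpi.h>
#include <list>

// Simplified stand-in for the scheduler's pending-request record.
struct SPending { unsigned long buffer[3]; MPI_Request request; };

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::list<SPending*> pending;
  if (rank == 0)
  {
    // Root expects one 3-word message from every other rank.
    for (int src = 1; src < size; ++src)
    {
      SPending* p = new SPending;
      MPI_Irecv(p->buffer, 3, MPI_UNSIGNED_LONG, MPI_ANY_SOURCE, 0,
                MPI_COMM_WORLD, &p->request);
      pending.push_back(p);
    }
  }
  else
  {
    unsigned long msg[3] = { 1ul, 2ul, 3ul };   // e.g. timeLine, hashId, level
    MPI_Send(msg, 3, MPI_UNSIGNED_LONG, 0, 0, MPI_COMM_WORLD);
  }

  // Poll with MPI_Test until every pending request has completed.
  while (!pending.empty())
  {
    for (std::list<SPending*>::iterator it = pending.begin(); it != pending.end(); )
    {
      int done = 0;
      MPI_Status status;
      MPI_Test(&((*it)->request), &done, &status);
      if (done) { delete *it; it = pending.erase(it); }
      else ++it;
    }
  }

  MPI_Finalize();
  return 0;
}
```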
XIOS/trunk/src/event_scheduler.hpp
r1638 r1639 26 26 * @param[in] comm : MPI communicator du duplicate for internal use 27 27 */ 28 CEventScheduler(const ep_lib::MPI_Comm& comm) ;28 CEventScheduler(const MPI_Comm& comm) ; 29 29 30 30 … … 151 151 { 152 152 size_t buffer[3] ; /*!< communication buffer : timeLine, hashId, level */ 153 ep_lib::MPI_Request request ; /*!< pending MPI request */153 MPI_Request request ; /*!< pending MPI request */ 154 154 } ; 155 155 156 ep_lib::MPI_Comm communicator ; /*!< Internal MPI communicator */156 MPI_Comm communicator ; /*!< Internal MPI communicator */ 157 157 int mpiRank ; /*!< Rank in the communicator */ 158 158 int mpiSize ; /*!< Size of the communicator */ -
XIOS/trunk/src/filter/spatial_transform_filter.cpp
r1638 r1639 224 224 225 225 idxSendBuff = 0; 226 std::vector< ep_lib::MPI_Request> sendRecvRequest;226 std::vector<MPI_Request> sendRecvRequest; 227 227 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 228 228 { … … 234 234 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 235 235 } 236 sendRecvRequest.push_back( ep_lib::MPI_Request());237 ep_lib::MPI_Isend(sendBuff[idxSendBuff], countSize, EP_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back());236 sendRecvRequest.push_back(MPI_Request()); 237 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest.back()); 238 238 } 239 239 … … 252 252 int srcRank = itRecv->first; 253 253 int countSize = itRecv->second.size(); 254 sendRecvRequest.push_back( ep_lib::MPI_Request());255 ep_lib::MPI_Irecv(recvBuff + currentBuff, countSize, EP_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back());254 sendRecvRequest.push_back(MPI_Request()); 255 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest.back()); 256 256 currentBuff += countSize; 257 257 } 258 std::vector< ep_lib::MPI_Status> status(sendRecvRequest.size());259 ep_lib::MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]);258 std::vector<MPI_Status> status(sendRecvRequest.size()); 259 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 260 260 261 261 dataCurrentDest.resize(*itNbListRecv); -
XIOS/trunk/src/interface/c/icdata.cpp
r1638 r1639 60 60 { 61 61 std::string str; 62 ep_lib::MPI_Comm local_comm;63 ep_lib::MPI_Comm return_comm;62 MPI_Comm local_comm; 63 MPI_Comm return_comm; 64 64 65 65 if (!cstr2string(client_id, len_client_id, str)) return; 66 66 67 67 int initialized; 68 ep_lib::MPI_Initialized(&initialized); 69 #ifdef _usingMPI 68 MPI_Initialized(&initialized); 70 69 if (initialized) local_comm=MPI_Comm_f2c(*f_local_comm); 71 70 else local_comm=MPI_COMM_NULL; 72 #elif _usingEP73 if (initialized) local_comm=EP_Comm_f2c(f_local_comm);74 else local_comm=EP_COMM_NULL;75 #endif76 71 CXios::initClientSide(str, local_comm, return_comm); 77 #ifdef _usingMPI78 72 *f_return_comm=MPI_Comm_c2f(return_comm); 79 #elif _usingEP80 *f_return_comm=*static_cast<MPI_Fint* >(EP_Comm_c2f(return_comm));81 #endif82 73 CTimer::get("XIOS init").suspend(); 83 74 CTimer::get("XIOS").suspend(); … … 89 80 { 90 81 std::string str; 91 ep_lib::MPI_Comm comm;82 MPI_Comm comm; 92 83 93 84 if (!cstr2string(context_id, len_context_id, str)) return; 94 85 CTimer::get("XIOS").resume(); 95 86 CTimer::get("XIOS init context").resume(); 96 #ifdef _usingMPI97 87 comm=MPI_Comm_f2c(*f_comm); 98 #elif _usingEP99 comm=EP_Comm_f2c(f_comm);100 #endif101 88 CClient::registerContext(str, comm); 102 89 CTimer::get("XIOS init context").suspend(); -
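icdata.cpp now converts Fortran communicator handles directly with `MPI_Comm_f2c` / `MPI_Comm_c2f`. The sketch below shows that conversion at a hypothetical C entry point (`example_init_client` is not the real XIOS symbol, and its body is only a placeholder for the library initialisation):

```cpp
#include <mpi.h>

// Hypothetical Fortran-callable entry point (not the real XIOS symbol).
extern "C" void example_init_client(MPI_Fint* f_local_comm, MPI_Fint* f_return_comm)
{
  int initialized = 0;
  MPI_Initialized(&initialized);

  // Convert the incoming Fortran handle only once MPI is up.
  MPI_Comm local_comm = initialized ? MPI_Comm_f2c(*f_local_comm) : MPI_COMM_NULL;

  // Placeholder for the library-side initialisation; here we simply hand the
  // same communicator back to Fortran as the "return" communicator.
  MPI_Comm return_comm = local_comm;
  *f_return_comm = MPI_Comm_c2f(return_comm);
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  MPI_Fint fin = MPI_Comm_c2f(MPI_COMM_WORLD), fout = 0;
  example_init_client(&fin, &fout);
  MPI_Finalize();
  return 0;
}
```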
XIOS/trunk/src/interface/c/oasis_cinterface.cpp
r1638 r1639 21 21 } 22 22 23 void oasis_get_localcomm( ep_lib::MPI_Comm& comm)23 void oasis_get_localcomm(MPI_Comm& comm) 24 24 { 25 ep_lib::MPI_Fint f_comm ;25 MPI_Fint f_comm ; 26 26 27 27 fxios_oasis_get_localcomm(&f_comm) ; 28 //comm=MPI_Comm_f2c(f_comm) ;28 comm=MPI_Comm_f2c(f_comm) ; 29 29 } 30 30 31 void oasis_get_intracomm( ep_lib::MPI_Comm& comm_client_server,const std::string& server_id)31 void oasis_get_intracomm(MPI_Comm& comm_client_server,const std::string& server_id) 32 32 { 33 ep_lib::MPI_Fint f_comm ;33 MPI_Fint f_comm ; 34 34 35 35 fxios_oasis_get_intracomm(&f_comm,server_id.data(),server_id.size()) ; 36 //comm_client_server=MPI_Comm_f2c(f_comm) ;36 comm_client_server=MPI_Comm_f2c(f_comm) ; 37 37 } 38 38 39 void oasis_get_intercomm( ep_lib::MPI_Comm& comm_client_server,const std::string& server_id)39 void oasis_get_intercomm(MPI_Comm& comm_client_server,const std::string& server_id) 40 40 { 41 ep_lib::MPI_Fint f_comm ;41 MPI_Fint f_comm ; 42 42 43 43 fxios_oasis_get_intercomm(&f_comm,server_id.data(),server_id.size()) ; 44 //comm_client_server=MPI_Comm_f2c(f_comm) ;44 comm_client_server=MPI_Comm_f2c(f_comm) ; 45 45 } 46 46 } -
XIOS/trunk/src/interface/c/oasis_cinterface.hpp
r1638 r1639 10 10 void fxios_oasis_enddef(void) ; 11 11 void fxios_oasis_finalize(void) ; 12 void fxios_oasis_get_localcomm( ep_lib::MPI_Fint* f_comm) ;13 void fxios_oasis_get_intracomm( ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;14 void fxios_oasis_get_intercomm( ep_lib::MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ;12 void fxios_oasis_get_localcomm(MPI_Fint* f_comm) ; 13 void fxios_oasis_get_intracomm(MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 14 void fxios_oasis_get_intercomm(MPI_Fint* f_comm_client_server,const char* client_id,int str_len) ; 15 15 } 16 16 … … 20 20 void oasis_enddef(void) ; 21 21 void oasis_finalize(void) ; 22 void oasis_get_localcomm( ep_lib::MPI_Comm& comm) ;23 void oasis_get_intracomm( ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ;24 void oasis_get_intercomm( ep_lib::MPI_Comm& comm_client_server,const std::string& server_id) ;22 void oasis_get_localcomm(MPI_Comm& comm) ; 23 void oasis_get_intracomm(MPI_Comm& comm_client_server,const std::string& server_id) ; 24 void oasis_get_intercomm(MPI_Comm& comm_client_server,const std::string& server_id) ; 25 25 } 26 26 #endif -
XIOS/trunk/src/io/inetcdf4.cpp
r1638 r1639 7 7 namespace xios 8 8 { 9 CINetCDF4::CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/,9 CINetCDF4::CINetCDF4(const StdString& filename, const MPI_Comm* comm /*= NULL*/, bool multifile /*= true*/, 10 10 bool readMetaDataPar /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 11 11 { … … 14 14 { 15 15 int commSize = 0; 16 ep_lib::MPI_Comm_size(*comm, &commSize);16 MPI_Comm_size(*comm, &commSize); 17 17 if (commSize <= 1) 18 18 comm = NULL; … … 23 23 // even if Parallel NetCDF ends up being used. 24 24 if (mpi) 25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, EP_INFO_NULL, this->ncidp);25 CNetCdfInterface::openPar(filename, NC_NOWRITE | NC_MPIIO, *comm, MPI_INFO_NULL, this->ncidp); 26 26 else 27 27 CNetCdfInterface::open(filename, NC_NOWRITE, this->ncidp); -
XIOS/trunk/src/io/inetcdf4.hpp
r1638 r1639 22 22 public: 23 23 /// Constructors /// 24 CINetCDF4(const StdString& filename, const ep_lib::MPI_Comm* comm = NULL, bool multifile = true,24 CINetCDF4(const StdString& filename, const MPI_Comm* comm = NULL, bool multifile = true, 25 25 bool readMetaDataPar = false, const StdString& timeCounterName = "time_counter"); 26 26 -
XIOS/trunk/src/io/nc4_data_input.cpp
r1638 r1639 10 10 namespace xios 11 11 { 12 CNc4DataInput::CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/,12 CNc4DataInput::CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective /*= true*/, 13 13 bool readMetaDataPar /*= false*/, bool ugridConvention /*= false*/, const StdString& timeCounterName /*= "time_counter"*/) 14 14 : SuperClass() -
XIOS/trunk/src/io/nc4_data_input.hpp
r1638 r1639 23 23 24 24 /// Constructors /// 25 CNc4DataInput(const StdString& filename, ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true,25 CNc4DataInput(const StdString& filename, MPI_Comm comm_file, bool multifile, bool isCollective = true, 26 26 bool readMetaDataPar = false, bool ugridConvention = false, const StdString& timeCounterName = "time_counter"); 27 27 CNc4DataInput(const CNc4DataInput& dataInput); // Not implemented. … … 70 70 private: 71 71 /// Private attributes /// 72 ep_lib::MPI_Comm comm_file;72 MPI_Comm comm_file; 73 73 const StdString filename; 74 74 bool isCollective; -
XIOS/trunk/src/io/nc4_data_output.cpp
r1638 r1639 28 28 CNc4DataOutput::CNc4DataOutput 29 29 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, bool useCFConvention, 30 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName)30 MPI_Comm comm_file, bool multifile, bool isCollective, const StdString& timeCounterName) 31 31 : SuperClass() 32 32 , SuperClassWriter(filename, exist, useClassicFormat, useCFConvention, &comm_file, multifile, timeCounterName) -
XIOS/trunk/src/io/nc4_data_output.hpp
r1638 r1639 27 27 (CFile* file, const StdString & filename, bool exist, bool useClassicFormat, 28 28 bool useCFConvention, 29 ep_lib::MPI_Comm comm_file, bool multifile, bool isCollective = true,29 MPI_Comm comm_file, bool multifile, bool isCollective = true, 30 30 const StdString& timeCounterName = "time_counter"); 31 31 … … 117 117 118 118 /// Propriétés privées /// 119 ep_lib::MPI_Comm comm_file;119 MPI_Comm comm_file; 120 120 const StdString filename; 121 121 std::map<Time, StdSize> timeToRecordCache; -
XIOS/trunk/src/io/netCdfInterface.cpp
r1638 r1639 10 10 #include "netCdfInterface.hpp" 11 11 #include "netCdfException.hpp" 12 #include "ep_mpi.hpp" 12 13 13 namespace xios 14 14 { … … 47 47 \return Status code 48 48 */ 49 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId) 50 { 51 #ifdef _usingMPI 49 int CNetCdfInterface::createPar(const StdString& fileName, int cMode, MPI_Comm comm, MPI_Info info, int& ncId) 50 { 52 51 int status = xios::nc_create_par(fileName.c_str(), cMode, comm, info, &ncId); 53 #elif _usingEP54 int status = xios::nc_create_par(fileName.c_str(), cMode, to_mpi_comm(comm->mpi_comm), to_mpi_info(info), &ncId);55 #endif56 52 if (NC_NOERR != status) 57 53 { … … 104 100 \return Status code 105 101 */ 106 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId) 107 { 108 #ifdef _usingMPI 102 int CNetCdfInterface::openPar(const StdString& fileName, int oMode, MPI_Comm comm, MPI_Info info, int& ncId) 103 { 109 104 int status = xios::nc_open_par(fileName.c_str(), oMode, comm, info, &ncId); 110 #elif _usingEP111 int status = xios::nc_open_par(fileName.c_str(), oMode, to_mpi_comm(comm->mpi_comm), to_mpi_info(info), &ncId);112 #endif113 105 if (NC_NOERR != status) 114 106 { -
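netCdfInterface.cpp calls the parallel netCDF-4 API straight through once the communicator is a plain `MPI_Comm`. A minimal sketch, assuming a netCDF build with parallel I/O enabled (`netcdf_par.h`, `NC_MPIIO`); the file name and flags are illustrative:

```cpp
#include <mpi.h>
#include <netcdf.h>
#include <netcdf_par.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  int ncid = -1;
  // Collective create of a NetCDF-4 file over MPI-IO; every rank passes the
  // same communicator and (here) an empty info object.
  int status = nc_create_par("example_output.nc", NC_NETCDF4 | NC_MPIIO,
                             MPI_COMM_WORLD, MPI_INFO_NULL, &ncid);
  if (status != NC_NOERR) std::printf("netCDF error: %s\n", nc_strerror(status));
  else nc_close(ncid);

  MPI_Finalize();
  return 0;
}
```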
XIOS/trunk/src/io/netCdfInterface.hpp
r1638 r1639 32 32 33 33 //! Create a netcdf file on a parallel file system 34 static int createPar(const StdString& path, int cmode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId);34 static int createPar(const StdString& path, int cmode, MPI_Comm comm, MPI_Info info, int& ncId); 35 35 36 36 //! Open a netcdf file … … 38 38 39 39 //! Open a netcdf file 40 static int openPar(const StdString& path, int cmode, ep_lib::MPI_Comm comm, ep_lib::MPI_Info info, int& ncId);40 static int openPar(const StdString& path, int cmode, MPI_Comm comm, MPI_Info info, int& ncId); 41 41 42 42 //! Close a netcdf file -
XIOS/trunk/src/io/onetcdf4.cpp
r1638 r1639 15 15 CONetCDF4::CONetCDF4(const StdString& filename, bool append, bool useClassicFormat, 16 16 bool useCFConvention, 17 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)17 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 18 18 : path() 19 19 , wmpi(false) … … 33 33 34 34 void CONetCDF4::initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 35 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName)35 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName) 36 36 { 37 37 this->useClassicFormat = useClassicFormat; … … 44 44 { 45 45 int commSize = 0; 46 ep_lib::MPI_Comm_size(*comm, &commSize);46 MPI_Comm_size(*comm, &commSize); 47 47 if (commSize <= 1) 48 48 comm = NULL; … … 58 58 CTimer::get("Files : create").resume(); 59 59 if (wmpi) 60 CNetCdfInterface::createPar(filename, mode, *comm, EP_INFO_NULL, this->ncidp);60 CNetCdfInterface::createPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp); 61 61 else 62 62 CNetCdfInterface::create(filename, mode, this->ncidp); … … 70 70 CTimer::get("Files : open").resume(); 71 71 if (wmpi) 72 CNetCdfInterface::openPar(filename, mode, *comm, EP_INFO_NULL, this->ncidp);72 CNetCdfInterface::openPar(filename, mode, *comm, MPI_INFO_NULL, this->ncidp); 73 73 else 74 74 CNetCdfInterface::open(filename, mode, this->ncidp); -
XIOS/trunk/src/io/onetcdf4.hpp
r1638 r1639 28 28 CONetCDF4(const StdString& filename, bool append, bool useClassicFormat = false, 29 29 bool useCFConvention = true, 30 const ep_lib::MPI_Comm* comm = NULL, bool multifile = true,30 const MPI_Comm* comm = NULL, bool multifile = true, 31 31 const StdString& timeCounterName = "time_counter"); 32 32 … … 37 37 /// Initialisation /// 38 38 void initialize(const StdString& filename, bool append, bool useClassicFormat, bool useCFConvention, 39 const ep_lib::MPI_Comm* comm, bool multifile, const StdString& timeCounterName);39 const MPI_Comm* comm, bool multifile, const StdString& timeCounterName); 40 40 void close(void); 41 41 void sync(void); -
XIOS/trunk/src/mpi.hpp
r1638 r1639 11 11 #define OMPI_SKIP_MPICXX 12 12 13 #ifdef _usingEP14 #include "ep_lib.hpp"15 #include "ep_declaration.hpp"16 #endif17 18 13 #include <mpi.h> 19 14 20 #ifdef _usingMPI21 22 #define ep_lib23 24 #define EP_INT MPI_INT25 #define EP_FLOAT MPI_FLOAT26 #define EP_DOUBLE MPI_DOUBLE27 #define EP_CHAR MPI_CHAR28 #define EP_LONG MPI_LONG29 #define EP_LONG_LONG_INT MPI_LONG_LONG_INT30 #define EP_UNSIGNED_LONG MPI_UNSIGNED_LONG31 #define EP_UNSIGNED_CHAR MPI_UNSIGNED_CHAR32 33 34 #define EP_COMM_WORLD MPI_COMM_WORLD35 #define EP_COMM_NULL MPI_COMM_NULL36 #define EP_INFO_NULL MPI_INFO_NULL37 38 #define EP_MAX MPI_MAX39 #define EP_MIN MPI_MIN40 #define EP_SUM MPI_SUM41 #define EP_LOR MPI_LOR42 43 15 #endif 44 45 #endif -
XIOS/trunk/src/node/axis.cpp
r1638 r1639 130 130 \return the number of indexes written by each server 131 131 */ 132 int CAxis::getNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom)132 int CAxis::getNumberWrittenIndexes(MPI_Comm writtenCom) 133 133 TRY 134 134 { 135 135 int writtenSize; 136 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);136 MPI_Comm_size(writtenCom, &writtenSize); 137 137 return numberWrittenIndexes_[writtenSize]; 138 138 } … … 143 143 \return the total number of indexes written by the servers 144 144 */ 145 int CAxis::getTotalNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom)145 int CAxis::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 146 146 TRY 147 147 { 148 148 int writtenSize; 149 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);149 MPI_Comm_size(writtenCom, &writtenSize); 150 150 return totalNumberWrittenIndexes_[writtenSize]; 151 151 } … … 156 156 \return the offset of indexes written by each server 157 157 */ 158 int CAxis::getOffsetWrittenIndexes( ep_lib::MPI_Comm writtenCom)158 int CAxis::getOffsetWrittenIndexes(MPI_Comm writtenCom) 159 159 TRY 160 160 { 161 161 int writtenSize; 162 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);162 MPI_Comm_size(writtenCom, &writtenSize); 163 163 return offsetWrittenIndexes_[writtenSize]; 164 164 } 165 165 CATCH_DUMP_ATTR 166 166 167 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer( ep_lib::MPI_Comm writtenCom)167 CArray<int, 1>& CAxis::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 168 168 TRY 169 169 { 170 170 int writtenSize; 171 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);171 MPI_Comm_size(writtenCom, &writtenSize); 172 172 return compressedIndexToWriteOnServer[writtenSize]; 173 173 } … … 768 768 CATCH_DUMP_ATTR 769 769 770 void CAxis::computeWrittenCompressedIndex( ep_lib::MPI_Comm writtenComm)770 void CAxis::computeWrittenCompressedIndex(MPI_Comm writtenComm) 771 771 TRY 772 772 { 773 773 int writtenCommSize; 774 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize);774 MPI_Comm_size(writtenComm, &writtenCommSize); 775 775 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 776 776 return; … … 850 850 { 851 851 852 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);853 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);852 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 853 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 854 854 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 855 855 } -
XIOS/trunk/src/node/axis.hpp
r1638 r1639 68 68 const std::set<StdString> & getRelFiles(void) const; 69 69 70 int getNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom);71 int getTotalNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom);72 int getOffsetWrittenIndexes( ep_lib::MPI_Comm writtenCom);73 CArray<int, 1>& getCompressedIndexToWriteOnServer( ep_lib::MPI_Comm writtenCom);70 int getNumberWrittenIndexes(MPI_Comm writtenCom); 71 int getTotalNumberWrittenIndexes(MPI_Comm writtenCom); 72 int getOffsetWrittenIndexes(MPI_Comm writtenCom); 73 CArray<int, 1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom); 74 74 75 75 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, const std::vector<int>& globalDim, int orderPositionInGrid, … … 113 113 114 114 void computeWrittenIndex(); 115 void computeWrittenCompressedIndex( ep_lib::MPI_Comm);115 void computeWrittenCompressedIndex(MPI_Comm); 116 116 bool hasTransformation(); 117 117 void solveInheritanceTransformation(); -
XIOS/trunk/src/node/context.cpp
r1638 r1639 265 265 266 266 //! Initialize client side 267 void CContext::initClient( ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer /*= 0*/)267 void CContext::initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer /*= 0*/) 268 268 TRY 269 269 { 270 270 271 271 hasClient = true; 272 ep_lib::MPI_Comm intraCommServer, interCommServer;272 MPI_Comm intraCommServer, interCommServer; 273 273 274 274 … … 284 284 else 285 285 { 286 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer);286 MPI_Comm_dup(intraComm, &intraCommServer); 287 287 comms.push_back(intraCommServer); 288 ep_lib::MPI_Comm_dup(interComm, &interCommServer);288 MPI_Comm_dup(interComm, &interCommServer); 289 289 comms.push_back(interCommServer); 290 290 } … … 309 309 { 310 310 clientPrimServer.push_back(new CContextClient(this, intraComm, interComm)); 311 ep_lib::MPI_Comm_dup(intraComm, &intraCommServer);311 MPI_Comm_dup(intraComm, &intraCommServer); 312 312 comms.push_back(intraCommServer); 313 ep_lib::MPI_Comm_dup(interComm, &interCommServer);313 MPI_Comm_dup(interComm, &interCommServer); 314 314 comms.push_back(interCommServer); 315 315 serverPrimServer.push_back(new CContextServer(this, intraCommServer, interCommServer)); … … 383 383 CATCH_DUMP_ATTR 384 384 385 void CContext::initServer( ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient /*= 0*/)385 void CContext::initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient /*= 0*/) 386 386 TRY 387 387 { … … 402 402 registryOut->setPath(contextRegistryId) ; 403 403 404 ep_lib::MPI_Comm intraCommClient, interCommClient;404 MPI_Comm intraCommClient, interCommClient; 405 405 if (cxtClient) // Attached mode 406 406 { … … 410 410 else 411 411 { 412 ep_lib::MPI_Comm_dup(intraComm, &intraCommClient);412 MPI_Comm_dup(intraComm, &intraCommClient); 413 413 comms.push_back(intraCommClient); 414 ep_lib::MPI_Comm_dup(interComm, &interCommClient);414 MPI_Comm_dup(interComm, &interCommClient); 415 415 comms.push_back(interCommClient); 416 416 } … … 502 502 503 503 //! Free internally allocated communicators 504 for (std::list< ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)505 ep_lib::MPI_Comm_free(&(*it));504 for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 505 MPI_Comm_free(&(*it)); 506 506 comms.clear(); 507 507 … … 544 544 545 545 //! Free internally allocated communicators 546 for (std::list< ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)547 ep_lib::MPI_Comm_free(&(*it));546 for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 547 MPI_Comm_free(&(*it)); 548 548 comms.clear(); 549 549 … … 560 560 TRY 561 561 { 562 for (std::list< ep_lib::MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)563 ep_lib::MPI_Comm_free(&(*it));562 for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it) 563 MPI_Comm_free(&(*it)); 564 564 comms.clear(); 565 565 } -
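context.cpp keeps duplicating communicators with `MPI_Comm_dup`, remembering them in a list, and releasing them with `MPI_Comm_free` at finalization. A compact sketch of that lifecycle (the single dup stands in for the intra/inter communicator pairs created per context):

```cpp
#include <mpi.h>
#include <list>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  std::list<MPI_Comm> comms;                 // communicators allocated internally

  MPI_Comm intraCommServer;
  MPI_Comm_dup(MPI_COMM_WORLD, &intraCommServer);
  comms.push_back(intraCommServer);          // remember it for cleanup

  // ... the duplicated communicator would be used by a client/server pair ...

  // Teardown: free everything that was dup'ed, then clear the list.
  for (std::list<MPI_Comm>::iterator it = comms.begin(); it != comms.end(); ++it)
    MPI_Comm_free(&(*it));
  comms.clear();

  MPI_Finalize();
  return 0;
}
```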
XIOS/trunk/src/node/context.hpp
r1638 r1639 88 88 public : 89 89 // Initialize server or client 90 void initClient( ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtServer = 0);91 void initServer( ep_lib::MPI_Comm intraComm, ep_lib::MPI_Comm interComm, CContext* cxtClient = 0);90 void initClient(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtServer = 0); 91 void initServer(MPI_Comm intraComm, MPI_Comm interComm, CContext* cxtClient = 0); 92 92 bool isInitialized(void); 93 93 … … 263 263 StdString idServer_; 264 264 CGarbageCollector garbageCollector; 265 std::list< ep_lib::MPI_Comm> comms; //!< Communicators allocated internally265 std::list<MPI_Comm> comms; //!< Communicators allocated internally 266 266 267 267 public: // Some function maybe removed in the near future -
XIOS/trunk/src/node/domain.cpp
r1638 r1639 99 99 \return the number of indexes written by each server 100 100 */ 101 int CDomain::getNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom)101 int CDomain::getNumberWrittenIndexes(MPI_Comm writtenCom) 102 102 TRY 103 103 { 104 104 int writtenSize; 105 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);105 MPI_Comm_size(writtenCom, &writtenSize); 106 106 return numberWrittenIndexes_[writtenSize]; 107 107 } … … 112 112 \return the total number of indexes written by the servers 113 113 */ 114 int CDomain::getTotalNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom)114 int CDomain::getTotalNumberWrittenIndexes(MPI_Comm writtenCom) 115 115 TRY 116 116 { 117 117 int writtenSize; 118 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);118 MPI_Comm_size(writtenCom, &writtenSize); 119 119 return totalNumberWrittenIndexes_[writtenSize]; 120 120 } … … 125 125 \return the offset of indexes written by each server 126 126 */ 127 int CDomain::getOffsetWrittenIndexes( ep_lib::MPI_Comm writtenCom)127 int CDomain::getOffsetWrittenIndexes(MPI_Comm writtenCom) 128 128 TRY 129 129 { 130 130 int writtenSize; 131 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);131 MPI_Comm_size(writtenCom, &writtenSize); 132 132 return offsetWrittenIndexes_[writtenSize]; 133 133 } 134 134 CATCH_DUMP_ATTR 135 135 136 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer( ep_lib::MPI_Comm writtenCom)136 CArray<int, 1>& CDomain::getCompressedIndexToWriteOnServer(MPI_Comm writtenCom) 137 137 TRY 138 138 { 139 139 int writtenSize; 140 ep_lib::MPI_Comm_size(writtenCom, &writtenSize);140 MPI_Comm_size(writtenCom, &writtenSize); 141 141 return compressedIndexToWriteOnServer[writtenSize]; 142 142 } … … 690 690 int v ; 691 691 v=ibegin ; 692 ep_lib::MPI_Allgather(&v,1,EP_INT,ibegin_g,1,EP_INT,client->intraComm) ;692 MPI_Allgather(&v,1,MPI_INT,ibegin_g,1,MPI_INT,client->intraComm) ; 693 693 v=jbegin ; 694 ep_lib::MPI_Allgather(&v,1,EP_INT,jbegin_g,1,EP_INT,client->intraComm) ;694 MPI_Allgather(&v,1,MPI_INT,jbegin_g,1,MPI_INT,client->intraComm) ; 695 695 v=ni ; 696 ep_lib::MPI_Allgather(&v,1,EP_INT,ni_g,1,EP_INT,client->intraComm) ;696 MPI_Allgather(&v,1,MPI_INT,ni_g,1,MPI_INT,client->intraComm) ; 697 697 v=nj ; 698 ep_lib::MPI_Allgather(&v,1,EP_INT,nj_g,1,EP_INT,client->intraComm) ;699 700 ep_lib::MPI_Allgatherv(lon.dataFirst(),ni,EP_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,EP_DOUBLE,client->intraComm) ;701 ep_lib::MPI_Allgatherv(lat.dataFirst(),nj,EP_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,EP_DOUBLE,client->intraComm) ;698 MPI_Allgather(&v,1,MPI_INT,nj_g,1,MPI_INT,client->intraComm) ; 699 700 MPI_Allgatherv(lon.dataFirst(),ni,MPI_DOUBLE,lon_g.dataFirst(),ni_g, ibegin_g,MPI_DOUBLE,client->intraComm) ; 701 MPI_Allgatherv(lat.dataFirst(),nj,MPI_DOUBLE,lat_g.dataFirst(),nj_g, jbegin_g,MPI_DOUBLE,client->intraComm) ; 702 702 703 703 delete[] ibegin_g ; … … 1932 1932 displs[0] = 0; 1933 1933 int localCount = connectedServerRank_[nbServer].size() ; 1934 ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ;1934 MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 1935 1935 for (int i = 0; i < clientSize-1; ++i) 1936 1936 { … … 1938 1938 } 1939 1939 std::vector<int> allConnectedServers(displs[clientSize-1]+counts[clientSize-1]); 1940 ep_lib::MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, EP_INT, &allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm);1940 MPI_Gatherv(&(connectedServerRank_[nbServer])[0], localCount, MPI_INT, 
&allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 1941 1941 1942 1942 if ((allConnectedServers.size() != nbServer) && (rank == 0)) … … 2003 2003 CATCH_DUMP_ATTR 2004 2004 2005 void CDomain::computeWrittenCompressedIndex( ep_lib::MPI_Comm writtenComm)2005 void CDomain::computeWrittenCompressedIndex(MPI_Comm writtenComm) 2006 2006 TRY 2007 2007 { 2008 2008 int writtenCommSize; 2009 ep_lib::MPI_Comm_size(writtenComm, &writtenCommSize);2009 MPI_Comm_size(writtenComm, &writtenCommSize); 2010 2010 if (compressedIndexToWriteOnServer.find(writtenCommSize) != compressedIndexToWriteOnServer.end()) 2011 2011 return; … … 2064 2064 { 2065 2065 2066 ep_lib::MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);2067 ep_lib::MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, EP_INT, EP_SUM, writtenComm);2066 MPI_Allreduce(&numberWrittenIndexes_[writtenCommSize], &totalNumberWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 2067 MPI_Scan(&numberWrittenIndexes_[writtenCommSize], &offsetWrittenIndexes_[writtenCommSize], 1, MPI_INT, MPI_SUM, writtenComm); 2068 2068 offsetWrittenIndexes_[writtenCommSize] -= numberWrittenIndexes_[writtenCommSize]; 2069 2069 } -
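domain.cpp's consistency check gathers each client's number of connected servers to rank 0 and then the lists themselves with `MPI_Gatherv`. A standalone sketch of that root-side pattern (the list contents are invented):

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<int> connected(1 + rank % 3, rank);   // illustrative local list
  int localCount = (int)connected.size();

  // Root collects the counts, builds displacements, then gathers the lists.
  std::vector<int> counts(size), displs(size, 0);
  MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, MPI_COMM_WORLD);

  std::vector<int> all;
  if (rank == 0)
  {
    for (int i = 1; i < size; ++i) displs[i] = displs[i - 1] + counts[i - 1];
    all.resize(displs[size - 1] + counts[size - 1]);
  }
  MPI_Gatherv(&connected[0], localCount, MPI_INT,
              rank == 0 ? &all[0] : NULL, &counts[0], &displs[0], MPI_INT,
              0, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```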
XIOS/trunk/src/node/domain.hpp
r1638 r1639 94 94 bool isWrittenCompressed(const StdString& filename) const; 95 95 96 int getNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom);97 int getTotalNumberWrittenIndexes( ep_lib::MPI_Comm writtenCom);98 int getOffsetWrittenIndexes( ep_lib::MPI_Comm writtenCom);99 CArray<int,1>& getCompressedIndexToWriteOnServer( ep_lib::MPI_Comm writtenCom);96 int getNumberWrittenIndexes(MPI_Comm writtenCom); 97 int getTotalNumberWrittenIndexes(MPI_Comm writtenCom); 98 int getOffsetWrittenIndexes(MPI_Comm writtenCom); 99 CArray<int,1>& getCompressedIndexToWriteOnServer(MPI_Comm writtenCom); 100 100 101 101 std::map<int, StdSize> getAttributesBufferSize(CContextClient* client, bool bufferForWriting = false); … … 116 116 117 117 void computeWrittenIndex(); 118 void computeWrittenCompressedIndex( ep_lib::MPI_Comm);118 void computeWrittenCompressedIndex(MPI_Comm); 119 119 120 120 void AllgatherRectilinearLonLat(CArray<double,1>& lon, CArray<double,1>& lat, -
XIOS/trunk/src/node/field.cpp
r1638 r1639 531 531 if (!nstepMaxRead) 532 532 { 533 #ifdef _usingMPI534 533 MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, context->server->intraComm); 535 #elif _usingEP536 ep_lib::MPI_Allreduce(&nstepMax, &nstepMax, 1, EP_INT, EP_MAX, context->server->intraComm);537 #endif538 534 nstepMaxRead = true; 539 535 } -
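The field.cpp change above drops the _usingMPI/_usingEP preprocessor branches and keeps only the standard call. The retained form uses MPI_IN_PLACE, which matters because standard MPI does not allow the send and receive buffers of MPI_Allreduce to alias (the removed EP branch passed &nstepMax twice). A minimal sketch of the in-place maximum reduction, with an illustrative stand-in value:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int nstepMax = 10 * rank;   // stand-in for the per-rank record count
  // Every rank overwrites its local value with the global maximum.
  MPI_Allreduce(MPI_IN_PLACE, &nstepMax, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);

  std::printf("rank %d sees nstepMax = %d\n", rank, nstepMax);
  MPI_Finalize();
  return 0;
}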
XIOS/trunk/src/node/file.cpp
r1638 r1639 25 25 CFile::CFile(void) 26 26 : CObjectTemplate<CFile>(), CFileAttributes() 27 , vFieldGroup(), data_out(), enabledFields(), fileComm( EP_COMM_NULL)27 , vFieldGroup(), data_out(), enabledFields(), fileComm(MPI_COMM_NULL) 28 28 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 29 29 { … … 34 34 CFile::CFile(const StdString & id) 35 35 : CObjectTemplate<CFile>(id), CFileAttributes() 36 , vFieldGroup(), data_out(), enabledFields(), fileComm( EP_COMM_NULL)36 , vFieldGroup(), data_out(), enabledFields(), fileComm(MPI_COMM_NULL) 37 37 , isOpen(false), read_client(0), checkRead(false), allZoneEmpty(false) 38 38 { … … 307 307 308 308 int color = allZoneEmpty ? 0 : 1; 309 ep_lib::MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm);310 if (allZoneEmpty) ep_lib::MPI_Comm_free(&fileComm);309 MPI_Comm_split(server->intraComm, color, server->intraCommRank, &fileComm); 310 if (allZoneEmpty) MPI_Comm_free(&fileComm); 311 311 } 312 312 CATCH_DUMP_ATTR … … 554 554 { 555 555 int commSize, commRank; 556 ep_lib::MPI_Comm_size(fileComm, &commSize);557 ep_lib::MPI_Comm_rank(fileComm, &commRank);556 MPI_Comm_size(fileComm, &commSize); 557 MPI_Comm_rank(fileComm, &commRank); 558 558 559 559 if (server->intraCommSize > 1) … … 634 634 CContext* context = CContext::getCurrent(); 635 635 CContextServer* server = context->server; 636 ep_lib::MPI_Comm readComm = this->fileComm;636 MPI_Comm readComm = this->fileComm; 637 637 638 638 if (!allZoneEmpty) … … 677 677 { 678 678 int commSize, commRank; 679 ep_lib::MPI_Comm_size(readComm, &commSize);680 ep_lib::MPI_Comm_rank(readComm, &commRank);679 MPI_Comm_size(readComm, &commSize); 680 MPI_Comm_rank(readComm, &commRank); 681 681 682 682 if (server->intraCommSize > 1) … … 722 722 isOpen = false; 723 723 } 724 if (fileComm != EP_COMM_NULL) ep_lib::MPI_Comm_free(&fileComm);724 if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm); 725 725 } 726 726 CATCH_DUMP_ATTR -
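The file.cpp hunks above split the server intra-communicator by a colour that marks whether the rank has anything to write, and immediately free the resulting communicator on ranks whose zone is empty; the close-side hunk only frees fileComm when it is not MPI_COMM_NULL. A minimal sketch of that split-and-free pattern, where hasData stands in for !allZoneEmpty (illustrative only):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  bool hasData = (rank % 2 == 0);           // pretend odd ranks own no zone
  int color = hasData ? 1 : 0;

  MPI_Comm fileComm = MPI_COMM_NULL;
  MPI_Comm_split(MPI_COMM_WORLD, color, rank, &fileComm);
  if (!hasData) MPI_Comm_free(&fileComm);   // never used on these ranks

  if (hasData)
  {
    int writers;
    MPI_Comm_size(fileComm, &writers);
    if (rank == 0) std::printf("%d ranks take part in writing this file\n", writers);
  }
  if (fileComm != MPI_COMM_NULL) MPI_Comm_free(&fileComm);
  MPI_Finalize();
  return 0;
}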
XIOS/trunk/src/node/file.hpp
r1638 r1639 175 175 int nbAxis, nbDomains; 176 176 bool isOpen; 177 ep_lib::MPI_Comm fileComm;177 MPI_Comm fileComm; 178 178 179 179 private: -
XIOS/trunk/src/node/grid.cpp
r1638 r1639 661 661 { 662 662 CContextServer* server = CContext::getCurrent()->server; 663 ep_lib::MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm);664 ep_lib::MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, EP_INT, EP_SUM, server->intraComm);663 MPI_Allreduce(&numberWrittenIndexes_, &totalNumberWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 664 MPI_Scan(&numberWrittenIndexes_, &offsetWrittenIndexes_, 1, MPI_INT, MPI_SUM, server->intraComm); 665 665 offsetWrittenIndexes_ -= numberWrittenIndexes_; 666 666 } … … 856 856 displs[0] = 0; 857 857 int localCount = connectedServerRank_[receiverSize].size() ; 858 ep_lib::MPI_Gather(&localCount, 1, EP_INT, &counts[0], 1, EP_INT, 0, client->intraComm) ;858 MPI_Gather(&localCount, 1, MPI_INT, &counts[0], 1, MPI_INT, 0, client->intraComm) ; 859 859 for (int i = 0; i < client->clientSize-1; ++i) 860 860 { … … 862 862 } 863 863 std::vector<int> allConnectedServers(displs[client->clientSize-1]+counts[client->clientSize-1]); 864 ep_lib::MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, EP_INT, &allConnectedServers[0], &counts[0], &displs[0], EP_INT, 0, client->intraComm);864 MPI_Gatherv(&(connectedServerRank_[receiverSize])[0], localCount, MPI_INT, &allConnectedServers[0], &counts[0], &displs[0], MPI_INT, 0, client->intraComm); 865 865 866 866 if ((allConnectedServers.size() != receiverSize) && (client->clientRank == 0)) -
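The grid.cpp hunk above is the same offset computation that appears in domain.cpp: an MPI_Allreduce gives the global number of written indexes, an inclusive MPI_Scan gives the running total up to and including the calling rank, and subtracting the local count turns that into an exclusive offset. A minimal sketch with illustrative values:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  int numberWritten = rank + 1;   // stand-in for numberWrittenIndexes_
  int totalWritten = 0, offset = 0;

  MPI_Allreduce(&numberWritten, &totalWritten, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Scan(&numberWritten, &offset, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  offset -= numberWritten;        // MPI_Scan is inclusive, so remove the local count

  std::printf("rank %d writes %d values at offset %d of %d\n",
              rank, numberWritten, offset, totalWritten);
  MPI_Finalize();
  return 0;
}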
XIOS/trunk/src/node/mesh.cpp
r1638 r1639 414 414 * \param [in] bounds_lat Array of boundary latitudes. Its size depends on the element type. 415 415 */ 416 void CMesh::createMeshEpsilon(const ep_lib::MPI_Comm& comm,416 void CMesh::createMeshEpsilon(const MPI_Comm& comm, 417 417 const CArray<double, 1>& lonvalue, const CArray<double, 1>& latvalue, 418 418 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat) … … 421 421 int nvertex = (bounds_lon.numElements() == 0) ? 1 : bounds_lon.rows(); 422 422 int mpiRank, mpiSize; 423 ep_lib::MPI_Comm_rank(comm, &mpiRank);424 ep_lib::MPI_Comm_size(comm, &mpiSize);423 MPI_Comm_rank(comm, &mpiRank); 424 MPI_Comm_size(comm, &mpiSize); 425 425 double prec = 1e-11; // used in calculations of edge_lon/lat 426 426 … … 460 460 unsigned long nbEdgesOnProc = nbEdges_; 461 461 unsigned long nbEdgesAccum; 462 ep_lib::MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm);462 MPI_Scan(&nbEdgesOnProc, &nbEdgesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 463 463 nbEdgesAccum -= nbEdges_; 464 464 … … 590 590 unsigned long nodeCount = nodeIdx2Idx.size(); 591 591 unsigned long nodeStart, nbNodes; 592 ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);592 MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 593 593 int nNodes = nodeStart; 594 ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);594 MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm); 595 595 nbNodesGlo = nNodes; 596 596 … … 683 683 unsigned long nbFacesOnProc = nbFaces_; 684 684 unsigned long nbFacesAccum; 685 ep_lib::MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, EP_UNSIGNED_LONG, EP_SUM, comm);685 MPI_Scan(&nbFacesOnProc, &nbFacesAccum, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 686 686 nbFacesAccum -= nbFaces_; 687 687 … … 807 807 808 808 unsigned long edgeStart, nbEdges; 809 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);809 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 810 810 int nEdges = edgeStart; 811 ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);811 MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm); 812 812 nbEdgesGlo = nEdges; 813 813 … … 1028 1028 unsigned long edgeCount = edgeIdx2Idx.size(); 1029 1029 unsigned long edgeStart, nbEdges; 1030 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);1030 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 1031 1031 int nEdges = edgeStart; 1032 ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);1032 MPI_Bcast(&nEdges, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm); 1033 1033 nbEdgesGlo = nEdges; 1034 1034 … … 1298 1298 unsigned long nodeCount = nodeIdx2Idx.size(); 1299 1299 unsigned long nodeStart, nbNodes; 1300 ep_lib::MPI_Scan(&nodeCount, &nodeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);1300 MPI_Scan(&nodeCount, &nodeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 1301 1301 int nNodes = nodeStart; 1302 ep_lib::MPI_Bcast(&nNodes, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);1302 MPI_Bcast(&nNodes, 1, MPI_UNSIGNED_LONG, mpiSize-1, comm); 1303 1303 nbNodesGlo = nNodes; 1304 1304 … … 1418 1418 unsigned long edgeCount = edgeIdx2Idx.size(); 1419 1419 unsigned long edgeStart, nbEdges; 1420 ep_lib::MPI_Scan(&edgeCount, &edgeStart, 1, EP_UNSIGNED_LONG, EP_SUM, comm);1420 MPI_Scan(&edgeCount, &edgeStart, 1, MPI_UNSIGNED_LONG, MPI_SUM, comm); 1421 1421 int nEdges = edgeStart; 1422 ep_lib::MPI_Bcast(&nEdges, 1, EP_UNSIGNED_LONG, mpiSize-1, comm);1422 MPI_Bcast(&nEdges, 1, 
MPI_UNSIGNED_LONG, mpiSize-1, comm); 1423 1423 nbEdgesGlo = nEdges; 1424 1424 … … 1614 1614 */ 1615 1615 1616 void CMesh::getGloNghbFacesNodeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,1616 void CMesh::getGloNghbFacesNodeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 1617 1617 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1618 1618 CArray<int, 2>& nghbFaces) … … 1623 1623 1624 1624 int mpiRank, mpiSize; 1625 ep_lib::MPI_Comm_rank(comm, &mpiRank);1626 ep_lib::MPI_Comm_size(comm, &mpiSize);1625 MPI_Comm_rank(comm, &mpiRank); 1626 MPI_Comm_size(comm, &mpiSize); 1627 1627 1628 1628 // (1) Generating unique node indexes … … 1770 1770 */ 1771 1771 1772 void CMesh::getGloNghbFacesEdgeType(const ep_lib::MPI_Comm& comm, const CArray<int, 1>& face_idx,1772 void CMesh::getGloNghbFacesEdgeType(const MPI_Comm& comm, const CArray<int, 1>& face_idx, 1773 1773 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, 1774 1774 CArray<int, 2>& nghbFaces) … … 1779 1779 1780 1780 int mpiRank, mpiSize; 1781 ep_lib::MPI_Comm_rank(comm, &mpiRank);1782 ep_lib::MPI_Comm_size(comm, &mpiSize);1781 MPI_Comm_rank(comm, &mpiRank); 1782 MPI_Comm_size(comm, &mpiSize); 1783 1783 1784 1784 // (1) Generating unique node indexes … … 1951 1951 */ 1952 1952 1953 void CMesh::getGlobalNghbFaces(const int nghbType, const ep_lib::MPI_Comm& comm,1953 void CMesh::getGlobalNghbFaces(const int nghbType, const MPI_Comm& comm, 1954 1954 const CArray<int, 1>& face_idx, 1955 1955 const CArray<double, 2>& bounds_lon, const CArray<double, 2>& bounds_lat, -
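The mesh.cpp hunks above repeat one pattern several times: an inclusive MPI_Scan over the local node/edge counts gives each rank its starting global id, and the running total held by the last rank is then broadcast to everybody as the global count. A minimal sketch of that pattern; it is illustrative only and keeps the count as unsigned long throughout so the MPI datatype matches the buffer:

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  unsigned long localCount = rank + 1;      // stand-in for nodeCount / edgeCount
  unsigned long runningTotal = 0;
  MPI_Scan(&localCount, &runningTotal, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);

  unsigned long globalCount = runningTotal; // complete only on the last rank so far
  MPI_Bcast(&globalCount, 1, MPI_UNSIGNED_LONG, size - 1, MPI_COMM_WORLD);

  unsigned long start = runningTotal - localCount;  // exclusive prefix = first global id
  std::printf("rank %d owns ids [%lu, %lu) of %lu\n",
              rank, start, start + localCount, globalCount);
  MPI_Finalize();
  return 0;
}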
XIOS/trunk/src/node/mesh.hpp
r1638 r1639 60 60 const CArray<double, 2>&, const CArray<double, 2>& ); 61 61 62 void createMeshEpsilon(const ep_lib::MPI_Comm&,62 void createMeshEpsilon(const MPI_Comm&, 63 63 const CArray<double, 1>&, const CArray<double, 1>&, 64 64 const CArray<double, 2>&, const CArray<double, 2>& ); 65 65 66 void getGlobalNghbFaces(const int, const ep_lib::MPI_Comm&, const CArray<int, 1>&,66 void getGlobalNghbFaces(const int, const MPI_Comm&, const CArray<int, 1>&, 67 67 const CArray<double, 2>&, const CArray<double, 2>&, 68 68 CArray<int, 2>&); … … 87 87 CClientClientDHTSizet* pNodeGlobalIndex; // pointer to a map <nodeHash, nodeIdxGlo> 88 88 CClientClientDHTSizet* pEdgeGlobalIndex; // pointer to a map <edgeHash, edgeIdxGlo> 89 void getGloNghbFacesNodeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);90 void getGloNghbFacesEdgeType(const ep_lib::MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&);89 void getGloNghbFacesNodeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 90 void getGloNghbFacesEdgeType(const MPI_Comm&, const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&); 91 91 void getLocNghbFacesNodeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); 92 92 void getLocNghbFacesEdgeType(const CArray<int, 1>&, const CArray<double, 2>&, const CArray<double, 2>&, CArray<int, 2>&, CArray<int, 1>&); -
XIOS/trunk/src/policy.cpp
r1638 r1639 49 49 //} 50 50 51 DivideAdaptiveComm::DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm)51 DivideAdaptiveComm::DivideAdaptiveComm(const MPI_Comm& mpiComm) 52 52 : internalComm_(mpiComm), level_(0), groupBegin_(), nbInGroup_(), computed_(false) 53 53 { … … 61 61 62 62 int mpiSize, mpiRank; 63 ep_lib::MPI_Comm_size(internalComm_,&mpiSize);64 ep_lib::MPI_Comm_rank(internalComm_,&mpiRank);63 MPI_Comm_size(internalComm_,&mpiSize); 64 MPI_Comm_rank(internalComm_,&mpiRank); 65 65 66 66 int maxChild=1; -
XIOS/trunk/src/policy.hpp
r1638 r1639 31 31 { 32 32 protected: 33 DivideAdaptiveComm(const ep_lib::MPI_Comm& mpiComm);33 DivideAdaptiveComm(const MPI_Comm& mpiComm); 34 34 35 35 void computeMPICommLevel(); … … 41 41 42 42 protected: 43 const ep_lib::MPI_Comm& internalComm_;43 const MPI_Comm& internalComm_; 44 44 std::vector<std::vector<int> > groupParentsBegin_; 45 45 std::vector<std::vector<int> > nbInGroupParents_; -
XIOS/trunk/src/registry.cpp
r1638 r1639 191 191 { 192 192 int rank ; 193 ep_lib::MPI_Comm_rank(communicator,&rank);193 MPI_Comm_rank(communicator,&rank); 194 194 if (rank==0) 195 195 { … … 197 197 this->toBuffer(buffer) ; 198 198 int size=buffer.count() ; 199 ep_lib::MPI_Bcast(&size,1,EP_INT,0,communicator) ;200 ep_lib::MPI_Bcast(buffer.start(),size,EP_CHAR,0,communicator) ;199 MPI_Bcast(&size,1,MPI_INT,0,communicator) ; 200 MPI_Bcast(buffer.start(),size,MPI_CHAR,0,communicator) ; 201 201 } 202 202 else 203 203 { 204 204 int size ; 205 ep_lib::MPI_Bcast(&size,1,EP_INT,0,communicator) ;205 MPI_Bcast(&size,1,MPI_INT,0,communicator) ; 206 206 CBufferIn buffer(size) ; 207 ep_lib::MPI_Bcast(buffer.start(),size,EP_CHAR,0,communicator) ;207 MPI_Bcast(buffer.start(),size,MPI_CHAR,0,communicator) ; 208 208 this->fromBuffer(buffer) ; 209 209 } … … 214 214 } 215 215 216 void CRegistry::gatherRegistry(const ep_lib::MPI_Comm& comm)216 void CRegistry::gatherRegistry(const MPI_Comm& comm) 217 217 { 218 218 int rank,mpiSize ; 219 ep_lib::MPI_Comm_rank(comm,&rank);220 ep_lib::MPI_Comm_size(comm,&mpiSize);219 MPI_Comm_rank(comm,&rank); 220 MPI_Comm_size(comm,&mpiSize); 221 221 222 222 int* sizes=new int[mpiSize] ; … … 224 224 this->toBuffer(localBuffer) ; 225 225 int localSize=localBuffer.count() ; 226 ep_lib::MPI_Gather(&localSize,1,EP_INT,sizes,1,EP_INT,0,comm) ;226 MPI_Gather(&localSize,1,MPI_INT,sizes,1,MPI_INT,0,comm) ; 227 227 228 228 char* globalBuffer ; … … 240 240 241 241 globalBuffer=new char[globalBufferSize] ; 242 ep_lib::MPI_Gatherv(localBuffer.start(),localSize,EP_CHAR,globalBuffer,sizes,displs,EP_CHAR,0,comm) ;242 MPI_Gatherv(localBuffer.start(),localSize,MPI_CHAR,globalBuffer,sizes,displs,MPI_CHAR,0,comm) ; 243 243 for(int i=1;i<mpiSize;++i) 244 244 { … … 251 251 delete[] globalBuffer ; 252 252 } 253 else ep_lib::MPI_Gatherv(localBuffer.start(),localSize,EP_CHAR,globalBuffer,sizes,displs,EP_CHAR,0,comm) ;253 else MPI_Gatherv(localBuffer.start(),localSize,MPI_CHAR,globalBuffer,sizes,displs,MPI_CHAR,0,comm) ; 254 254 delete[] sizes ; 255 255 … … 261 261 } 262 262 263 void CRegistry::hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm)263 void CRegistry::hierarchicalGatherRegistry(const MPI_Comm& comm) 264 264 { 265 265 int mpiRank,mpiSize ; 266 ep_lib::MPI_Comm_rank(comm,&mpiRank);267 ep_lib::MPI_Comm_size(comm,&mpiSize);266 MPI_Comm_rank(comm,&mpiRank); 267 MPI_Comm_size(comm,&mpiSize); 268 268 269 269 if (mpiSize>2) … … 272 272 if (mpiRank<mpiSize/2+mpiSize%2) color=0 ; 273 273 else color=1 ; 274 ep_lib::MPI_Comm commUp ;275 ep_lib::MPI_Comm_split(comm,color,mpiRank,&commUp) ,274 MPI_Comm commUp ; 275 MPI_Comm_split(comm,color,mpiRank,&commUp) , 276 276 hierarchicalGatherRegistry(commUp) ; 277 ep_lib::MPI_Comm_free(&commUp) ;277 MPI_Comm_free(&commUp) ; 278 278 } 279 279 280 280 if (mpiSize>1) 281 281 { 282 ep_lib::MPI_Comm commDown ;282 MPI_Comm commDown ; 283 283 int color ; 284 284 285 285 if (mpiRank==0 || mpiRank==mpiSize/2+mpiSize%2) color=0 ; 286 286 else color=1 ; 287 ep_lib::MPI_Comm_split(comm,color,mpiRank,&commDown) ;287 MPI_Comm_split(comm,color,mpiRank,&commDown) ; 288 288 if (color==0) gatherRegistry(commDown) ; 289 ep_lib::MPI_Comm_free(&commDown) ;289 MPI_Comm_free(&commDown) ; 290 290 } 291 291 } -
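The registry.cpp hunks above broadcast the serialized registry in two steps: rank 0 first broadcasts the buffer size, the other ranks allocate a buffer of that size, and then the payload itself is broadcast as MPI_CHAR. A minimal sketch of that size-then-payload broadcast, with a std::string standing in for the CBufferIn/CBufferOut serialization (illustrative only):

#include <mpi.h>
#include <string>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  std::string payload;
  if (rank == 0) payload = "key=value;another_key=42";   // pretend this is the registry

  int size = static_cast<int>(payload.size());
  MPI_Bcast(&size, 1, MPI_INT, 0, MPI_COMM_WORLD);            // step 1: the size
  if (rank != 0) payload.resize(size);
  MPI_Bcast(&payload[0], size, MPI_CHAR, 0, MPI_COMM_WORLD);  // step 2: the bytes

  std::printf("rank %d received %d bytes: %s\n", rank, size, payload.c_str());
  MPI_Finalize();
  return 0;
}

The hierarchical gather in the same file follows the same idea recursively: the communicator is split in two with MPI_Comm_split, each half gathers onto its leader, and the two leaders finally gather onto rank 0.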
XIOS/trunk/src/registry.hpp
r1638 r1639 23 23 24 24 /** Constructor, the communicator is used for bcast or gather operation between MPI processes */ 25 CRegistry(const ep_lib::MPI_Comm& comm=EP_COMM_WORLD) : communicator(comm) {}25 CRegistry(const MPI_Comm& comm=MPI_COMM_WORLD) : communicator(comm) {} 26 26 27 27 /** Copy constructor */ … … 106 106 107 107 /** use internally for recursivity */ 108 void gatherRegistry(const ep_lib::MPI_Comm& comm) ;108 void gatherRegistry(const MPI_Comm& comm) ; 109 109 110 110 /** use internally for recursivity */ 111 void hierarchicalGatherRegistry(const ep_lib::MPI_Comm& comm) ;111 void hierarchicalGatherRegistry(const MPI_Comm& comm) ; 112 112 113 113 … … 120 120 121 121 /** MPI communicator used for broadcast and gather operation */ 122 ep_lib::MPI_Comm communicator ;122 MPI_Comm communicator ; 123 123 } ; 124 124 -
XIOS/trunk/src/server.cpp
r1638 r1639 18 18 namespace xios 19 19 { 20 ep_lib::MPI_Comm CServer::intraComm ;21 std::list< ep_lib::MPI_Comm> CServer::interCommLeft ;22 std::list< ep_lib::MPI_Comm> CServer::interCommRight ;23 std::list< ep_lib::MPI_Comm> CServer::contextInterComms;24 std::list< ep_lib::MPI_Comm> CServer::contextIntraComms;20 MPI_Comm CServer::intraComm ; 21 std::list<MPI_Comm> CServer::interCommLeft ; 22 std::list<MPI_Comm> CServer::interCommRight ; 23 std::list<MPI_Comm> CServer::contextInterComms; 24 std::list<MPI_Comm> CServer::contextIntraComms; 25 25 int CServer::serverLevel = 0 ; 26 26 int CServer::nbContexts = 0; … … 48 48 { 49 49 int initialized ; 50 ep_lib::MPI_Initialized(&initialized) ;50 MPI_Initialized(&initialized) ; 51 51 if (initialized) is_MPI_Initialized=true ; 52 52 else is_MPI_Initialized=false ; … … 59 59 if (!is_MPI_Initialized) 60 60 { 61 ep_lib::MPI_Init(NULL, NULL);61 MPI_Init(NULL, NULL); 62 62 } 63 63 CTimer::get("XIOS").resume() ; … … 72 72 int myColor ; 73 73 int i,c ; 74 ep_lib::MPI_Comm newComm;75 76 ep_lib::MPI_Comm_size(CXios::globalComm, &size) ;77 ep_lib::MPI_Comm_rank(CXios::globalComm, &rank_);74 MPI_Comm newComm; 75 76 MPI_Comm_size(CXios::globalComm, &size) ; 77 MPI_Comm_rank(CXios::globalComm, &rank_); 78 78 79 79 hashAll=new unsigned long[size] ; 80 ep_lib::MPI_Allgather(&hashServer, 1, EP_LONG, hashAll, 1, EP_LONG, CXios::globalComm) ;80 MPI_Allgather(&hashServer, 1, MPI_LONG, hashAll, 1, MPI_LONG, CXios::globalComm) ; 81 81 82 82 map<unsigned long, int> colors ; … … 174 174 // (2) Create intraComm 175 175 if (serverLevel != 2) myColor=colors[hashServer]; 176 ep_lib::MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ;176 MPI_Comm_split(CXios::globalComm, myColor, rank_, &intraComm) ; 177 177 178 178 // (3) Create interComm … … 186 186 clientLeader=it->second ; 187 187 int intraCommSize, intraCommRank ; 188 ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;189 ep_lib::MPI_Comm_rank(intraComm,&intraCommRank) ;188 MPI_Comm_size(intraComm,&intraCommSize) ; 189 MPI_Comm_rank(intraComm,&intraCommRank) ; 190 190 info(50)<<"intercommCreate::server (classical mode) "<<rank_<<" intraCommSize : "<<intraCommSize 191 191 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 192 192 193 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;193 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 194 194 interCommLeft.push_back(newComm) ; 195 195 } … … 207 207 clientLeader=it->second ; 208 208 int intraCommSize, intraCommRank ; 209 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;210 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;209 MPI_Comm_size(intraComm, &intraCommSize) ; 210 MPI_Comm_rank(intraComm, &intraCommRank) ; 211 211 info(50)<<"intercommCreate::server (server level 1) "<<rank_<<" intraCommSize : "<<intraCommSize 212 212 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 213 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ;213 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 0, &newComm) ; 214 214 interCommLeft.push_back(newComm) ; 215 215 } … … 219 219 { 220 220 int intraCommSize, intraCommRank ; 221 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;222 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;221 MPI_Comm_size(intraComm, &intraCommSize) ; 222 MPI_Comm_rank(intraComm, &intraCommRank) ; 223 223 info(50)<<"intercommCreate::client (server level 1) "<<rank_<<" 
intraCommSize : "<<intraCommSize 224 224 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< sndServerGlobalRanks[i]<<endl ; 225 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ;225 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, sndServerGlobalRanks[i], 1, &newComm) ; 226 226 interCommRight.push_back(newComm) ; 227 227 } … … 232 232 clientLeader = leaders[hashString(CXios::xiosCodeId)]; 233 233 int intraCommSize, intraCommRank ; 234 ep_lib::MPI_Comm_size(intraComm, &intraCommSize) ;235 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;234 MPI_Comm_size(intraComm, &intraCommSize) ; 235 MPI_Comm_rank(intraComm, &intraCommRank) ; 236 236 info(50)<<"intercommCreate::server (server level 2) "<<rank_<<" intraCommSize : "<<intraCommSize 237 237 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< clientLeader<<endl ; 238 238 239 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ;239 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, clientLeader, 1, &newComm) ; 240 240 interCommLeft.push_back(newComm) ; 241 241 } … … 253 253 254 254 CTimer::get("XIOS").resume() ; 255 ep_lib::MPI_Comm localComm;255 MPI_Comm localComm; 256 256 oasis_get_localcomm(localComm); 257 ep_lib::MPI_Comm_rank(localComm,&rank_) ;257 MPI_Comm_rank(localComm,&rank_) ; 258 258 259 259 // (1) Create server intraComm 260 260 if (!CXios::usingServer2) 261 261 { 262 ep_lib::MPI_Comm_dup(localComm, &intraComm);262 MPI_Comm_dup(localComm, &intraComm); 263 263 } 264 264 else 265 265 { 266 266 int globalRank; 267 ep_lib::MPI_Comm_size(localComm,&size) ;268 ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank) ;267 MPI_Comm_size(localComm,&size) ; 268 MPI_Comm_rank(CXios::globalComm,&globalRank) ; 269 269 srvGlobalRanks = new int[size] ; 270 ep_lib::MPI_Allgather(&globalRank, 1, EP_INT, srvGlobalRanks, 1, EP_INT, localComm) ;270 MPI_Allgather(&globalRank, 1, MPI_INT, srvGlobalRanks, 1, MPI_INT, localComm) ; 271 271 272 272 int reqNbProc = size*CXios::ratioServer2/100.; … … 276 276 << "It is impossible to dedicate the requested number of processes = "<<reqNbProc 277 277 <<" to secondary server. 
XIOS will run in the classical server mode."<<endl; 278 ep_lib::MPI_Comm_dup(localComm, &intraComm);278 MPI_Comm_dup(localComm, &intraComm); 279 279 } 280 280 else … … 339 339 } 340 340 if (serverLevel != 2) myColor=0; 341 ep_lib::MPI_Comm_split(localComm, myColor, rank_, &intraComm) ;341 MPI_Comm_split(localComm, myColor, rank_, &intraComm) ; 342 342 } 343 343 } … … 348 348 vector<string>::iterator it ; 349 349 350 ep_lib::MPI_Comm newComm ;350 MPI_Comm newComm ; 351 351 int globalRank ; 352 ep_lib::MPI_Comm_rank(CXios::globalComm,&globalRank);352 MPI_Comm_rank(CXios::globalComm,&globalRank); 353 353 354 354 // (2) Create interComms with models … … 359 359 { 360 360 interCommLeft.push_back(newComm) ; 361 if (rank_==0) ep_lib::MPI_Send(&globalRank,1,EP_INT,0,0,newComm) ;361 if (rank_==0) MPI_Send(&globalRank,1,MPI_INT,0,0,newComm) ; 362 362 } 363 363 } … … 365 365 // (3) Create interComms between primary and secondary servers 366 366 int intraCommSize, intraCommRank ; 367 ep_lib::MPI_Comm_size(intraComm,&intraCommSize) ;368 ep_lib::MPI_Comm_rank(intraComm, &intraCommRank) ;367 MPI_Comm_size(intraComm,&intraCommSize) ; 368 MPI_Comm_rank(intraComm, &intraCommRank) ; 369 369 370 370 if (serverLevel == 1) … … 375 375 info(50)<<"intercommCreate::client (server level 1) "<<globalRank<<" intraCommSize : "<<intraCommSize 376 376 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvSndLeader<<endl ; 377 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ;377 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvSndLeader, 0, &newComm) ; 378 378 interCommRight.push_back(newComm) ; 379 379 } … … 383 383 info(50)<<"intercommCreate::server (server level 2)"<<globalRank<<" intraCommSize : "<<intraCommSize 384 384 <<" intraCommRank :"<<intraCommRank<<" clientLeader "<< srvGlobalRanks[0] <<endl ; 385 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ;385 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, srvGlobalRanks[0], 0, &newComm) ; 386 386 interCommLeft.push_back(newComm) ; 387 387 } … … 393 393 394 394 395 ep_lib::MPI_Comm_rank(intraComm, &rank) ;395 MPI_Comm_rank(intraComm, &rank) ; 396 396 if (rank==0) isRoot=true; 397 397 else isRoot=false; … … 406 406 delete eventScheduler ; 407 407 408 for (std::list< ep_lib::MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++)409 ep_lib::MPI_Comm_free(&(*it));410 411 for (std::list< ep_lib::MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++)412 ep_lib::MPI_Comm_free(&(*it));408 for (std::list<MPI_Comm>::iterator it = contextInterComms.begin(); it != contextInterComms.end(); it++) 409 MPI_Comm_free(&(*it)); 410 411 for (std::list<MPI_Comm>::iterator it = contextIntraComms.begin(); it != contextIntraComms.end(); it++) 412 MPI_Comm_free(&(*it)); 413 413 414 414 // for (std::list<MPI_Comm>::iterator it = interComm.begin(); it != interComm.end(); it++) … … 418 418 // MPI_Comm_free(&(*it)); 419 419 420 for (std::list< ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++)421 ep_lib::MPI_Comm_free(&(*it));422 423 ep_lib::MPI_Comm_free(&intraComm);420 for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++) 421 MPI_Comm_free(&(*it)); 422 423 MPI_Comm_free(&intraComm); 424 424 425 425 if (!is_MPI_Initialized) 426 426 { 427 427 if (CXios::usingOasis) oasis_finalize(); 428 else ep_lib::MPI_Finalize() ;428 else MPI_Finalize() ; 429 
429 } 430 430 report(0)<<"Performance report : Time spent for XIOS : "<<CTimer::get("XIOS server").getCumulatedTime()<<endl ; … … 465 465 void CServer::listenFinalize(void) 466 466 { 467 list< ep_lib::MPI_Comm>::iterator it, itr;467 list<MPI_Comm>::iterator it, itr; 468 468 int msg ; 469 469 int flag ; … … 471 471 for(it=interCommLeft.begin();it!=interCommLeft.end();it++) 472 472 { 473 ep_lib::MPI_Status status ;473 MPI_Status status ; 474 474 traceOff() ; 475 ep_lib::MPI_Iprobe(0,0,*it,&flag,&status) ;475 MPI_Iprobe(0,0,*it,&flag,&status) ; 476 476 traceOn() ; 477 477 if (flag==true) 478 478 { 479 ep_lib::MPI_Recv(&msg,1,EP_INT,0,0,*it,&status) ;479 MPI_Recv(&msg,1,MPI_INT,0,0,*it,&status) ; 480 480 info(20)<<" CServer : Receive client finalize"<<endl ; 481 481 // Sending server finalize message to secondary servers (if any) 482 482 for(itr=interCommRight.begin();itr!=interCommRight.end();itr++) 483 483 { 484 ep_lib::MPI_Send(&msg,1,EP_INT,0,0,*itr) ;484 MPI_Send(&msg,1,MPI_INT,0,0,*itr) ; 485 485 } 486 ep_lib::MPI_Comm_free(&(*it));486 MPI_Comm_free(&(*it)); 487 487 interCommLeft.erase(it) ; 488 488 break ; … … 493 493 { 494 494 int i,size ; 495 ep_lib::MPI_Comm_size(intraComm,&size) ;496 ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size-1] ;497 ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size-1] ;498 499 for(int i=1;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,4,intraComm,&requests[i-1]) ;500 ep_lib::MPI_Waitall(size-1,requests,status) ;495 MPI_Comm_size(intraComm,&size) ; 496 MPI_Request* requests= new MPI_Request[size-1] ; 497 MPI_Status* status= new MPI_Status[size-1] ; 498 499 for(int i=1;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,4,intraComm,&requests[i-1]) ; 500 MPI_Waitall(size-1,requests,status) ; 501 501 502 502 finished=true ; … … 510 510 { 511 511 int flag ; 512 ep_lib::MPI_Status status ;512 MPI_Status status ; 513 513 int msg ; 514 514 515 515 traceOff() ; 516 ep_lib::MPI_Iprobe(0,4,intraComm, &flag, &status) ;516 MPI_Iprobe(0,4,intraComm, &flag, &status) ; 517 517 traceOn() ; 518 518 if (flag==true) 519 519 { 520 ep_lib::MPI_Recv(&msg,1,EP_INT,0,4,intraComm,&status) ;520 MPI_Recv(&msg,1,MPI_INT,0,4,intraComm,&status) ; 521 521 finished=true ; 522 522 } … … 534 534 { 535 535 int flag ; 536 ep_lib::MPI_Status status ;537 list< ep_lib::MPI_Comm>::iterator it;536 MPI_Status status ; 537 list<MPI_Comm>::iterator it; 538 538 int msg ; 539 539 static int nbCompound=0 ; 540 540 int size ; 541 541 static bool sent=false ; 542 static ep_lib::MPI_Request* allRequests ;543 static ep_lib::MPI_Status* allStatus ;542 static MPI_Request* allRequests ; 543 static MPI_Status* allStatus ; 544 544 545 545 546 546 if (sent) 547 547 { 548 ep_lib::MPI_Comm_size(intraComm,&size) ;549 ep_lib::MPI_Testall(size,allRequests, &flag, allStatus) ;548 MPI_Comm_size(intraComm,&size) ; 549 MPI_Testall(size,allRequests, &flag, allStatus) ; 550 550 if (flag==true) 551 551 { … … 559 559 for(it=interCommLeft.begin();it!=interCommLeft.end();it++) 560 560 { 561 ep_lib::MPI_Status status ;561 MPI_Status status ; 562 562 traceOff() ; 563 ep_lib::MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5563 MPI_Iprobe(0,5,*it,&flag,&status) ; // tags oasis_endded = 5 564 564 traceOn() ; 565 565 if (flag==true) 566 566 { 567 ep_lib::MPI_Recv(&msg,1,EP_INT,0,5,*it,&status) ; // tags oasis_endded = 5567 MPI_Recv(&msg,1,MPI_INT,0,5,*it,&status) ; // tags oasis_endded = 5 568 568 nbCompound++ ; 569 569 if (nbCompound==interCommLeft.size()) 570 570 { 571 for (std::list< ep_lib::MPI_Comm>::iterator it = 
interCommRight.begin(); it != interCommRight.end(); it++)571 for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++) 572 572 { 573 ep_lib::MPI_Send(&msg,1,EP_INT,0,5,*it) ; // tags oasis_endded = 5573 MPI_Send(&msg,1,MPI_INT,0,5,*it) ; // tags oasis_endded = 5 574 574 } 575 ep_lib::MPI_Comm_size(intraComm,&size) ;576 allRequests= new ep_lib::MPI_Request[size] ;577 allStatus= new ep_lib::MPI_Status[size] ;578 for(int i=0;i<size;i++) ep_lib::MPI_Isend(&msg,1,EP_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5575 MPI_Comm_size(intraComm,&size) ; 576 allRequests= new MPI_Request[size] ; 577 allStatus= new MPI_Status[size] ; 578 for(int i=0;i<size;i++) MPI_Isend(&msg,1,MPI_INT,i,5,intraComm,&allRequests[i]) ; // tags oasis_endded = 5 579 579 sent=true ; 580 580 } … … 590 590 { 591 591 int flag ; 592 ep_lib::MPI_Status status ;592 MPI_Status status ; 593 593 const int root=0 ; 594 594 int msg ; … … 607 607 608 608 traceOff() ; 609 ep_lib::MPI_Iprobe(root,5,intraComm, &flag, &status) ;609 MPI_Iprobe(root,5,intraComm, &flag, &status) ; 610 610 traceOn() ; 611 611 if (flag==true) 612 612 { 613 ep_lib::MPI_Recv(&msg,1,EP_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5613 MPI_Recv(&msg,1,MPI_INT,root,5,intraComm,&status) ; // tags oasis_endded = 5 614 614 boost::hash<string> hashString; 615 615 size_t hashId = hashString("oasis_enddef"); … … 626 626 { 627 627 628 ep_lib::MPI_Status status ;628 MPI_Status status ; 629 629 int flag ; 630 630 static char* buffer ; 631 static ep_lib::MPI_Request request ;631 static MPI_Request request ; 632 632 static bool recept=false ; 633 633 int rank ; … … 637 637 { 638 638 traceOff() ; 639 #ifdef _usingMPI 640 ep_lib::MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 641 #elif _usingEP 642 ep_lib::MPI_Iprobe(-2,1,CXios::globalComm, &flag, &status) ; 643 #endif 639 MPI_Iprobe(MPI_ANY_SOURCE,1,CXios::globalComm, &flag, &status) ; 644 640 traceOn() ; 645 641 if (flag==true) 646 642 { 647 #ifdef _usingMPI648 643 rank=status.MPI_SOURCE ; 649 #elif _usingEP 650 rank=status.ep_src ; 651 #endif 652 ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ; 644 MPI_Get_count(&status,MPI_CHAR,&count) ; 653 645 buffer=new char[count] ; 654 ep_lib::MPI_Irecv((void*)buffer,count,EP_CHAR,rank,1,CXios::globalComm,&request) ;646 MPI_Irecv((void*)buffer,count,MPI_CHAR,rank,1,CXios::globalComm,&request) ; 655 647 recept=true ; 656 648 } … … 659 651 { 660 652 traceOff() ; 661 ep_lib::MPI_Test(&request,&flag,&status) ;653 MPI_Test(&request,&flag,&status) ; 662 654 traceOn() ; 663 655 if (flag==true) 664 656 { 665 #ifdef _usingMPI666 657 rank=status.MPI_SOURCE ; 667 #elif _usingEP 668 rank=status.ep_src ; 669 #endif 670 ep_lib::MPI_Get_count(&status,EP_CHAR,&count) ; 658 MPI_Get_count(&status,MPI_CHAR,&count) ; 671 659 recvContextMessage((void*)buffer,count) ; 672 660 delete [] buffer ; … … 701 689 { 702 690 int size ; 703 ep_lib::MPI_Comm_size(intraComm,&size) ;691 MPI_Comm_size(intraComm,&size) ; 704 692 // MPI_Request* requests= new MPI_Request[size-1] ; 705 693 // MPI_Status* status= new MPI_Status[size-1] ; 706 ep_lib::MPI_Request* requests= new ep_lib::MPI_Request[size] ;707 ep_lib::MPI_Status* status= new ep_lib::MPI_Status[size] ;694 MPI_Request* requests= new MPI_Request[size] ; 695 MPI_Status* status= new MPI_Status[size] ; 708 696 709 697 CMessage msg ; … … 717 705 for(int i=0; i<size; i++) 718 706 { 719 ep_lib::MPI_Isend(sendBuff,sendBuffer.count(),EP_CHAR,i,2,intraComm,&requests[i]) ;707 
MPI_Isend(sendBuff,sendBuffer.count(),MPI_CHAR,i,2,intraComm,&requests[i]) ; 720 708 } 721 709 … … 729 717 void CServer::listenRootContext(void) 730 718 { 731 ep_lib::MPI_Status status ;719 MPI_Status status ; 732 720 int flag ; 733 721 static std::vector<void*> buffers; 734 static std::vector< ep_lib::MPI_Request> requests ;722 static std::vector<MPI_Request> requests ; 735 723 static std::vector<int> counts ; 736 724 static std::vector<bool> isEventRegistered ; 737 725 static std::vector<bool> isEventQueued ; 738 ep_lib::MPI_Request request;726 MPI_Request request; 739 727 740 728 int rank ; … … 745 733 // (1) Receive context id from the root, save it into a buffer 746 734 traceOff() ; 747 ep_lib::MPI_Iprobe(root,2,intraComm, &flag, &status) ;735 MPI_Iprobe(root,2,intraComm, &flag, &status) ; 748 736 traceOn() ; 749 737 if (flag==true) 750 738 { 751 739 counts.push_back(0); 752 ep_lib::MPI_Get_count(&status,EP_CHAR,&(counts.back())) ;740 MPI_Get_count(&status,MPI_CHAR,&(counts.back())) ; 753 741 buffers.push_back(new char[counts.back()]) ; 754 742 requests.push_back(request); 755 ep_lib::MPI_Irecv((void*)(buffers.back()),counts.back(),EP_CHAR,root,2,intraComm,&(requests.back())) ;743 MPI_Irecv((void*)(buffers.back()),counts.back(),MPI_CHAR,root,2,intraComm,&(requests.back())) ; 756 744 isEventRegistered.push_back(false); 757 745 isEventQueued.push_back(false); … … 762 750 { 763 751 // (2) If context id is received, register an event 764 ep_lib::MPI_Test(&requests[ctxNb],&flag,&status) ;752 MPI_Test(&requests[ctxNb],&flag,&status) ; 765 753 if (flag==true && !isEventRegistered[ctxNb]) 766 754 { … … 799 787 // (1) create interComm (with a client) 800 788 // (2) initialize client and server (contextClient and contextServer) 801 ep_lib::MPI_Comm inter;789 MPI_Comm inter; 802 790 if (serverLevel < 2) 803 791 { 804 ep_lib::MPI_Comm contextInterComm;805 ep_lib::MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm);806 ep_lib::MPI_Intercomm_merge(contextInterComm,1,&inter);807 ep_lib::MPI_Barrier(inter);808 ep_lib::MPI_Comm_free(&inter);792 MPI_Comm contextInterComm; 793 MPI_Intercomm_create(intraComm, 0, CXios::globalComm, leaderRank, 10+leaderRank, &contextInterComm); 794 MPI_Intercomm_merge(contextInterComm,1,&inter); 795 MPI_Barrier(inter); 796 MPI_Comm_free(&inter); 809 797 context->initServer(intraComm,contextInterComm); 810 798 contextInterComms.push_back(contextInterComm); … … 819 807 else if (serverLevel == 2) 820 808 { 821 ep_lib::MPI_Comm_dup(interCommLeft.front(), &inter);809 MPI_Comm_dup(interCommLeft.front(), &inter); 822 810 contextInterComms.push_back(inter); 823 811 context->initServer(intraComm, contextInterComms.back()); … … 830 818 { 831 819 int i = 0, size; 832 ep_lib::MPI_Comm_size(intraComm, &size) ;833 for (std::list< ep_lib::MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i)820 MPI_Comm_size(intraComm, &size) ; 821 for (std::list<MPI_Comm>::iterator it = interCommRight.begin(); it != interCommRight.end(); it++, ++i) 834 822 { 835 823 StdString str = contextId +"_server_" + boost::lexical_cast<string>(i); … … 841 829 CBufferOut buffer(buff,messageSize) ; 842 830 buffer<<msg ; 843 ep_lib::MPI_Send(buff, buffer.count(), EP_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ;844 ep_lib::MPI_Comm_dup(*it, &inter);831 MPI_Send(buff, buffer.count(), MPI_CHAR, sndServerGlobalRanks[i], 1, CXios::globalComm) ; 832 MPI_Comm_dup(*it, &inter); 845 833 contextInterComms.push_back(inter); 846 
ep_lib::MPI_Comm_dup(intraComm, &inter);834 MPI_Comm_dup(intraComm, &inter); 847 835 contextIntraComms.push_back(inter); 848 836 context->initClient(contextIntraComms.back(), contextInterComms.back()) ; … … 874 862 { 875 863 int rank; 876 ep_lib::MPI_Comm_rank(intraComm,&rank);864 MPI_Comm_rank(intraComm,&rank); 877 865 return rank; 878 866 } … … 897 885 int size = 0; 898 886 int id; 899 ep_lib::MPI_Comm_size(CXios::globalComm, &size);887 MPI_Comm_size(CXios::globalComm, &size); 900 888 while (size) 901 889 { -
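The server.cpp hunks above build the interCommLeft/interCommRight lists by first splitting the global communicator into per-pool intra-communicators (the colour comes from hashed code identifiers) and then calling MPI_Intercomm_create with the remote pool's leader rank. A minimal standalone sketch of that split-then-intercommunicate pattern, with a trivial colour instead of the hash and meant to be run on at least 2 ranks (illustrative only):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int globalRank, globalSize;
  MPI_Comm_rank(MPI_COMM_WORLD, &globalRank);
  MPI_Comm_size(MPI_COMM_WORLD, &globalSize);

  int color = (globalRank < globalSize / 2) ? 0 : 1;    // 0 = "client" pool, 1 = "server" pool
  MPI_Comm intraComm;
  MPI_Comm_split(MPI_COMM_WORLD, color, globalRank, &intraComm);

  // Leader of the other pool, expressed as a rank of MPI_COMM_WORLD.
  int remoteLeader = (color == 0) ? globalSize / 2 : 0;

  MPI_Comm interComm;
  MPI_Intercomm_create(intraComm, 0, MPI_COMM_WORLD, remoteLeader, 0, &interComm);

  int remoteSize;
  MPI_Comm_remote_size(interComm, &remoteSize);
  if (globalRank == 0 || globalRank == globalSize / 2)
    std::printf("pool %d leader sees %d ranks on the other side\n", color, remoteSize);

  MPI_Comm_free(&interComm);
  MPI_Comm_free(&intraComm);
  MPI_Finalize();
  return 0;
}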
XIOS/trunk/src/server.hpp
r1638 r1639 26 26 static void registerContext(void* buff,int count, int leaderRank=0); 27 27 28 static ep_lib::MPI_Comm intraComm;29 static std::list< ep_lib::MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server)30 static std::list< ep_lib::MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool)31 static std::list< ep_lib::MPI_Comm> contextInterComms; // list of context intercomms32 static std::list< ep_lib::MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers)28 static MPI_Comm intraComm; 29 static std::list<MPI_Comm> interCommLeft; // interComm between server (primary, classical or secondary) and its client (client or primary server) 30 static std::list<MPI_Comm> interCommRight; // interComm between primary server and secondary server (non-empty only for primary server pool) 31 static std::list<MPI_Comm> contextInterComms; // list of context intercomms 32 static std::list<MPI_Comm> contextIntraComms; // list of context intercomms (needed only in case of secondary servers) 33 33 static CEventScheduler* eventScheduler; 34 34 -
XIOS/trunk/src/timer.cpp
r1638 r1639 1 1 #include "timer.hpp" 2 //#include "mpi_std.hpp"3 2 #include "mpi.hpp" 4 3 #include <string> … … 7 6 #include <sstream> 8 7 #include "tracer.hpp" 9 //extern ::MPI_Comm MPI_COMM_WORLD;10 8 11 9 namespace xios … … 20 18 double CTimer::getTime(void) 21 19 { 22 MPI_COMM_WORLD;23 20 return MPI_Wtime(); 24 21 } -
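The timer.cpp change above removes a commented-out include, a commented-out extern declaration and the stray "MPI_COMM_WORLD;" statement (a no-op), leaving getTime() as a plain call to MPI_Wtime(). A minimal sketch of timing a region this way (illustrative only):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  double t0 = MPI_Wtime();
  volatile double s = 0.0;
  for (long i = 0; i < 10000000; ++i) s = s + 1.0 / (i + 1);   // some work to time
  double elapsed = MPI_Wtime() - t0;

  std::printf("work took %.6f s (timer resolution %.3e s)\n", elapsed, MPI_Wtick());
  MPI_Finalize();
  return 0;
}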
XIOS/trunk/src/tracer.cpp
r1638 r1639 1 1 #include "tracer.hpp" 2 2 #ifdef VTRACE 3 //#include <vt_user.h> 4 #include <VT.h> 3 #include <vt_user.h> 5 4 #endif 6 5 #include <string> … … 13 12 { 14 13 #ifdef VTRACE 15 //VT_ON() ; 16 VT_traceon() ; 14 VT_ON() ; 17 15 #endif 18 16 } … … 21 19 { 22 20 #ifdef VTRACE 23 //VT_OFF() ; 24 VT_traceoff() ; 21 VT_OFF() ; 25 22 #endif 26 23 } … … 29 26 { 30 27 #ifdef VTRACE 31 //VT_USER_START(name.c_str()) ;28 VT_USER_START(name.c_str()) ; 32 29 #endif 33 30 } … … 36 33 { 37 34 #ifdef VTRACE 38 //VT_USER_END(name.c_str()) ;35 VT_USER_END(name.c_str()) ; 39 36 #endif 40 37 } -
XIOS/trunk/src/transformation/axis_algorithm_interpolate.cpp
r1638 r1639 272 272 273 273 int* recvCount=new int[nbClient]; 274 ep_lib::MPI_Allgather(&numValue,1,EP_INT,recvCount,1,EP_INT,client->intraComm);274 MPI_Allgather(&numValue,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 275 275 276 276 int* displ=new int[nbClient]; … … 279 279 280 280 // Each client have enough global info of axis 281 ep_lib::MPI_Allgatherv(sendIndexBuff,numValue,EP_INT,recvIndexBuff,recvCount,displ,EP_INT,client->intraComm);282 ep_lib::MPI_Allgatherv(sendValueBuff,numValue,EP_DOUBLE,&(recvBuff[0]),recvCount,displ,EP_DOUBLE,client->intraComm);281 MPI_Allgatherv(sendIndexBuff,numValue,MPI_INT,recvIndexBuff,recvCount,displ,MPI_INT,client->intraComm); 282 MPI_Allgatherv(sendValueBuff,numValue,MPI_DOUBLE,&(recvBuff[0]),recvCount,displ,MPI_DOUBLE,client->intraComm); 283 283 284 284 for (int idx = 0; idx < srcSize; ++idx) -
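The axis_algorithm_interpolate.cpp hunk above is the classic all-gather of variable-length data: MPI_Allgather first exchanges the per-rank value counts, each rank builds the displacement array, and MPI_Allgatherv then gives every client the complete axis. A minimal sketch with illustrative values:

#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int numValue = rank + 1;                              // each rank owns a different count
  std::vector<double> sendValue(numValue, 100.0 * rank);

  std::vector<int> recvCount(size), displ(size);
  MPI_Allgather(&numValue, 1, MPI_INT, recvCount.data(), 1, MPI_INT, MPI_COMM_WORLD);

  displ[0] = 0;
  for (int i = 1; i < size; ++i) displ[i] = displ[i - 1] + recvCount[i - 1];
  int total = displ[size - 1] + recvCount[size - 1];

  std::vector<double> allValues(total);
  MPI_Allgatherv(sendValue.data(), numValue, MPI_DOUBLE,
                 allValues.data(), recvCount.data(), displ.data(), MPI_DOUBLE,
                 MPI_COMM_WORLD);

  if (rank == 0) std::printf("every rank now holds all %d axis values\n", total);
  MPI_Finalize();
  return 0;
}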
XIOS/trunk/src/transformation/axis_algorithm_inverse.cpp
r1638 r1639 161 161 sendRankSizeMap[itIndex->first] = sendSize; 162 162 } 163 ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);163 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 164 164 165 165 displ[0]=0 ; … … 168 168 int* recvRankBuff=new int[recvSize]; 169 169 int* recvSizeBuff=new int[recvSize]; 170 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);171 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);170 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 171 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 172 172 for (int i = 0; i < nbClient; ++i) 173 173 { … … 181 181 182 182 // Sending global index of grid source to corresponding process as well as the corresponding mask 183 std::vector< ep_lib::MPI_Request> requests;184 std::vector< ep_lib::MPI_Status> status;183 std::vector<MPI_Request> requests; 184 std::vector<MPI_Status> status; 185 185 std::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 186 186 std::unordered_map<int, double* > sendValueToDest; … … 192 192 sendValueToDest[recvRank] = new double [recvSize]; 193 193 194 requests.push_back( ep_lib::MPI_Request());195 ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back());194 requests.push_back(MPI_Request()); 195 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 196 196 } 197 197 … … 214 214 215 215 // Send global index source and mask 216 requests.push_back( ep_lib::MPI_Request());217 ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back());216 requests.push_back(MPI_Request()); 217 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 218 218 } 219 219 220 220 status.resize(requests.size()); 221 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);222 223 224 std::vector< ep_lib::MPI_Request>().swap(requests);225 std::vector< ep_lib::MPI_Status>().swap(status);221 MPI_Waitall(requests.size(), &requests[0], &status[0]); 222 223 224 std::vector<MPI_Request>().swap(requests); 225 std::vector<MPI_Status>().swap(status); 226 226 227 227 // Okie, on destination side, we will wait for information of masked index of source … … 231 231 int recvSize = itSend->second; 232 232 233 requests.push_back( ep_lib::MPI_Request());234 ep_lib::MPI_Irecv(recvValueFromSrc[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());233 requests.push_back(MPI_Request()); 234 MPI_Irecv(recvValueFromSrc[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 235 235 } 236 236 … … 249 249 } 250 250 // Okie, now inform the destination which source index are masked 251 requests.push_back( ep_lib::MPI_Request());252 ep_lib::MPI_Isend(sendValueToDest[recvRank], recvSize, EP_DOUBLE, recvRank, 48, client->intraComm, &requests.back());251 requests.push_back(MPI_Request()); 252 MPI_Isend(sendValueToDest[recvRank], recvSize, MPI_DOUBLE, recvRank, 48, client->intraComm, &requests.back()); 253 253 } 254 254 status.resize(requests.size()); 255 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);255 
MPI_Waitall(requests.size(), &requests[0], &status[0]); 256 256 257 257 -
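The axis_algorithm_inverse.cpp hunks above exchange global indexes and values with a set of nonblocking operations: an MPI_Irecv is posted for every rank we expect data from, an MPI_Isend for every rank we owe data to, and a single MPI_Waitall completes them. Collecting the requests in a std::vector and passing &requests.back() works because only the handle value ends up stored in the vector. A minimal sketch where every rank exchanges one value with every other rank, reusing the tag 46 seen above (illustrative only):

#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<unsigned long> recvBuf(size), sendBuf(size, 1000UL * rank);
  std::vector<MPI_Request> requests;

  for (int peer = 0; peer < size; ++peer)       // post all receives first
  {
    if (peer == rank) continue;
    requests.push_back(MPI_Request());
    MPI_Irecv(&recvBuf[peer], 1, MPI_UNSIGNED_LONG, peer, 46, MPI_COMM_WORLD, &requests.back());
  }
  for (int peer = 0; peer < size; ++peer)       // then all sends
  {
    if (peer == rank) continue;
    requests.push_back(MPI_Request());
    MPI_Isend(&sendBuf[peer], 1, MPI_UNSIGNED_LONG, peer, 46, MPI_COMM_WORLD, &requests.back());
  }

  std::vector<MPI_Status> status(requests.size());
  if (!requests.empty())
    MPI_Waitall(static_cast<int>(requests.size()), requests.data(), status.data());

  std::printf("rank %d completed %zu nonblocking operations\n", rank, requests.size());
  MPI_Finalize();
  return 0;
}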
XIOS/trunk/src/transformation/domain_algorithm_generate_rectilinear.cpp
r1638 r1639 70 70 StdSize hashValue = hashFunc.hashVec(globalAxisIndex); 71 71 std::vector<StdSize> recvBuff(client->clientSize); 72 ep_lib::MPI_Gather(&hashValue, 1, EP_UNSIGNED_LONG,73 &recvBuff[0], 1, EP_UNSIGNED_LONG,72 MPI_Gather(&hashValue, 1, MPI_UNSIGNED_LONG, 73 &recvBuff[0], 1, MPI_UNSIGNED_LONG, 74 74 0, 75 75 client->intraComm); … … 87 87 } 88 88 89 ep_lib::MPI_Bcast(&nbLocalAxis[0], nbAxis, EP_INT,89 MPI_Bcast(&nbLocalAxis[0], nbAxis, MPI_INT, 90 90 0, client->intraComm); 91 91 } -
XIOS/trunk/src/transformation/domain_algorithm_interpolate.cpp
r1638 r1639 434 434 CContextClient* client=context->client; 435 435 436 ep_lib::MPI_Comm poleComme(EP_COMM_NULL); 437 #ifdef _usingMPI 436 MPI_Comm poleComme(MPI_COMM_NULL); 438 437 MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? MPI_UNDEFINED : 1, 0, &poleComme); 439 #elif _usingEP 440 ep_lib::MPI_Comm_split(client->intraComm, interMapValuePole.empty() ? 0 : 1, 0, &poleComme); 441 #endif 442 if (EP_COMM_NULL != poleComme) 438 if (MPI_COMM_NULL != poleComme) 443 439 { 444 440 int nbClientPole; 445 ep_lib::MPI_Comm_size(poleComme, &nbClientPole);441 MPI_Comm_size(poleComme, &nbClientPole); 446 442 447 443 std::map<int,std::vector<std::pair<int,double> > >::iterator itePole = interMapValuePole.end(), itPole, … … 454 450 std::vector<int> recvCount(nbClientPole,0); 455 451 std::vector<int> displ(nbClientPole,0); 456 ep_lib::MPI_Allgather(&nbWeight,1,EP_INT,&recvCount[0],1,EP_INT,poleComme) ;452 MPI_Allgather(&nbWeight,1,MPI_INT,&recvCount[0],1,MPI_INT,poleComme) ; 457 453 458 454 displ[0]=0; … … 477 473 478 474 // Gather all index and weight for pole 479 ep_lib::MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,EP_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],EP_INT,poleComme);480 ep_lib::MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,EP_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],EP_DOUBLE,poleComme);475 MPI_Allgatherv(&sendSourceIndexBuff[0],nbWeight,MPI_INT,&recvSourceIndexBuff[0],&recvCount[0],&displ[0],MPI_INT,poleComme); 476 MPI_Allgatherv(&sendSourceWeightBuff[0],nbWeight,MPI_DOUBLE,&recvSourceWeightBuff[0],&recvCount[0],&displ[0],MPI_DOUBLE,poleComme); 481 477 482 478 std::map<int,double> recvTemp; … … 635 631 636 632 637 ep_lib::MPI_Allreduce(sendBuff, recvBuff, nbClient, EP_INT, EP_SUM, client->intraComm);633 MPI_Allreduce(sendBuff, recvBuff, nbClient, MPI_INT, MPI_SUM, client->intraComm); 638 634 639 635 int* sendIndexDestBuff = new int [sendBuffSize]; … … 641 637 double* sendWeightBuff = new double [sendBuffSize]; 642 638 643 std::vector< ep_lib::MPI_Request> sendRequest;639 std::vector<MPI_Request> sendRequest; 644 640 645 641 int sendOffSet = 0, l = 0; … … 662 658 } 663 659 664 sendRequest.push_back( ep_lib::MPI_Request());665 ep_lib::MPI_Isend(sendIndexDestBuff + sendOffSet,660 sendRequest.push_back(MPI_Request()); 661 MPI_Isend(sendIndexDestBuff + sendOffSet, 666 662 k, 667 EP_INT,663 MPI_INT, 668 664 itMap->first, 669 665 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 670 666 client->intraComm, 671 667 &sendRequest.back()); 672 sendRequest.push_back( ep_lib::MPI_Request());673 ep_lib::MPI_Isend(sendIndexSrcBuff + sendOffSet,668 sendRequest.push_back(MPI_Request()); 669 MPI_Isend(sendIndexSrcBuff + sendOffSet, 674 670 k, 675 EP_INT,671 MPI_INT, 676 672 itMap->first, 677 673 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, 678 674 client->intraComm, 679 675 &sendRequest.back()); 680 sendRequest.push_back( ep_lib::MPI_Request());681 ep_lib::MPI_Isend(sendWeightBuff + sendOffSet,676 sendRequest.push_back(MPI_Request()); 677 MPI_Isend(sendWeightBuff + sendOffSet, 682 678 k, 683 EP_DOUBLE,679 MPI_DOUBLE, 684 680 itMap->first, 685 681 MPI_DOMAIN_INTERPOLATION_WEIGHT, … … 697 693 while (receivedSize < recvBuffSize) 698 694 { 699 ep_lib::MPI_Status recvStatus; 700 #ifdef _usingMPI 695 MPI_Status recvStatus; 701 696 MPI_Recv((recvIndexDestBuff + receivedSize), 702 697 recvBuffSize, 703 EP_INT,698 MPI_INT, 704 699 MPI_ANY_SOURCE, 705 700 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 706 701 client->intraComm, 707 702 &recvStatus); 708 #elif _usingEP 709 
ep_lib::MPI_Recv((recvIndexDestBuff + receivedSize), 703 704 int countBuff = 0; 705 MPI_Get_count(&recvStatus, MPI_INT, &countBuff); 706 clientSrcRank = recvStatus.MPI_SOURCE; 707 708 MPI_Recv((recvIndexSrcBuff + receivedSize), 710 709 recvBuffSize, 711 EP_INT, 712 -2, 713 MPI_DOMAIN_INTERPOLATION_DEST_INDEX, 714 client->intraComm, 715 &recvStatus); 716 #endif 717 718 int countBuff = 0; 719 ep_lib::MPI_Get_count(&recvStatus, EP_INT, &countBuff); 720 #ifdef _usingMPI 721 clientSrcRank = recvStatus.MPI_SOURCE; 722 #elif _usingEP 723 clientSrcRank = recvStatus.ep_src; 724 #endif 725 726 ep_lib::MPI_Recv((recvIndexSrcBuff + receivedSize), 727 recvBuffSize, 728 EP_INT, 710 MPI_INT, 729 711 clientSrcRank, 730 712 MPI_DOMAIN_INTERPOLATION_SRC_INDEX, … … 732 714 &recvStatus); 733 715 734 ep_lib::MPI_Recv((recvWeightBuff + receivedSize),716 MPI_Recv((recvWeightBuff + receivedSize), 735 717 recvBuffSize, 736 EP_DOUBLE,718 MPI_DOUBLE, 737 719 clientSrcRank, 738 720 MPI_DOMAIN_INTERPOLATION_WEIGHT, … … 748 730 } 749 731 750 std::vector<ep_lib::MPI_Status> requestStatus(sendRequest.size()); 751 #ifdef _usingMPI 732 std::vector<MPI_Status> requestStatus(sendRequest.size()); 752 733 MPI_Waitall(sendRequest.size(), &sendRequest[0], MPI_STATUS_IGNORE); 753 #elif _usingEP754 std::vector<ep_lib::MPI_Status> waitstat(sendRequest.size());755 ep_lib::MPI_Waitall(sendRequest.size(), &sendRequest[0], &waitstat[0]);756 #endif757 734 758 735 delete [] sendIndexDestBuff; … … 768 745 769 746 /*! Redefined some functions of CONetCDF4 to make use of them */ 770 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const ep_lib::MPI_Comm comm)747 CDomainAlgorithmInterpolate::WriteNetCdf::WriteNetCdf(const StdString& filename, const MPI_Comm comm) 771 748 : CNc4DataOutput(NULL, filename, false, false, true, comm, false, true) {} 772 749 int CDomainAlgorithmInterpolate::WriteNetCdf::addDimensionWrite(const StdString& name, … … 858 835 } 859 836 860 ep_lib::MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, EP_LONG, EP_SUM, client->intraComm);861 ep_lib::MPI_Scan(&localNbWeight, &startIndex, 1, EP_LONG, EP_SUM, client->intraComm);837 MPI_Allreduce(&localNbWeight, &globalNbWeight, 1, MPI_LONG, MPI_SUM, client->intraComm); 838 MPI_Scan(&localNbWeight, &startIndex, 1, MPI_LONG, MPI_SUM, client->intraComm); 862 839 863 840 if (0 == globalNbWeight) -
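The receive loop restored above takes a message from any sender (MPI_ANY_SOURCE), then uses MPI_Get_count and status.MPI_SOURCE to learn how much arrived and from whom, so the matching source-index and weight messages can be received from that same rank. A minimal sketch of this pattern with hypothetical tags, meant for at least 2 ranks (illustrative only):

#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  const int TAG_INDEX = 7, TAG_WEIGHT = 8;      // hypothetical tags
  if (rank != 0)
  {
    std::vector<int> index(rank, rank);         // rank r sends r entries
    std::vector<double> weight(rank, 1.0 / rank);
    MPI_Send(index.data(), rank, MPI_INT, 0, TAG_INDEX, MPI_COMM_WORLD);
    MPI_Send(weight.data(), rank, MPI_DOUBLE, 0, TAG_WEIGHT, MPI_COMM_WORLD);
  }
  else
  {
    std::vector<int> index(size);
    std::vector<double> weight(size);
    for (int msg = 0; msg < size - 1; ++msg)
    {
      MPI_Status status;
      MPI_Recv(index.data(), size, MPI_INT, MPI_ANY_SOURCE, TAG_INDEX, MPI_COMM_WORLD, &status);
      int count = 0;
      MPI_Get_count(&status, MPI_INT, &count);  // how many entries really arrived
      int src = status.MPI_SOURCE;              // who sent them
      MPI_Recv(weight.data(), size, MPI_DOUBLE, src, TAG_WEIGHT, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      std::printf("root received %d weights from rank %d\n", count, src);
    }
  }
  MPI_Finalize();
  return 0;
}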
XIOS/trunk/src/transformation/generic_algorithm_transformation.cpp
r1638 r1639 136 136 { 137 137 distributed=domainListSrcP[elementPositionInGridSrc2DomainPosition_[elementPositionInGrid]]->isDistributed() ; 138 ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;138 MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 139 139 140 140 } … … 142 142 { 143 143 distributed=axisListSrcP[elementPositionInGridSrc2AxisPosition_[elementPositionInGrid]]->isDistributed() ; 144 ep_lib::MPI_Allreduce(&distributed,&distributed_glo, 1, EP_INT, EP_LOR, client->intraComm) ;144 MPI_Allreduce(&distributed,&distributed_glo, 1, MPI_INT, MPI_LOR, client->intraComm) ; 145 145 } 146 146 else //it's a scalar … … 238 238 int sendValue = (computeGlobalIndexOnProc) ? 1 : 0; 239 239 int recvValue = 0; 240 ep_lib::MPI_Allreduce(&sendValue, &recvValue, 1, EP_INT, EP_SUM, client->intraComm);240 MPI_Allreduce(&sendValue, &recvValue, 1, MPI_INT, MPI_SUM, client->intraComm); 241 241 computeGlobalIndexOnProc = (0 < recvValue); 242 242 -
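The generic_algorithm_transformation.cpp hunks above reduce a per-rank "is this element distributed?" flag with MPI_LOR, so every process ends up with the same answer to "is any piece of it distributed?". A minimal sketch (illustrative only):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int distributed = (rank == size - 1) ? 1 : 0;   // only the last rank says yes
  int distributedGlobal = 0;
  MPI_Allreduce(&distributed, &distributedGlobal, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);

  std::printf("rank %d: distributed_glo = %d\n", rank, distributedGlobal);   // 1 everywhere
  MPI_Finalize();
  return 0;
}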
XIOS/trunk/src/transformation/grid_transformation.cpp
r1638 r1639 514 514 sendRankSizeMap[itIndex->first] = sendSize; 515 515 } 516 ep_lib::MPI_Allgather(&connectedClient,1,EP_INT,recvCount,1,EP_INT,client->intraComm);516 MPI_Allgather(&connectedClient,1,MPI_INT,recvCount,1,MPI_INT,client->intraComm); 517 517 518 518 displ[0]=0 ; … … 521 521 int* recvRankBuff=new int[recvSize]; 522 522 int* recvSizeBuff=new int[recvSize]; 523 ep_lib::MPI_Allgatherv(sendRankBuff,connectedClient,EP_INT,recvRankBuff,recvCount,displ,EP_INT,client->intraComm);524 ep_lib::MPI_Allgatherv(sendSizeBuff,connectedClient,EP_INT,recvSizeBuff,recvCount,displ,EP_INT,client->intraComm);523 MPI_Allgatherv(sendRankBuff,connectedClient,MPI_INT,recvRankBuff,recvCount,displ,MPI_INT,client->intraComm); 524 MPI_Allgatherv(sendSizeBuff,connectedClient,MPI_INT,recvSizeBuff,recvCount,displ,MPI_INT,client->intraComm); 525 525 for (int i = 0; i < nbClient; ++i) 526 526 { … … 534 534 535 535 // Sending global index of grid source to corresponding process as well as the corresponding mask 536 std::vector< ep_lib::MPI_Request> requests(recvRankSizeMap.size()*2 + globaIndexWeightFromSrcToDst.size()*2);537 std::vector< ep_lib::MPI_Status> status;536 std::vector<MPI_Request> requests; 537 std::vector<MPI_Status> status; 538 538 std::unordered_map<int, unsigned char* > recvMaskDst; 539 539 std::unordered_map<int, unsigned long* > recvGlobalIndexSrc; 540 int requests_position = 0;541 540 for (std::map<int,int>::const_iterator itRecv = recvRankSizeMap.begin(); itRecv != recvRankSizeMap.end(); ++itRecv) 542 541 { … … 546 545 recvGlobalIndexSrc[recvRank] = new unsigned long [recvSize]; 547 546 548 ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests[requests_position++]); 549 ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests[requests_position++]); 550 551 //requests.push_back(ep_lib::MPI_Request()); 552 //ep_lib::MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, EP_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 553 //requests.push_back(ep_lib::MPI_Request()); 554 //ep_lib::MPI_Irecv(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 547 requests.push_back(MPI_Request()); 548 MPI_Irecv(recvGlobalIndexSrc[recvRank], recvSize, MPI_UNSIGNED_LONG, recvRank, 46, client->intraComm, &requests.back()); 549 requests.push_back(MPI_Request()); 550 MPI_Irecv(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 47, client->intraComm, &requests.back()); 555 551 } 556 552 … … 587 583 588 584 // Send global index source and mask 589 ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests[requests_position++]); 590 ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests[requests_position++]); 591 //requests.push_back(ep_lib::MPI_Request()); 592 //ep_lib::MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, EP_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 593 //requests.push_back(ep_lib::MPI_Request()); 594 //ep_lib::MPI_Isend(sendMaskDst[sendRank], sendSize, EP_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 585 requests.push_back(MPI_Request()); 586 MPI_Isend(sendGlobalIndexSrc[sendRank], sendSize, MPI_UNSIGNED_LONG, sendRank, 46, client->intraComm, &requests.back()); 587 requests.push_back(MPI_Request()); 588 MPI_Isend(sendMaskDst[sendRank], sendSize, 
MPI_UNSIGNED_CHAR, sendRank, 47, client->intraComm, &requests.back()); 595 589 } 596 590 597 591 status.resize(requests.size()); 598 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);592 MPI_Waitall(requests.size(), &requests[0], &status[0]); 599 593 600 594 // Okie, now use the mask to identify which index source we need to send, then also signal the destination which masked index we will return 601 requests.resize(sendRankSizeMap.size() + recvRankSizeMap.size()); 602 requests_position = 0; 603 std::vector<ep_lib::MPI_Status>().swap(status); 595 std::vector<MPI_Request>().swap(requests); 596 std::vector<MPI_Status>().swap(status); 604 597 // Okie, on destination side, we will wait for information of masked index of source 605 598 for (std::map<int,int>::const_iterator itSend = sendRankSizeMap.begin(); itSend != sendRankSizeMap.end(); ++itSend) … … 608 601 int recvSize = itSend->second; 609 602 610 ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 611 //requests.push_back(ep_lib::MPI_Request()); 612 //ep_lib::MPI_Irecv(sendMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 603 requests.push_back(MPI_Request()); 604 MPI_Irecv(sendMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 613 605 } 614 606 … … 646 638 647 639 // Okie, now inform the destination which source index are masked 648 ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests[requests_position++]); 649 //requests.push_back(ep_lib::MPI_Request()); 650 //ep_lib::MPI_Isend(recvMaskDst[recvRank], recvSize, EP_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 640 requests.push_back(MPI_Request()); 641 MPI_Isend(recvMaskDst[recvRank], recvSize, MPI_UNSIGNED_CHAR, recvRank, 48, client->intraComm, &requests.back()); 651 642 } 652 643 status.resize(requests.size()); 653 ep_lib::MPI_Waitall(requests.size(), &requests[0], &status[0]);644 MPI_Waitall(requests.size(), &requests[0], &status[0]); 654 645 655 646 // Cool, now we can fill in local index of grid destination (counted for masked index)