Changeset 1638 for XIOS/trunk/extern/remap/src/parallel_tree.cpp
- Timestamp:
- 01/22/19 16:15:03 (5 years ago)
- File:
-
- 1 edited
Legend:
- Unmodified
- Added
- Removed
-
XIOS/trunk/extern/remap/src/parallel_tree.cpp
Diff of XIOS/trunk/extern/remap/src/parallel_tree.cpp (r923 → r1638):

@@ lines 115-119 @@
 116  //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
-117  CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
+117  CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
 118  {
 119  treeCascade.reserve(cascade.num_levels);
@@ lines 151-155 @@
 152  int nrecv; // global number of samples THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
-153  MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
+153  ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
 154  double ratio = blocSize / (1.0 * nrecv);
 155  int nsend = ratio * n + 1; // nsend = n_local_samples / n_global_samples * blocksize + 1 = blocksize/comm.size
@@ lines 157-161 @@
 158  int *counts = new int[comm.size];
-159  MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
+159  ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);
 161  nrecv = 0;
@@ lines 183-187 @@
 183  /* each process needs the sample elements from all processes */
 184  double *recvBuffer = new double[nrecv*4];
-185  MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
+185  ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
 186  delete[] sendBuffer;
 187  delete[] counts;
@@ lines 241-250 @@
 241  << " node size : "<<node.size()<<" bloc size : "<<blocSize<<" total number of leaf : "<<tree.leafs.size()<<endl ;
 242  /*
-243  MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
+243  MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
 244  if (!allok) {
 245  MPI_Finalize();
 247  }
 248  */
-249  MPI_Abort(MPI_COMM_WORLD,-1) ;
+249  ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;
 250  }
@@ lines 251-269 @@
 251  /*
 265  {
 266  CMPIRouting MPIRoute(communicator);
-267  MPI_Barrier(communicator);
+267  ep_lib::MPI_Barrier(communicator);
 268  CTimer::get("buildLocalTree(initRoute)").resume();
 269  MPIRoute.init(route);
@@ lines 290-294 @@
 291  int mpiRank;
-292  MPI_Comm_rank(communicator, &mpiRank);
+292  ep_lib::MPI_Comm_rank(communicator, &mpiRank);
 293  localTree.leafs.reserve(nbLocalElements);
 294  for (int i = 0; i < nbLocalElements; i++)
@@ lines 316-322 @@
 316  nb1=node.size() ; nb2=node2.size() ;
 317  nb=nb1+nb2 ;
-318  MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
+318  ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
 319  int commSize ;
-320  MPI_Comm_size(communicator,&commSize) ;
+320  ep_lib::MPI_Comm_size(communicator,&commSize) ;
 322  // make multiple of two
@@ lines 501-509 @@
 501  // gather circles on this level of the cascade
 502  int pg_size;
-503  MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
+503  ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
 504  vector<Coord> allRootCentres(pg_size);
 505  vector<double> allRootRadia(pg_size);
-506  MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
-507  MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0], 1, MPI_DOUBLE, cascade.level[level].pg_comm);
+506  ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
+507  ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0], 1, EP_DOUBLE, cascade.level[level].pg_comm);
 509  // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
Note: See TracChangeset
for help on using the changeset viewer.