- Timestamp: 01/23/19 10:31:44 (5 years ago)
- File: 1 edited
Legend:
- Unmodified: line carries both an r1545 and an r1642 number
- Added: "+" line (r1642 number only)
- Removed: "-" line (r1545 number only)
XIOS/dev/branch_openmp/extern/remap/src/parallel_tree.cpp
r1545    r1642
12       12
13       13     #include "parallel_tree.hpp"
14            -  using namespace ep_lib;
15       14
16       15     namespace sphereRemap {
17            -
18            -  extern CRemapGrid srcGrid;
19            -  #pragma omp threadprivate(srcGrid)
20            -
21            -  extern CRemapGrid tgtGrid;
22            -  #pragma omp threadprivate(tgtGrid)
23       16
24       17     static const int assignLevel = 2;
…
121      114    }
122      115
123           -  CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ*2, comm)
124           -  {
125           -    treeCascade.reserve(cascade.num_levels);
126           -    for (int lev = 0; lev < cascade.num_levels; lev++)
127           -      treeCascade.push_back(CSampleTree(cascade.level[lev].group_size, assignLevel));
         116  +  //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
         117  +  CParallelTree::CParallelTree(ep_lib::MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
         118  +  {
         119  +    treeCascade.reserve(cascade.num_levels);
         120  +    for (int lev = 0; lev < cascade.num_levels; lev++)
         121  +      treeCascade.push_back(CSampleTree(cascade.level[lev].group_size, assignLevel));
128      122    }
129      123
…
157      151
158      152    int nrecv; // global number of samples  THIS WILL BE THE NUMBER OF LEAFS IN THE SAMPLE TREE
159           -  MPI_Allreduce(&n, &nrecv, 1, MPI_INT, MPI_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
160           -
         153  +  ep_lib::MPI_Allreduce(&n, &nrecv, 1, EP_INT, EP_SUM, comm.comm); // => size of sample tree does not depend on keepNodes!
161      154    double ratio = blocSize / (1.0 * nrecv);
162      155    int nsend = ratio * n + 1; // nsend = n_local_samples / n_global_samples * blocksize + 1 = blocksize/comm.size
…
164      157
165      158    int *counts = new int[comm.size];
166           -  MPI_Allgather(&nsend, 1, MPI_INT, counts, 1, MPI_INT, comm.comm);
         159  +  ep_lib::MPI_Allgather(&nsend, 1, EP_INT, counts, 1, EP_INT, comm.comm);
167      160
168      161    nrecv = 0;
…
190      183    /* each process needs the sample elements from all processes */
191      184    double *recvBuffer = new double[nrecv*4];
192           -  MPI_Allgatherv(sendBuffer, 4 * nsend, MPI_DOUBLE, recvBuffer, counts, displs, MPI_DOUBLE, comm.comm);
         185  +  ep_lib::MPI_Allgatherv(sendBuffer, 4 * nsend, EP_DOUBLE, recvBuffer, counts, displs, EP_DOUBLE, comm.comm);
193      186    delete[] sendBuffer;
194      187    delete[] counts;
…
248      241      << " node size : "<<node.size()<<" bloc size : "<<blocSize<<" total number of leaf : "<<tree.leafs.size()<<endl ;
249      242    /*
250           -  MPI_Allreduce(&ok, &allok, 1, MPI_INT, MPI_PROD, communicator);
         243  +  MPI_Allreduce(&ok, &allok, 1, EP_INT, MPI_PROD, communicator);
251      244    if (!allok) {
252      245      MPI_Finalize();
254      247    }
255      248    */
256           -  MPI_Abort(MPI_COMM_WORLD,-1) ;
         249  +  ep_lib::MPI_Abort(EP_COMM_WORLD,-1) ;
257      250
258      251    /*
…
272      265    {
273      266      CMPIRouting MPIRoute(communicator);
274           -    MPI_Barrier(communicator);
         267  +    ep_lib::MPI_Barrier(communicator);
275      268      CTimer::get("buildLocalTree(initRoute)").resume();
276      269      MPIRoute.init(route);
…
297      290
298      291      int mpiRank;
299           -    MPI_Comm_rank(communicator, &mpiRank);
         292  +    ep_lib::MPI_Comm_rank(communicator, &mpiRank);
300      293      localTree.leafs.reserve(nbLocalElements);
301      294      for (int i = 0; i < nbLocalElements; i++)
…
323      316      nb1=node.size() ; nb2=node2.size() ;
324      317      nb=nb1+nb2 ;
325           -    MPI_Allreduce(&nb, &nbTot, 1, MPI_LONG, MPI_SUM, communicator) ;
         318  +    ep_lib::MPI_Allreduce(&nb, &nbTot, 1, EP_LONG, EP_SUM, communicator) ;
326      319      int commSize ;
327           -    MPI_Comm_size(communicator,&commSize) ;
         320  +    ep_lib::MPI_Comm_size(communicator,&commSize) ;
328      321
329      322      // make multiple of two
…
508      501      // gather circles on this level of the cascade
509      502      int pg_size;
510           -    MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
         503  +    ep_lib::MPI_Comm_size(cascade.level[level].pg_comm, &pg_size);
511      504      vector<Coord> allRootCentres(pg_size);
512      505      vector<double> allRootRadia(pg_size);
513           -    MPI_Allgather(&rootCentre, 3, MPI_DOUBLE, &allRootCentres[0], 3, MPI_DOUBLE, cascade.level[level].pg_comm);
514           -    MPI_Allgather(&rootRadius, 1, MPI_DOUBLE, &allRootRadia[0], 1, MPI_DOUBLE, cascade.level[level].pg_comm);
         506  +    ep_lib::MPI_Allgather(&rootCentre, 3, EP_DOUBLE, &allRootCentres[0], 3, EP_DOUBLE, cascade.level[level].pg_comm);
         507  +    ep_lib::MPI_Allgather(&rootRadius, 1, EP_DOUBLE, &allRootRadia[0], 1, EP_DOUBLE, cascade.level[level].pg_comm);
515      508
516      509      // now allRootsRadia and allRootCentres must be inserted into second levels of us and propagated to root
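Every hunk above applies the same mechanical substitution: raw MPI entities are replaced by their ep_lib counterparts (MPI_Allreduce becomes ep_lib::MPI_Allreduce, MPI_INT becomes EP_INT, MPI_COMM_WORLD becomes EP_COMM_WORLD), and the blanket `using namespace ep_lib;` is dropped in favour of explicit qualification at every call site. As a hedged illustration of why such a wrapper layer can shadow the MPI API name-for-name, the sketch below shows one possible shape for it; the namespace `ep_lib_sketch`, the constant `EP_INT_SKETCH`, and the pass-through body are illustrative assumptions, not the actual ep_lib implementation.

    // Minimal sketch of an endpoint-style MPI wrapper (assumed, not real ep_lib code).
    // The wrapper reuses the MPI entry-point names inside its own namespace, so a
    // call site only changes by the ep_lib:: qualifier, exactly as in the diff above.
    #include <mpi.h>

    namespace ep_lib_sketch
    {
      // Hypothetical endpoint-aware communicator: wraps the real MPI communicator
      // plus whatever per-thread endpoint state the library would need.
      struct MPI_Comm
      {
        ::MPI_Comm mpi_comm;
      };

      // Hypothetical datatype constant in the style of EP_INT / EP_DOUBLE.
      const ::MPI_Datatype EP_INT_SKETCH = MPI_INT;

      // A real endpoint library would combine the contributions of all OpenMP
      // threads attached to this rank before issuing one collective per process;
      // with a single endpoint per process it degenerates to plain MPI.
      inline int MPI_Allreduce(const void *sendbuf, void *recvbuf, int count,
                               ::MPI_Datatype datatype, ::MPI_Op op, MPI_Comm comm)
      {
        return ::MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm.mpi_comm);
      }
    }

Because the sketch's single-endpoint case forwards straight to MPI, the substitution in this changeset can stay purely textual. Replacing the using-directive with explicit `ep_lib::` qualification, as r1642 does throughout, also removes any ambiguity over whether a given call resolves to the wrapper or to the underlying MPI library.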