- Timestamp: 11/19/18 16:28:03
- Location: XIOS/dev/dev_trunk_omp/extern/remap/src
- Files: 16 edited
Legend:
- Unmodified lines are shown unprefixed
- Added lines are prefixed with +
- Removed lines are prefixed with -
XIOS/dev/dev_trunk_omp/extern/remap/src/cputime.cpp
r694 → r1602
    #include "mpi.hpp"
  + using namespace ep_lib;

    namespace sphereRemap {
XIOS/dev/dev_trunk_omp/extern/remap/src/gridRemap.cpp
r688 → r1602
    CRemapGrid srcGrid;
  + #pragma omp threadprivate(srcGrid)

    CRemapGrid tgtGrid;
  + #pragma omp threadprivate(tgtGrid)

    }
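Several of the files in this changeset apply the same pattern: a file-scope variable is made per-thread with OpenMP's threadprivate directive, so each thread of a parallel remap works on its own copy instead of racing on a shared global. A minimal, self-contained sketch of that directive (gridCounter is a made-up placeholder, not an XIOS variable):

    #include <omp.h>
    #include <cstdio>

    // File-scope variable replicated per OpenMP thread, analogous to
    // srcGrid/tgtGrid above: each thread reads and writes its own copy.
    static int gridCounter = 0;
    #pragma omp threadprivate(gridCounter)

    int main()
    {
        #pragma omp parallel
        {
            gridCounter = omp_get_thread_num();   // no data race: private copy
            #pragma omp critical
            std::printf("thread %d sees gridCounter = %d\n",
                        omp_get_thread_num(), gridCounter);
        }
        return 0;
    }

Compiled with OpenMP enabled (e.g. -fopenmp), every thread prints its own value; without OpenMP the pragmas are ignored and the program still runs.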
XIOS/dev/dev_trunk_omp/extern/remap/src/gridRemap.hpp
r688 → r1602
    Coord readPole(std::istream&);

  - extern CRemapGrid srcGrid;
  - extern CRemapGrid tgtGrid;
  + //extern CRemapGrid srcGrid;
  + //extern CRemapGrid tgtGrid;

    }
XIOS/dev/dev_trunk_omp/extern/remap/src/intersect.cpp
r1158 → r1602
    namespace sphereRemap {

  + extern CRemapGrid srcGrid;
  + #pragma omp threadprivate(srcGrid)
  +
  + extern CRemapGrid tgtGrid;
  + #pragma omp threadprivate(tgtGrid)

    using namespace std;
XIOS/dev/dev_trunk_omp/extern/remap/src/intersection_ym.cpp
r1588 → r1602
    namespace sphereRemap {

  + extern CRemapGrid srcGrid;
  + #pragma omp threadprivate(srcGrid)
  +
  + extern CRemapGrid tgtGrid;
  + #pragma omp threadprivate(tgtGrid)
  +

    using namespace std;
    using namespace ClipperLib ;
XIOS/dev/dev_trunk_omp/extern/remap/src/libmapper.cpp
r694 → r1602
    #include "cputime.hpp" // cputime

  + using namespace ep_lib;
  +
    using namespace sphereRemap ;
    …
    and deallocated during the second step (computing the weights) */
    Mapper *mapper;
  + #pragma omp threadprivate(mapper)

    /** xxx_bounds_yyy is of length n_vert_per_cell_xxx*n_cell_xxx
XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp
r1158 → r1602
    #include "mapper.hpp"
  + using namespace ep_lib;

    namespace sphereRemap {

  + extern CRemapGrid srcGrid;
  + #pragma omp threadprivate(srcGrid)
  +
  + extern CRemapGrid tgtGrid;
  + #pragma omp threadprivate(tgtGrid)
  +

    /* A subdivition of an array into N sub-arays
    …
      MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
  -   MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  +   MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]);
      nbSendRequest++;
      if (order == 2)
      {
        MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1),
  -                MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]);
  +                MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]);
        nbSendRequest++;
  -     MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
  +     MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]);
        //ym --> attention taille GloId
        nbSendRequest++;
      }
      else
      {
  -     MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]);
  +     MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 4, communicator, &sendRequest[nbSendRequest]);
        //ym --> attention taille GloId
        nbSendRequest++;
    …
      MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
  -   MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  +   MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]);
      nbRecvRequest++;
      if (order == 2)
      {
        MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1),
  -               MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  +               MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]);
        nbRecvRequest++;
  -     MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  +     MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]);
        //ym --> attention taille GloId
        nbRecvRequest++;
      }
      else
      {
  -     MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]);
  +     MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 4, communicator, &recvRequest[nbRecvRequest]);
        //ym --> attention taille GloId
        nbRecvRequest++;
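Besides the ep_lib and threadprivate additions, the substantive change in mapper.cpp is that each send/receive pair now uses its own message tag (0 to 4) instead of tag 0 for everything, so the pairing between the value, area, gradient and neighbour-id messages is explicit rather than dependent on posting order. An illustrative sketch of the idea with plain MPI and hypothetical buffers (not XIOS code):

    #include <mpi.h>

    // Two concurrent messages to the same peer on the same communicator:
    // distinct tags guarantee each receive matches the intended send.
    void exchange(const double* sendValue, const double* sendArea,
                  double* recvValue, double* recvArea,
                  int n, int peer, MPI_Comm comm)
    {
        MPI_Request req[4];
        MPI_Issend(sendValue, n, MPI_DOUBLE, peer, /*tag*/ 0, comm, &req[0]);
        MPI_Issend(sendArea,  n, MPI_DOUBLE, peer, /*tag*/ 1, comm, &req[1]);
        MPI_Irecv (recvValue, n, MPI_DOUBLE, peer, /*tag*/ 0, comm, &req[2]);
        MPI_Irecv (recvArea,  n, MPI_DOUBLE, peer, /*tag*/ 1, comm, &req[3]);
        MPI_Waitall(4, req, MPI_STATUSES_IGNORE);
    }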
XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.hpp
r1158 → r1602
    {
    public:
  - Mapper(MPI_Comm comm=MPI_COMM_WORLD) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
  + Mapper(ep_lib::MPI_Comm comm) : communicator(comm), verbose(SILENT), neighbourElements(NULL), sstree(comm) {}
    ~Mapper();
    void setVerbosity(verbosity v) {verbose=v ;}
    …
    CParallelTree sstree;
  - MPI_Comm communicator ;
  + ep_lib::MPI_Comm communicator ;
    std::vector<Elt> sourceElements ;
    std::vector<Node> sourceMesh ;
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_cascade.cpp
r688 → r1602
    #include "mpi_cascade.hpp"
    #include <iostream>
  + using namespace ep_lib;

    namespace sphereRemap {
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_cascade.hpp
r694 → r1602 (MPI types and calls are now qualified with ep_lib::; the bodies were also reindented, so most lines appear replaced in the raw diff)
    {
    public:
  -   CCascadeLevel(MPI_Comm comm) : comm(comm)
  +   CCascadeLevel(ep_lib::MPI_Comm comm) : comm(comm)
  +   {
  +     ep_lib::MPI_Comm_size(comm, &size);
  +     ep_lib::MPI_Comm_rank(comm, &rank);
  +   }
  +   int colour() const { return rank % group_size; };
  +   int key() const { return p_colour() + rank/(p_grp_size*group_size)*p_grp_size; }

  +   // perpendicular group
  +   int p_colour() const { return (rank%group_size + rank/group_size) % p_grp_size; }
  +   int p_key() const { return colour() + rank/(p_grp_size*group_size)*group_size; }

  +   ep_lib::MPI_Comm comm, pg_comm;
  +   int rank;
  +   int size;
  +   int group_size; // group_size and p_grp_size are interchanged?? FIXME
  +   int p_grp_size;
    };
    …
    {
    public:
  -   //
  -   CMPICascade(int nodes_per_level, MPI_Comm comm);
  +   CMPICascade(int nodes_per_level, ep_lib::MPI_Comm comm);

  +   int num_levels;
  +   std::vector<CCascadeLevel> level;
    };
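The recurring MPI_Comm → ep_lib::MPI_Comm substitution routes all MPI types and calls through the ep_lib namespace, the endpoint-MPI layer used by this OpenMP branch. The sketch below only illustrates the kind of indirection such a namespace can provide; it is an assumption about its role, not ep_lib's actual implementation:

    #include <mpi.h>

    namespace ep_lib
    {
        // Forward to the standard MPI implementation; an endpoint library
        // could instead map these onto per-thread communicators.
        using MPI_Comm = ::MPI_Comm;

        inline int MPI_Comm_size(MPI_Comm comm, int* size) { return ::MPI_Comm_size(comm, size); }
        inline int MPI_Comm_rank(MPI_Comm comm, int* rank) { return ::MPI_Comm_rank(comm, rank); }
    }

With this indirection, code written against ep_lib:: compiles unchanged whether the backing implementation is plain MPI or a thread-level endpoint wrapper.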
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_routing.cpp
r694 → r1602
    #include "timerRemap.hpp"
    #include <iostream>
  + using namespace ep_lib;

    namespace sphereRemap {
    …
    for (int i = 0; i < nbSource; i++)
    {
  -   MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
  -   indexRequest++;
  +   MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest++]);
    }
    MPI_Barrier(communicator);
    for (int i = 0; i < nbTarget; i++)
    {
  -   MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest]);
  -   indexRequest++;
  +   MPI_Isend(&mpiRank, 1, MPI_INT, targetRank[i], 0, communicator, &request[indexRequest++]);
    }
    MPI_Waitall(indexRequest, request, status);
    …
    for (int i = 0; i < nbSource; i++)
    {
  -   MPI_Irecv(&sourceRank[i], 1, MPI_INT, MPI_ANY_SOURCE, 0, communicator, &request[indexRequest]);
  +   MPI_Irecv(&sourceRank[i], 1, MPI_INT, -2, 0, communicator, &request[indexRequest]);
      indexRequest++;
    }
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_routing.hpp
r694 → r1602
    {

  - MPI_Comm communicator;
  + ep_lib::MPI_Comm communicator;
    int mpiRank;
    int mpiSize;
    …

    public:
  - CMPIRouting(MPI_Comm comm);
  + CMPIRouting(ep_lib::MPI_Comm comm);
    ~CMPIRouting();
    template<typename T> void init(const std::vector<T>& route, CMPICascade *cascade = NULL);
    …
    template <typename T>
    void alltoalls_known(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
  -                      const std::vector<int>& ranks, MPI_Comm communicator);
  +                      const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);

    template <typename T>
    void alltoalls_unknown(const std::vector<std::vector<T> >& send, std::vector<std::vector<T> >& recv,
  -                        const std::vector<int>& ranks, MPI_Comm communicator);
  +                        const std::vector<int>& ranks, ep_lib::MPI_Comm communicator);
    }
    #endif
XIOS/dev/dev_trunk_omp/extern/remap/src/parallel_tree.cpp
r923 → r1602
    #include "parallel_tree.hpp"
  + using namespace ep_lib;

    namespace sphereRemap {

  + extern CRemapGrid srcGrid;
  + #pragma omp threadprivate(srcGrid)
  +
  + extern CRemapGrid tgtGrid;
  + #pragma omp threadprivate(tgtGrid)

    static const int assignLevel = 2;
    …
    }

  - //CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ, comm)
  - CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MAX_NODE_SZ*MAX_NODE_SZ*2, comm)
  + CParallelTree::CParallelTree(MPI_Comm comm) : communicator(comm), cascade(MIN_NODE_SZ*MIN_NODE_SZ*2, comm)
    {
      treeCascade.reserve(cascade.num_levels);
      for (int lev = 0; lev < cascade.num_levels; lev++)
        treeCascade.push_back(CSampleTree(cascade.level[lev].group_size, assignLevel));
    }
XIOS/dev/dev_trunk_omp/extern/remap/src/parallel_tree.hpp
r694 → r1602 (constructor and communicator now use ep_lib::MPI_Comm; the rest of the class body was reindented)
    {
    public:
  -   CParallelTree(MPI_Comm comm);
  +   CParallelTree(ep_lib::MPI_Comm comm);
  +   ~CParallelTree();

      void build(vector<Node>& node, vector<Node>& node2);

  +   void routeNodes(vector<int>& route, vector<Node>& nodes, int level = 0);
  +   void routeIntersections(vector<vector<int> >& route, vector<Node>& nodes, int level = 0);

  +   int nbLocalElements;
  +   Elt* localElements;

      CTree localTree;

    private:
  +   void updateCirclesForRouting(Coord rootCentre, double rootRadius, int level = 0);
  +   void buildSampleTreeCascade(vector<Node>& sampleNodes, int level = 0);
  +   void buildLocalTree(const vector<Node>& node, const vector<int>& route);
  +   void buildRouteTree();

  +   //CSampleTree sampleTree;
  +   vector<CSampleTree> treeCascade; // first for sample tree, then for routing tree
  +   CMPICascade cascade;
  -   MPI_Comm communicator ;
  +   ep_lib::MPI_Comm communicator ;

    };
XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.cpp
r694 → r1602
    #include <map>
    #include <iostream>
  + using namespace ep_lib;

    namespace sphereRemap {
    …
    using namespace std;

  - map<string,CTimer*> CTimer::allTimer;
  + //map<string,CTimer*> CTimer::allTimer;
  + map<string,CTimer*> *CTimer::allTimer_ptr = 0;

    CTimer::CTimer(const string& name_) : name(name_)
    …
    {
      map<string,CTimer*>::iterator it;
  -   it=allTimer.find(name);
  -   if (it==allTimer.end()) it=allTimer.insert(pair<string,CTimer*>(name,new CTimer(name))).first;
  +   if(allTimer_ptr == 0) allTimer_ptr = new map<string,CTimer*>;
  +   //it=allTimer.find(name);
  +   it=allTimer_ptr->find(name);
  +   //if (it==allTimer.end()) it=allTimer.insert(pair<string,CTimer*>(name,new CTimer(name))).first;
  +   if (it==allTimer_ptr->end()) it=allTimer_ptr->insert(pair<string,CTimer*>(name,new CTimer(name))).first;
      return *(it->second);
    }
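The timer registry change replaces a shared static map with a threadprivate pointer that each thread allocates on first use, so concurrent threads no longer insert into the same container. A minimal sketch of this lazily initialised, per-thread registry pattern (Entry and lookup are made-up names, not the XIOS API):

    #include <map>
    #include <string>
    #include <utility>

    struct Entry { double value = 0.0; };

    // Per-thread registry, created the first time a thread calls lookup(),
    // mirroring the allTimer_ptr change above.
    static std::map<std::string, Entry*>* registry_ptr = 0;
    #pragma omp threadprivate(registry_ptr)

    Entry& lookup(const std::string& name)
    {
        if (registry_ptr == 0) registry_ptr = new std::map<std::string, Entry*>;
        std::map<std::string, Entry*>::iterator it = registry_ptr->find(name);
        if (it == registry_ptr->end())
            it = registry_ptr->insert(std::make_pair(name, new Entry)).first;
        return *(it->second);
    }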
XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.hpp
r694 → r1602
    double getCumulatedTime(void);
    void print(void);
  - static map<string,CTimer*> allTimer;
  + //static map<string,CTimer*> allTimer;
  + static map<string,CTimer*> *allTimer_ptr;
  + #pragma omp threadprivate(allTimer_ptr)
    static double getTime(void);
    static CTimer& get(string name);