Changeset 1646
- Timestamp: 01/31/19 12:12:52
- Location: XIOS/dev/dev_trunk_omp
- Files: 6 added, 247 edited
XIOS/dev/dev_trunk_omp/bld.cfg
    (r1628 → r1646)
      #bld::target test_client.exe
      bld::target test_omp.exe
    - bld::target test_omp2.exe
    - bld::target test_send.exe
    - bld::target test_send2.exe
    + #bld::target test_omp2.exe
    + #bld::target test_send.exe
    + #bld::target test_send2.exe
      #bld::target test_unstruct_complete.exe
      #bld::target test_unstructured.exe
      ...
      bld::tool::ld %LINKER
      bld::tool::ldflags %LD_FLAGS
    - bld::tool::cflags %CFLAGS %CBASE_INC -I${PWD}/extern/src_netcdf -I${PWD}/extern/boost/include -I${PWD}/extern/rapidxml/include -I${PWD}/extern/blitz/include
    + bld::tool::cflags %CFLAGS %CBASE_INC -I${PWD}/extern/src_netcdf -I${PWD}/extern/boost/include -I${PWD}/extern/rapidxml/include -I${PWD}/extern/blitz/include -I${PWD}/extern/src_ep_dev
      bld::tool::fflags %FFLAGS %FBASE_INC
      bld::tool::cppkeys %CPP_KEY
XIOS/dev/dev_trunk_omp/extern/ep_dev/ep_message.cpp
    (r1604 → r1646)
      int Message_Check(MPI_Comm comm)
      {
    -   if(comm->is_ep) return Message_Check_endpoint(comm);
    +   if(comm->is_ep) return Message_Check_endpoint(comm);
      }
    [no textual difference is visible in the extracted diff; the change appears to be whitespace-only]
XIOS/dev/dev_trunk_omp/extern/remap/src/cputime.cpp
r1602 r1646 1 1 #include "mpi.hpp" 2 #ifdef _usingEP 2 3 using namespace ep_lib; 4 #endif 3 5 4 6 namespace sphereRemap { -
XIOS/dev/dev_trunk_omp/extern/remap/src/elt.hpp
    (r1630 → r1646)
      int n; /* number of vertices */
      double area;
    + double given_area ;
      Coord x; /* barycentre */
      };
      ...
      n = rhs.n;
      area = rhs.area;
    + given_area = rhs.given_area;
      x = rhs.x;
      val = rhs.val;
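The second hunk updates the hand-written element copy so the new field travels with the rest of the state. A tiny self-contained illustration of why that line matters, using a hypothetical Cell type rather than the real Elt:

    // Hypothetical illustration: a hand-written copy that forgets the new field
    // would silently leave given_area at its default value in the copy.
    #include <cstdio>

    struct Cell
    {
      double area = 0.0;
      double given_area = 0.0;   // the field added by this changeset

      void copyFrom(const Cell& rhs)
      {
        area = rhs.area;
        given_area = rhs.given_area;   // must not be forgotten (cf. the elt.hpp hunk)
      }
    };

    int main()
    {
      Cell a; a.area = 1.0; a.given_area = 1.1;
      Cell b; b.copyFrom(a);
      std::printf("%g %g\n", b.area, b.given_area);
      return 0;
    }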
XIOS/dev/dev_trunk_omp/extern/remap/src/libmapper.cpp
r1630 r1646 15 15 #include "cputime.hpp" // cputime 16 16 17 #ifdef _usingEP 17 18 using namespace ep_lib; 19 #endif 18 20 19 21 using namespace sphereRemap ; … … 43 45 assert(n_cell_dst >= 4); 44 46 assert(1 <= order && order <= 2); 45 47 double* src_area=NULL ; 48 double* dst_area=NULL ; 46 49 mapper = new Mapper(MPI_COMM_WORLD); 47 50 mapper->setVerbosity(PROGRESS) ; 48 mapper->setSourceMesh(src_bounds_lon, src_bounds_lat, n_vert_per_cell_src, n_cell_src, src_pole ) ;49 mapper->setTargetMesh(dst_bounds_lon, dst_bounds_lat, n_vert_per_cell_dst, n_cell_dst, dst_pole ) ;51 mapper->setSourceMesh(src_bounds_lon, src_bounds_lat, src_area, n_vert_per_cell_src, n_cell_src, src_pole ) ; 52 mapper->setTargetMesh(dst_bounds_lon, dst_bounds_lat, dst_area, n_vert_per_cell_dst, n_cell_dst, dst_pole ) ; 50 53 51 54 /* -
XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.cpp
r1630 r1646 12 12 13 13 #include "mapper.hpp" 14 #ifdef _usingEP 14 15 using namespace ep_lib; 16 #endif 15 17 16 18 namespace sphereRemap { … … 29 31 void cptOffsetsFromLengths(const int *lengths, int *offsets, int sz) 30 32 { 31 32 33 34 } 35 36 37 void Mapper::setSourceMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId)33 offsets[0] = 0; 34 for (int i = 1; i < sz; i++) 35 offsets[i] = offsets[i-1] + lengths[i-1]; 36 } 37 38 39 void Mapper::setSourceMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId) 38 40 { 39 41 srcGrid.pole = Coord(pole[0], pole[1], pole[2]); 40 42 41 42 43 44 45 46 43 int mpiRank, mpiSize; 44 MPI_Comm_rank(communicator, &mpiRank); 45 MPI_Comm_size(communicator, &mpiSize); 46 47 sourceElements.reserve(nbCells); 48 sourceMesh.reserve(nbCells); 47 49 sourceGlobalId.resize(nbCells) ; 48 50 … … 57 59 else sourceGlobalId.assign(globalId,globalId+nbCells); 58 60 59 60 61 62 63 64 61 for (int i = 0; i < nbCells; i++) 62 { 63 int offs = i*nVertex; 64 Elt elt(boundsLon + offs, boundsLat + offs, nVertex); 65 elt.src_id.rank = mpiRank; 66 elt.src_id.ind = i; 65 67 elt.src_id.globalId = sourceGlobalId[i]; 66 sourceElements.push_back(elt); 67 sourceMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back())); 68 cptEltGeom(sourceElements[i], Coord(pole[0], pole[1], pole[2])); 69 } 70 71 } 72 73 void Mapper::setTargetMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId) 68 sourceElements.push_back(elt); 69 sourceMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back())); 70 cptEltGeom(sourceElements[i], Coord(pole[0], pole[1], pole[2])); 71 if (area!=NULL) sourceElements[i].given_area=area[i] ; 72 else sourceElements[i].given_area=sourceElements[i].area ; 73 } 74 75 } 76 77 void Mapper::setTargetMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId) 74 78 { 75 79 tgtGrid.pole = Coord(pole[0], pole[1], pole[2]); 76 80 77 78 79 80 81 82 81 int mpiRank, mpiSize; 82 MPI_Comm_rank(communicator, &mpiRank); 83 MPI_Comm_size(communicator, &mpiSize); 84 85 targetElements.reserve(nbCells); 86 targetMesh.reserve(nbCells); 83 87 84 88 targetGlobalId.resize(nbCells) ; … … 93 97 else targetGlobalId.assign(globalId,globalId+nbCells); 94 98 95 for (int i = 0; i < nbCells; i++) 96 { 97 int offs = i*nVertex; 98 Elt elt(boundsLon + offs, boundsLat + offs, nVertex); 99 targetElements.push_back(elt); 100 targetMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back())); 101 cptEltGeom(targetElements[i], Coord(pole[0], pole[1], pole[2])); 102 } 99 for (int i = 0; i < nbCells; i++) 100 { 101 int offs = i*nVertex; 102 Elt elt(boundsLon + offs, boundsLat + offs, nVertex); 103 targetElements.push_back(elt); 104 targetMesh.push_back(Node(elt.x, cptRadius(elt), &sourceElements.back())); 105 cptEltGeom(targetElements[i], Coord(pole[0], pole[1], pole[2])); 106 if (area!=NULL) targetElements[i].given_area=area[i] ; 107 else targetElements[i].given_area=targetElements[i].area ; 108 } 103 109 104 110 … … 119 125 vector<double> Mapper::computeWeights(int interpOrder, bool renormalize, bool quantity) 120 126 { 121 122 123 124 127 vector<double> timings; 128 int mpiSize, mpiRank; 129 MPI_Comm_size(communicator, &mpiSize); 130 MPI_Comm_rank(communicator, &mpiRank); 
125 131 126 132 this->buildSSTree(sourceMesh, targetMesh); 127 133 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 134 if (mpiRank == 0 && verbose) cout << "Computing intersections ..." << endl; 135 double tic = cputime(); 136 computeIntersection(&targetElements[0], targetElements.size()); 137 timings.push_back(cputime() - tic); 138 139 tic = cputime(); 140 if (interpOrder == 2) { 141 if (mpiRank == 0 && verbose) cout << "Computing grads ..." << endl; 142 buildMeshTopology(); 143 computeGrads(); 144 } 145 timings.push_back(cputime() - tic); 146 147 /* Prepare computation of weights */ 148 /* compute number of intersections which for the first order case 143 149 corresponds to the number of edges in the remap matrix */ 144 145 146 147 148 149 150 151 152 153 154 155 150 int nIntersections = 0; 151 for (int j = 0; j < targetElements.size(); j++) 152 { 153 Elt &elt = targetElements[j]; 154 for (list<Polyg*>::iterator it = elt.is.begin(); it != elt.is.end(); it++) 155 nIntersections++; 156 } 157 /* overallocate for NMAX neighbours for each elements */ 158 remapMatrix = new double[nIntersections*NMAX]; 159 srcAddress = new int[nIntersections*NMAX]; 160 srcRank = new int[nIntersections*NMAX]; 161 dstAddress = new int[nIntersections*NMAX]; 156 162 sourceWeightId =new long[nIntersections*NMAX]; 157 163 targetWeightId =new long[nIntersections*NMAX]; 158 164 159 165 160 161 162 163 166 if (mpiRank == 0 && verbose) cout << "Remapping..." << endl; 167 tic = cputime(); 168 nWeights = remap(&targetElements[0], targetElements.size(), interpOrder, renormalize, quantity); 169 timings.push_back(cputime() - tic); 164 170 165 171 for (int i = 0; i < targetElements.size(); i++) targetElements[i].delete_intersections(); 166 172 167 173 return timings; 168 174 } 169 175 … … 176 182 int Mapper::remap(Elt *elements, int nbElements, int order, bool renormalize, bool quantity) 177 183 { 178 int mpiSize, mpiRank; 179 MPI_Comm_size(communicator, &mpiSize); 180 MPI_Comm_rank(communicator, &mpiRank); 181 182 /* create list of intersections (super mesh elements) for each rank */ 183 multimap<int, Polyg *> *elementList = new multimap<int, Polyg *>[mpiSize]; 184 for (int j = 0; j < nbElements; j++) 185 { 186 Elt& e = elements[j]; 187 for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++) 188 elementList[(*it)->id.rank].insert(pair<int, Polyg *>((*it)->id.ind, *it)); 189 } 190 191 int *nbSendElement = new int[mpiSize]; 192 int **sendElement = new int*[mpiSize]; /* indices of elements required from other rank */ 193 double **recvValue = new double*[mpiSize]; 194 double **recvArea = new double*[mpiSize]; 195 Coord **recvGrad = new Coord*[mpiSize]; 196 GloId **recvNeighIds = new GloId*[mpiSize]; /* ids of the of the source neighbours which also contribute through gradient */ 197 for (int rank = 0; rank < mpiSize; rank++) 198 { 199 /* get size for allocation */ 200 int last = -1; /* compares unequal to any index */ 201 int index = -1; /* increased to starting index 0 in first iteration */ 202 for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it) 203 { 204 if (last != it->first) 205 index++; 206 (it->second)->id.ind = index; 207 last = it->first; 208 } 209 nbSendElement[rank] = index + 1; 210 211 /* if size is non-zero allocate and collect indices of elements on other ranks that we intersect */ 212 if (nbSendElement[rank] > 0) 213 { 214 sendElement[rank] = new int[nbSendElement[rank]]; 215 recvValue[rank] = new double[nbSendElement[rank]]; 216 
recvArea[rank] = new double[nbSendElement[rank]]; 217 if (order == 2) 218 { 219 recvNeighIds[rank] = new GloId[nbSendElement[rank]*(NMAX+1)]; 220 recvGrad[rank] = new Coord[nbSendElement[rank]*(NMAX+1)]; 221 } 222 else 223 recvNeighIds[rank] = new GloId[nbSendElement[rank]]; 224 225 last = -1; 226 index = -1; 227 for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it) 228 { 229 if (last != it->first) 230 index++; 231 sendElement[rank][index] = it->first; 232 last = it->first; 233 } 234 } 235 } 236 237 /* communicate sizes of source elements to be sent (index lists and later values and gradients) */ 238 int *nbRecvElement = new int[mpiSize]; 239 MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator); 240 241 /* communicate indices of source elements on other ranks whoes value and gradient we need (since intersection) */ 242 int nbSendRequest = 0; 243 int nbRecvRequest = 0; 244 int **recvElement = new int*[mpiSize]; 245 double **sendValue = new double*[mpiSize]; 246 double **sendArea = new double*[mpiSize]; 247 Coord **sendGrad = new Coord*[mpiSize]; 248 GloId **sendNeighIds = new GloId*[mpiSize]; 249 MPI_Request *sendRequest = new MPI_Request[4*mpiSize]; 250 MPI_Request *recvRequest = new MPI_Request[4*mpiSize]; 251 for (int rank = 0; rank < mpiSize; rank++) 252 { 253 if (nbSendElement[rank] > 0) 254 { 255 MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]); 256 nbSendRequest++; 257 } 258 259 if (nbRecvElement[rank] > 0) 260 { 261 recvElement[rank] = new int[nbRecvElement[rank]]; 262 sendValue[rank] = new double[nbRecvElement[rank]]; 263 sendArea[rank] = new double[nbRecvElement[rank]]; 264 if (order == 2) 265 { 266 sendNeighIds[rank] = new GloId[nbRecvElement[rank]*(NMAX+1)]; 267 sendGrad[rank] = new Coord[nbRecvElement[rank]*(NMAX+1)]; 268 } 269 else 270 { 271 sendNeighIds[rank] = new GloId[nbRecvElement[rank]]; 272 } 273 MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]); 274 nbRecvRequest++; 275 } 276 } 277 MPI_Status *status = new MPI_Status[4*mpiSize]; 278 279 MPI_Waitall(nbSendRequest, sendRequest, status); 184 int mpiSize, mpiRank; 185 MPI_Comm_size(communicator, &mpiSize); 186 MPI_Comm_rank(communicator, &mpiRank); 187 188 /* create list of intersections (super mesh elements) for each rank */ 189 multimap<int, Polyg *> *elementList = new multimap<int, Polyg *>[mpiSize]; 190 for (int j = 0; j < nbElements; j++) 191 { 192 Elt& e = elements[j]; 193 for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++) 194 elementList[(*it)->id.rank].insert(pair<int, Polyg *>((*it)->id.ind, *it)); 195 } 196 197 int *nbSendElement = new int[mpiSize]; 198 int **sendElement = new int*[mpiSize]; /* indices of elements required from other rank */ 199 double **recvValue = new double*[mpiSize]; 200 double **recvArea = new double*[mpiSize]; 201 double **recvGivenArea = new double*[mpiSize]; 202 Coord **recvGrad = new Coord*[mpiSize]; 203 GloId **recvNeighIds = new GloId*[mpiSize]; /* ids of the of the source neighbours which also contribute through gradient */ 204 for (int rank = 0; rank < mpiSize; rank++) 205 { 206 /* get size for allocation */ 207 int last = -1; /* compares unequal to any index */ 208 int index = -1; /* increased to starting index 0 in first iteration */ 209 for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it) 210 { 211 if 
(last != it->first) 212 index++; 213 (it->second)->id.ind = index; 214 last = it->first; 215 } 216 nbSendElement[rank] = index + 1; 217 218 /* if size is non-zero allocate and collect indices of elements on other ranks that we intersect */ 219 if (nbSendElement[rank] > 0) 220 { 221 sendElement[rank] = new int[nbSendElement[rank]]; 222 recvValue[rank] = new double[nbSendElement[rank]]; 223 recvArea[rank] = new double[nbSendElement[rank]]; 224 recvGivenArea[rank] = new double[nbSendElement[rank]]; 225 if (order == 2) 226 { 227 recvNeighIds[rank] = new GloId[nbSendElement[rank]*(NMAX+1)]; 228 recvGrad[rank] = new Coord[nbSendElement[rank]*(NMAX+1)]; 229 } 230 else 231 recvNeighIds[rank] = new GloId[nbSendElement[rank]]; 232 233 last = -1; 234 index = -1; 235 for (multimap<int, Polyg *>::iterator it = elementList[rank].begin(); it != elementList[rank].end(); ++it) 236 { 237 if (last != it->first) 238 index++; 239 sendElement[rank][index] = it->first; 240 last = it->first; 241 } 242 } 243 } 244 245 /* communicate sizes of source elements to be sent (index lists and later values and gradients) */ 246 int *nbRecvElement = new int[mpiSize]; 247 MPI_Alltoall(nbSendElement, 1, MPI_INT, nbRecvElement, 1, MPI_INT, communicator); 248 249 /* communicate indices of source elements on other ranks whoes value and gradient we need (since intersection) */ 250 int nbSendRequest = 0; 251 int nbRecvRequest = 0; 252 int **recvElement = new int*[mpiSize]; 253 double **sendValue = new double*[mpiSize]; 254 double **sendArea = new double*[mpiSize]; 255 double **sendGivenArea = new double*[mpiSize]; 256 Coord **sendGrad = new Coord*[mpiSize]; 257 GloId **sendNeighIds = new GloId*[mpiSize]; 258 MPI_Request *sendRequest = new MPI_Request[5*mpiSize]; 259 MPI_Request *recvRequest = new MPI_Request[5*mpiSize]; 260 for (int rank = 0; rank < mpiSize; rank++) 261 { 262 if (nbSendElement[rank] > 0) 263 { 264 MPI_Issend(sendElement[rank], nbSendElement[rank], MPI_INT, rank, 0, communicator, &sendRequest[nbSendRequest]); 265 nbSendRequest++; 266 } 267 268 if (nbRecvElement[rank] > 0) 269 { 270 recvElement[rank] = new int[nbRecvElement[rank]]; 271 sendValue[rank] = new double[nbRecvElement[rank]]; 272 sendArea[rank] = new double[nbRecvElement[rank]]; 273 sendGivenArea[rank] = new double[nbRecvElement[rank]]; 274 if (order == 2) 275 { 276 sendNeighIds[rank] = new GloId[nbRecvElement[rank]*(NMAX+1)]; 277 sendGrad[rank] = new Coord[nbRecvElement[rank]*(NMAX+1)]; 278 } 279 else 280 { 281 sendNeighIds[rank] = new GloId[nbRecvElement[rank]]; 282 } 283 MPI_Irecv(recvElement[rank], nbRecvElement[rank], MPI_INT, rank, 0, communicator, &recvRequest[nbRecvRequest]); 284 nbRecvRequest++; 285 } 286 } 287 MPI_Status *status = new MPI_Status[5*mpiSize]; 288 289 MPI_Waitall(nbSendRequest, sendRequest, status); 280 290 MPI_Waitall(nbRecvRequest, recvRequest, status); 281 291 282 /* for all indices that have been received from requesting ranks: pack values and gradients, then send */ 283 nbSendRequest = 0; 284 nbRecvRequest = 0; 285 for (int rank = 0; rank < mpiSize; rank++) 286 { 287 if (nbRecvElement[rank] > 0) 288 { 289 int jj = 0; // jj == j if no weight writing 290 for (int j = 0; j < nbRecvElement[rank]; j++) 291 { 292 sendValue[rank][j] = sstree.localElements[recvElement[rank][j]].val; 293 sendArea[rank][j] = sstree.localElements[recvElement[rank][j]].area; 294 if (order == 2) 295 { 296 sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].grad; 292 /* for all indices that have been received from requesting ranks: pack values 
and gradients, then send */ 293 nbSendRequest = 0; 294 nbRecvRequest = 0; 295 for (int rank = 0; rank < mpiSize; rank++) 296 { 297 if (nbRecvElement[rank] > 0) 298 { 299 int jj = 0; // jj == j if no weight writing 300 for (int j = 0; j < nbRecvElement[rank]; j++) 301 { 302 sendValue[rank][j] = sstree.localElements[recvElement[rank][j]].val; 303 sendArea[rank][j] = sstree.localElements[recvElement[rank][j]].area; 304 sendGivenArea[rank][j] = sstree.localElements[recvElement[rank][j]].given_area; 305 if (order == 2) 306 { 307 sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].grad; 297 308 // cout<<"grad "<<jj<<" "<<recvElement[rank][j]<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ; 298 299 300 301 302 309 sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].src_id; 310 jj++; 311 for (int i = 0; i < NMAX; i++) 312 { 313 sendGrad[rank][jj] = sstree.localElements[recvElement[rank][j]].gradNeigh[i]; 303 314 // cout<<"grad "<<jj<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ; 304 315 sendNeighIds[rank][jj] = sstree.localElements[recvElement[rank][j]].neighId[i]; 305 jj++; 306 } 307 } 308 else 309 sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id; 310 } 311 MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]); 312 nbSendRequest++; 313 MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]); 314 nbSendRequest++; 315 if (order == 2) 316 { 317 MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), 318 MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]); 319 nbSendRequest++; 320 MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]); 316 jj++; 317 } 318 } 319 else 320 sendNeighIds[rank][j] = sstree.localElements[recvElement[rank][j]].src_id; 321 } 322 MPI_Issend(sendValue[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 0, communicator, &sendRequest[nbSendRequest]); 323 nbSendRequest++; 324 MPI_Issend(sendArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 1, communicator, &sendRequest[nbSendRequest]); 325 nbSendRequest++; 326 MPI_Issend(sendGivenArea[rank], nbRecvElement[rank], MPI_DOUBLE, rank, 5, communicator, &sendRequest[nbSendRequest]); 327 nbSendRequest++; 328 if (order == 2) 329 { 330 MPI_Issend(sendGrad[rank], 3*nbRecvElement[rank]*(NMAX+1), MPI_DOUBLE, rank, 2, communicator, &sendRequest[nbSendRequest]); 331 nbSendRequest++; 332 MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &sendRequest[nbSendRequest]); 321 333 //ym --> attention taille GloId 322 323 324 325 326 334 nbSendRequest++; 335 } 336 else 337 { 338 MPI_Issend(sendNeighIds[rank], 4*nbRecvElement[rank], MPI_INT, rank, 4, communicator, &sendRequest[nbSendRequest]); 327 339 //ym --> attention taille GloId 328 nbSendRequest++; 329 } 330 } 331 if (nbSendElement[rank] > 0) 332 { 333 MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]); 334 nbRecvRequest++; 335 MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]); 336 nbRecvRequest++; 337 if (order == 2) 338 { 339 MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1), 340 MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]); 341 nbRecvRequest++; 342 MPI_Irecv(recvNeighIds[rank], 
4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]); 340 nbSendRequest++; 341 } 342 } 343 if (nbSendElement[rank] > 0) 344 { 345 MPI_Irecv(recvValue[rank], nbSendElement[rank], MPI_DOUBLE, rank, 0, communicator, &recvRequest[nbRecvRequest]); 346 nbRecvRequest++; 347 MPI_Irecv(recvArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 1, communicator, &recvRequest[nbRecvRequest]); 348 nbRecvRequest++; 349 MPI_Irecv(recvGivenArea[rank], nbSendElement[rank], MPI_DOUBLE, rank, 5, communicator, &recvRequest[nbRecvRequest]); 350 nbRecvRequest++; 351 if (order == 2) 352 { 353 MPI_Irecv(recvGrad[rank], 3*nbSendElement[rank]*(NMAX+1), 354 MPI_DOUBLE, rank, 2, communicator, &recvRequest[nbRecvRequest]); 355 nbRecvRequest++; 356 MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank]*(NMAX+1), MPI_INT, rank, 3, communicator, &recvRequest[nbRecvRequest]); 343 357 //ym --> attention taille GloId 344 345 346 347 348 358 nbRecvRequest++; 359 } 360 else 361 { 362 MPI_Irecv(recvNeighIds[rank], 4*nbSendElement[rank], MPI_INT, rank, 4, communicator, &recvRequest[nbRecvRequest]); 349 363 //ym --> attention taille GloId 350 351 352 353 364 nbRecvRequest++; 365 } 366 } 367 } 354 368 355 369 MPI_Waitall(nbSendRequest, sendRequest, status); 356 MPI_Waitall(nbRecvRequest, recvRequest, status); 357 358 359 /* now that all values and gradients are available use them to computed interpolated values on target 360 and also to compute weights */ 361 int i = 0; 362 for (int j = 0; j < nbElements; j++) 363 { 364 Elt& e = elements[j]; 365 366 /* since for the 2nd order case source grid elements can contribute to a destination grid element over several "paths" 367 (step1: gradient is computed using neighbours on same grid, step2: intersection uses several elements on other grid) 368 accumulate them so that there is only one final weight between two elements */ 369 map<GloId,double> wgt_map; 370 371 /* for destination element `e` loop over all intersetions/the corresponding source elements */ 372 for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++) 373 { 374 /* it is the intersection element, so it->x and it->area are barycentre and area of intersection element (super mesh) 375 but it->id is id of the source element that it intersects */ 376 int n1 = (*it)->id.ind; 377 int rank = (*it)->id.rank; 378 double fk = recvValue[rank][n1]; 379 double srcArea = recvArea[rank][n1]; 380 double w = (*it)->area; 370 MPI_Waitall(nbRecvRequest, recvRequest, status); 371 372 373 /* now that all values and gradients are available use them to computed interpolated values on target 374 and also to compute weights */ 375 int i = 0; 376 for (int j = 0; j < nbElements; j++) 377 { 378 Elt& e = elements[j]; 379 380 /* since for the 2nd order case source grid elements can contribute to a destination grid element over several "paths" 381 (step1: gradient is computed using neighbours on same grid, step2: intersection uses several elements on other grid) 382 accumulate them so that there is only one final weight between two elements */ 383 map<GloId,double> wgt_map; 384 385 /* for destination element `e` loop over all intersetions/the corresponding source elements */ 386 for (list<Polyg *>::iterator it = e.is.begin(); it != e.is.end(); it++) 387 { 388 /* it is the intersection element, so it->x and it->area are barycentre and area of intersection element (super mesh) 389 but it->id is id of the source element that it intersects */ 390 int n1 = (*it)->id.ind; 391 int rank = (*it)->id.rank; 392 double 
fk = recvValue[rank][n1]; 393 double srcArea = recvArea[rank][n1]; 394 double srcGivenArea = recvGivenArea[rank][n1]; 395 double w = (*it)->area; 381 396 if (quantity) w/=srcArea ; 382 383 /* first order: src value times weight (weight = supermesh area), later divide by target area */ 384 int kk = (order == 2) ? n1 * (NMAX + 1) : n1; 385 GloId neighID = recvNeighIds[rank][kk]; 386 wgt_map[neighID] += w; 387 388 if (order == 2) 389 { 390 for (int k = 0; k < NMAX+1; k++) 391 { 392 int kk = n1 * (NMAX + 1) + k; 393 GloId neighID = recvNeighIds[rank][kk]; 394 if (neighID.ind != -1) wgt_map[neighID] += w * scalarprod(recvGrad[rank][kk], (*it)->x); 395 } 396 397 } 398 } 397 else w=w*srcGivenArea/srcArea*e.area/e.given_area ; 398 399 /* first order: src value times weight (weight = supermesh area), later divide by target area */ 400 int kk = (order == 2) ? n1 * (NMAX + 1) : n1; 401 GloId neighID = recvNeighIds[rank][kk]; 402 wgt_map[neighID] += w; 403 404 if (order == 2) 405 { 406 for (int k = 0; k < NMAX+1; k++) 407 { 408 int kk = n1 * (NMAX + 1) + k; 409 GloId neighID = recvNeighIds[rank][kk]; 410 if (neighID.ind != -1) wgt_map[neighID] += w * scalarprod(recvGrad[rank][kk], (*it)->x); 411 } 412 413 } 414 } 399 415 400 416 double renorm=0; 401 417 if (renormalize) 402 for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second / e.area; 418 { 419 if (quantity) for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second ; 420 else for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) renorm+=it->second / e.area; 421 } 403 422 else renorm=1. ; 404 423 405 424 for (map<GloId,double>::iterator it = wgt_map.begin(); it != wgt_map.end(); it++) 406 425 { 407 426 if (quantity) this->remapMatrix[i] = (it->second ) / renorm; 408 409 410 411 427 else this->remapMatrix[i] = (it->second / e.area) / renorm; 428 this->srcAddress[i] = it->first.ind; 429 this->srcRank[i] = it->first.rank; 430 this->dstAddress[i] = j; 412 431 this->sourceWeightId[i]= it->first.globalId ; 413 432 this->targetWeightId[i]= targetGlobalId[j] ; 414 i++; 415 } 416 } 417 418 /* free all memory allocated in this function */ 419 for (int rank = 0; rank < mpiSize; rank++) 420 { 421 if (nbSendElement[rank] > 0) 422 { 423 delete[] sendElement[rank]; 424 delete[] recvValue[rank]; 425 delete[] recvArea[rank]; 426 if (order == 2) 427 { 428 delete[] recvGrad[rank]; 429 } 430 delete[] recvNeighIds[rank]; 431 } 432 if (nbRecvElement[rank] > 0) 433 { 434 delete[] recvElement[rank]; 435 delete[] sendValue[rank]; 436 delete[] sendArea[rank]; 437 if (order == 2) 438 delete[] sendGrad[rank]; 439 delete[] sendNeighIds[rank]; 440 } 441 } 442 delete[] status; 443 delete[] sendRequest; 444 delete[] recvRequest; 445 delete[] elementList; 446 delete[] nbSendElement; 447 delete[] nbRecvElement; 448 delete[] sendElement; 449 delete[] recvElement; 450 delete[] sendValue; 451 delete[] recvValue; 452 delete[] sendGrad; 453 delete[] recvGrad; 454 delete[] sendNeighIds; 455 delete[] recvNeighIds; 456 return i; 433 i++; 434 } 435 } 436 437 /* free all memory allocated in this function */ 438 for (int rank = 0; rank < mpiSize; rank++) 439 { 440 if (nbSendElement[rank] > 0) 441 { 442 delete[] sendElement[rank]; 443 delete[] recvValue[rank]; 444 delete[] recvArea[rank]; 445 delete[] recvGivenArea[rank]; 446 if (order == 2) 447 { 448 delete[] recvGrad[rank]; 449 } 450 delete[] recvNeighIds[rank]; 451 } 452 if (nbRecvElement[rank] > 0) 453 { 454 delete[] 
recvElement[rank]; 455 delete[] sendValue[rank]; 456 delete[] sendArea[rank]; 457 delete[] sendGivenArea[rank]; 458 if (order == 2) 459 delete[] sendGrad[rank]; 460 delete[] sendNeighIds[rank]; 461 } 462 } 463 delete[] status; 464 delete[] sendRequest; 465 delete[] recvRequest; 466 delete[] elementList; 467 delete[] nbSendElement; 468 delete[] nbRecvElement; 469 delete[] sendElement; 470 delete[] recvElement; 471 delete[] sendValue; 472 delete[] recvValue; 473 delete[] sendGrad; 474 delete[] recvGrad; 475 delete[] sendNeighIds; 476 delete[] recvNeighIds; 477 return i; 457 478 } 458 479 459 480 void Mapper::computeGrads() 460 481 { 461 462 463 464 465 466 467 468 469 470 482 /* array of pointers to collect local elements and elements received from other cpu */ 483 vector<Elt*> globalElements(sstree.nbLocalElements + nbNeighbourElements); 484 int index = 0; 485 for (int i = 0; i < sstree.nbLocalElements; i++, index++) 486 globalElements[index] = &(sstree.localElements[i]); 487 for (int i = 0; i < nbNeighbourElements; i++, index++) 488 globalElements[index] = &neighbourElements[i]; 489 490 update_baryc(sstree.localElements, sstree.nbLocalElements); 491 computeGradients(&globalElements[0], sstree.nbLocalElements); 471 492 } 472 493 … … 475 496 void Mapper::buildMeshTopology() 476 497 { 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 498 int mpiSize, mpiRank; 499 MPI_Comm_size(communicator, &mpiSize); 500 MPI_Comm_rank(communicator, &mpiRank); 501 502 vector<Node> *routingList = new vector<Node>[mpiSize]; 503 vector<vector<int> > routes(sstree.localTree.leafs.size()); 504 505 sstree.routeIntersections(routes, sstree.localTree.leafs); 506 507 for (int i = 0; i < routes.size(); ++i) 508 for (int k = 0; k < routes[i].size(); ++k) 509 routingList[routes[i][k]].push_back(sstree.localTree.leafs[i]); 510 routingList[mpiRank].clear(); 511 512 513 CMPIRouting mpiRoute(communicator); 514 mpiRoute.init(routes); 515 int nRecv = mpiRoute.getTotalSourceElement(); 495 516 // cout << mpiRank << " NRECV " << nRecv << "(" << routes.size() << ")"<< endl; 496 517 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 518 int *nbSendNode = new int[mpiSize]; 519 int *nbRecvNode = new int[mpiSize]; 520 int *sendMessageSize = new int[mpiSize]; 521 int *recvMessageSize = new int[mpiSize]; 522 523 for (int rank = 0; rank < mpiSize; rank++) 524 { 525 nbSendNode[rank] = routingList[rank].size(); 526 sendMessageSize[rank] = 0; 527 for (size_t j = 0; j < routingList[rank].size(); j++) 528 { 529 Elt *elt = (Elt *) (routingList[rank][j].data); 530 sendMessageSize[rank] += packedPolygonSize(*elt); 531 } 532 } 533 534 MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator); 535 MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator); 536 537 char **sendBuffer = new char*[mpiSize]; 538 char **recvBuffer = new 
char*[mpiSize]; 539 int *pos = new int[mpiSize]; 540 541 for (int rank = 0; rank < mpiSize; rank++) 542 { 543 if (nbSendNode[rank] > 0) sendBuffer[rank] = new char[sendMessageSize[rank]]; 544 if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]]; 545 } 546 547 for (int rank = 0; rank < mpiSize; rank++) 548 { 549 pos[rank] = 0; 550 for (size_t j = 0; j < routingList[rank].size(); j++) 551 { 552 Elt *elt = (Elt *) (routingList[rank][j].data); 553 packPolygon(*elt, sendBuffer[rank], pos[rank]); 554 } 555 } 556 delete [] routingList; 557 558 559 int nbSendRequest = 0; 560 int nbRecvRequest = 0; 561 MPI_Request *sendRequest = new MPI_Request[mpiSize]; 562 MPI_Request *recvRequest = new MPI_Request[mpiSize]; 563 MPI_Status *status = new MPI_Status[mpiSize]; 564 565 for (int rank = 0; rank < mpiSize; rank++) 566 { 567 if (nbSendNode[rank] > 0) 568 { 569 MPI_Issend(sendBuffer[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]); 570 nbSendRequest++; 571 } 572 if (nbRecvNode[rank] > 0) 573 { 574 MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]); 575 nbRecvRequest++; 576 } 577 } 578 579 MPI_Waitall(nbRecvRequest, recvRequest, status); 580 MPI_Waitall(nbSendRequest, sendRequest, status); 581 582 for (int rank = 0; rank < mpiSize; rank++) 583 if (nbSendNode[rank] > 0) delete [] sendBuffer[rank]; 584 delete [] sendBuffer; 585 586 char **sendBuffer2 = new char*[mpiSize]; 587 char **recvBuffer2 = new char*[mpiSize]; 588 589 for (int rank = 0; rank < mpiSize; rank++) 590 { 591 nbSendNode[rank] = 0; 592 sendMessageSize[rank] = 0; 593 594 if (nbRecvNode[rank] > 0) 595 { 596 set<NodePtr> neighbourList; 597 pos[rank] = 0; 598 for (int j = 0; j < nbRecvNode[rank]; j++) 599 { 600 Elt elt; 601 unpackPolygon(elt, recvBuffer[rank], pos[rank]); 602 Node node(elt.x, cptRadius(elt), &elt); 603 findNeighbour(sstree.localTree.root, &node, neighbourList); 604 } 605 nbSendNode[rank] = neighbourList.size(); 606 for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++) 607 { 608 Elt *elt = (Elt *) ((*it)->data); 609 sendMessageSize[rank] += packedPolygonSize(*elt); 610 } 611 612 sendBuffer2[rank] = new char[sendMessageSize[rank]]; 613 pos[rank] = 0; 614 615 for (set<NodePtr>::iterator it = neighbourList.begin(); it != neighbourList.end(); it++) 616 { 617 Elt *elt = (Elt *) ((*it)->data); 618 packPolygon(*elt, sendBuffer2[rank], pos[rank]); 619 } 620 } 621 } 622 for (int rank = 0; rank < mpiSize; rank++) 623 if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank]; 624 delete [] recvBuffer; 625 626 627 MPI_Barrier(communicator); 628 MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator); 629 MPI_Alltoall(sendMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator); 630 631 for (int rank = 0; rank < mpiSize; rank++) 632 if (nbRecvNode[rank] > 0) recvBuffer2[rank] = new char[recvMessageSize[rank]]; 633 634 nbSendRequest = 0; 635 nbRecvRequest = 0; 636 637 for (int rank = 0; rank < mpiSize; rank++) 638 { 639 if (nbSendNode[rank] > 0) 640 { 641 MPI_Issend(sendBuffer2[rank], sendMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]); 642 nbSendRequest++; 643 } 644 if (nbRecvNode[rank] > 0) 645 { 646 MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]); 647 nbRecvRequest++; 648 } 649 } 650 651 MPI_Waitall(nbRecvRequest, recvRequest, status); 652 
MPI_Waitall(nbSendRequest, sendRequest, status); 653 654 int nbNeighbourNodes = 0; 655 for (int rank = 0; rank < mpiSize; rank++) 656 nbNeighbourNodes += nbRecvNode[rank]; 657 658 neighbourElements = new Elt[nbNeighbourNodes]; 659 nbNeighbourElements = nbNeighbourNodes; 660 661 int index = 0; 662 for (int rank = 0; rank < mpiSize; rank++) 663 { 664 pos[rank] = 0; 665 for (int j = 0; j < nbRecvNode[rank]; j++) 666 { 667 unpackPolygon(neighbourElements[index], recvBuffer2[rank], pos[rank]); 668 neighbourElements[index].id.ind = sstree.localTree.leafs.size() + index; 669 index++; 670 } 671 } 672 for (int rank = 0; rank < mpiSize; rank++) 673 { 674 if (nbRecvNode[rank] > 0) delete [] recvBuffer2[rank]; 675 if (nbSendNode[rank] > 0) delete [] sendBuffer2[rank]; 676 } 677 delete [] recvBuffer2; 678 delete [] sendBuffer2; 679 delete [] sendMessageSize; 680 delete [] recvMessageSize; 681 delete [] nbSendNode; 682 delete [] nbRecvNode; 683 delete [] sendRequest; 684 delete [] recvRequest; 685 delete [] status; 686 delete [] pos; 687 688 /* re-compute on received elements to avoid having to send this information */ 689 neighbourNodes.resize(nbNeighbourNodes); 690 setCirclesAndLinks(neighbourElements, neighbourNodes); 691 cptAllEltsGeom(neighbourElements, nbNeighbourNodes, srcGrid.pole); 692 693 /* the local SS tree must include nodes from other cpus if they are potential 673 694 intersector of a local node */ 674 675 676 695 sstree.localTree.insertNodes(neighbourNodes); 696 697 /* for every local element, 677 698 use the SS-tree to find all elements (including neighbourElements) 678 699 who are potential neighbours because their circles intersect, 679 680 681 682 683 684 685 686 687 688 689 690 691 692 700 then check all canditates for common edges to build up connectivity information 701 */ 702 for (int j = 0; j < sstree.localTree.leafs.size(); j++) 703 { 704 Node& node = sstree.localTree.leafs[j]; 705 706 /* find all leafs whoes circles that intersect node's circle and save into node->intersectors */ 707 node.search(sstree.localTree.root); 708 709 Elt *elt = (Elt *)(node.data); 710 711 for (int i = 0; i < elt->n; i++) elt->neighbour[i] = NOT_FOUND; 712 713 /* for element `elt` loop through all nodes in the SS-tree 693 714 whoes circles intersect with the circle around `elt` (the SS intersectors) 694 715 and check if they are neighbours in the sense that the two elements share an edge. 
695 716 If they do, save this information for elt */ 696 697 698 699 700 717 for (list<NodePtr>::iterator it = (node.intersectors).begin(); it != (node.intersectors).end(); ++it) 718 { 719 Elt *elt2 = (Elt *)((*it)->data); 720 set_neighbour(*elt, *elt2); 721 } 701 722 702 723 /* 703 704 705 706 707 724 for (int i = 0; i < elt->n; i++) 725 { 726 if (elt->neighbour[i] == NOT_FOUND) 727 error_exit("neighbour not found"); 728 } 708 729 */ 709 730 } 710 731 } 711 732 … … 713 734 void Mapper::computeIntersection(Elt *elements, int nbElements) 714 735 { 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 // 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 736 int mpiSize, mpiRank; 737 MPI_Comm_size(communicator, &mpiSize); 738 MPI_Comm_rank(communicator, &mpiRank); 739 740 MPI_Barrier(communicator); 741 742 vector<Node> *routingList = new vector<Node>[mpiSize]; 743 744 vector<Node> routeNodes; routeNodes.reserve(nbElements); 745 for (int j = 0; j < nbElements; j++) 746 { 747 elements[j].id.ind = j; 748 elements[j].id.rank = mpiRank; 749 routeNodes.push_back(Node(elements[j].x, cptRadius(elements[j]), &elements[j])); 750 } 751 752 vector<vector<int> > routes(routeNodes.size()); 753 sstree.routeIntersections(routes, routeNodes); 754 for (int i = 0; i < routes.size(); ++i) 755 for (int k = 0; k < routes[i].size(); ++k) 756 routingList[routes[i][k]].push_back(routeNodes[i]); 757 758 if (verbose >= 2) 759 { 760 cout << " --> rank " << mpiRank << " nbElements " << nbElements << " : "; 761 for (int rank = 0; rank < mpiSize; rank++) 762 cout << routingList[rank].size() << " "; 763 cout << endl; 764 } 765 MPI_Barrier(communicator); 766 767 int *nbSendNode = new int[mpiSize]; 768 int *nbRecvNode = new int[mpiSize]; 769 int *sentMessageSize = new int[mpiSize]; 770 int *recvMessageSize = new int[mpiSize]; 771 772 for (int rank = 0; rank < mpiSize; rank++) 773 { 774 nbSendNode[rank] = routingList[rank].size(); 775 sentMessageSize[rank] = 0; 776 for (size_t j = 0; j < routingList[rank].size(); j++) 777 { 778 Elt *elt = (Elt *) (routingList[rank][j].data); 779 sentMessageSize[rank] += packedPolygonSize(*elt); 780 } 781 } 782 783 MPI_Alltoall(nbSendNode, 1, MPI_INT, nbRecvNode, 1, MPI_INT, communicator); 784 MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator); 785 786 int total = 0; 787 788 for (int rank = 0; rank < mpiSize; rank++) 789 { 790 total = total + nbRecvNode[rank]; 791 } 792 793 if (verbose >= 2) cout << "---> rank " << mpiRank << " : compute intersection : total received nodes " << total << endl; 794 char **sendBuffer = new char*[mpiSize]; 795 char **recvBuffer = new char*[mpiSize]; 796 int *pos = new int[mpiSize]; 797 798 for (int rank = 0; rank < mpiSize; rank++) 799 { 800 if (nbSendNode[rank] > 0) sendBuffer[rank] = new 
char[sentMessageSize[rank]]; 801 if (nbRecvNode[rank] > 0) recvBuffer[rank] = new char[recvMessageSize[rank]]; 802 } 803 804 for (int rank = 0; rank < mpiSize; rank++) 805 { 806 pos[rank] = 0; 807 for (size_t j = 0; j < routingList[rank].size(); j++) 808 { 809 Elt* elt = (Elt *) (routingList[rank][j].data); 810 packPolygon(*elt, sendBuffer[rank], pos[rank]); 811 } 812 } 813 delete [] routingList; 814 815 int nbSendRequest = 0; 816 int nbRecvRequest = 0; 817 MPI_Request *sendRequest = new MPI_Request[mpiSize]; 818 MPI_Request *recvRequest = new MPI_Request[mpiSize]; 819 MPI_Status *status = new MPI_Status[mpiSize]; 820 821 for (int rank = 0; rank < mpiSize; rank++) 822 { 823 if (nbSendNode[rank] > 0) 824 { 825 MPI_Issend(sendBuffer[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]); 826 nbSendRequest++; 827 } 828 if (nbRecvNode[rank] > 0) 829 { 830 MPI_Irecv(recvBuffer[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]); 831 nbRecvRequest++; 832 } 833 } 834 835 MPI_Waitall(nbRecvRequest, recvRequest, status); 836 MPI_Waitall(nbSendRequest, sendRequest, status); 837 char **sendBuffer2 = new char*[mpiSize]; 838 char **recvBuffer2 = new char*[mpiSize]; 839 840 double tic = cputime(); 841 for (int rank = 0; rank < mpiSize; rank++) 842 { 843 sentMessageSize[rank] = 0; 844 845 if (nbRecvNode[rank] > 0) 846 { 847 Elt *recvElt = new Elt[nbRecvNode[rank]]; 848 pos[rank] = 0; 849 for (int j = 0; j < nbRecvNode[rank]; j++) 850 { 851 unpackPolygon(recvElt[j], recvBuffer[rank], pos[rank]); 852 cptEltGeom(recvElt[j], tgtGrid.pole); 853 Node recvNode(recvElt[j].x, cptRadius(recvElt[j]), &recvElt[j]); 854 recvNode.search(sstree.localTree.root); 855 /* for a node holding an element of the target, loop throught candidates for intersecting source */ 856 for (list<NodePtr>::iterator it = (recvNode.intersectors).begin(); it != (recvNode.intersectors).end(); ++it) 857 { 858 Elt *elt2 = (Elt *) ((*it)->data); 859 /* recvElt is target, elt2 is source */ 860 // intersect(&recvElt[j], elt2); 861 intersect_ym(&recvElt[j], elt2); 862 } 863 864 if (recvElt[j].is.size() > 0) sentMessageSize[rank] += packIntersectionSize(recvElt[j]); 865 866 // here recvNode goes out of scope 867 } 868 869 if (sentMessageSize[rank] > 0) 870 { 871 sentMessageSize[rank] += sizeof(int); 872 sendBuffer2[rank] = new char[sentMessageSize[rank]]; 873 *((int *) sendBuffer2[rank]) = 0; 874 pos[rank] = sizeof(int); 875 for (int j = 0; j < nbRecvNode[rank]; j++) 876 { 877 packIntersection(recvElt[j], sendBuffer2[rank], pos[rank]); 878 //FIXME should be deleted: recvElt[j].delete_intersections(); // intersection areas have been packed to buffer and won't be used any more 879 } 880 } 881 delete [] recvElt; 882 883 } 884 } 885 delete [] pos; 886 887 for (int rank = 0; rank < mpiSize; rank++) 888 { 889 if (nbSendNode[rank] > 0) delete [] sendBuffer[rank]; 890 if (nbRecvNode[rank] > 0) delete [] recvBuffer[rank]; 891 nbSendNode[rank] = 0; 892 } 893 894 if (verbose >= 2) cout << "Rank " << mpiRank << " Compute (internal) intersection " << cputime() - tic << " s" << endl; 895 MPI_Alltoall(sentMessageSize, 1, MPI_INT, recvMessageSize, 1, MPI_INT, communicator); 896 897 for (int rank = 0; rank < mpiSize; rank++) 898 if (recvMessageSize[rank] > 0) 899 recvBuffer2[rank] = new char[recvMessageSize[rank]]; 900 901 nbSendRequest = 0; 902 nbRecvRequest = 0; 903 904 for (int rank = 0; rank < mpiSize; rank++) 905 { 906 if (sentMessageSize[rank] > 0) 907 { 908 
MPI_Issend(sendBuffer2[rank], sentMessageSize[rank], MPI_CHAR, rank, 0, communicator, &sendRequest[nbSendRequest]); 909 nbSendRequest++; 910 } 911 if (recvMessageSize[rank] > 0) 912 { 913 MPI_Irecv(recvBuffer2[rank], recvMessageSize[rank], MPI_CHAR, rank, 0, communicator, &recvRequest[nbRecvRequest]); 914 nbRecvRequest++; 915 } 916 } 917 918 MPI_Waitall(nbRecvRequest, recvRequest, status); 919 MPI_Waitall(nbSendRequest, sendRequest, status); 920 921 delete [] sendRequest; 922 delete [] recvRequest; 923 delete [] status; 924 for (int rank = 0; rank < mpiSize; rank++) 925 { 926 if (nbRecvNode[rank] > 0) 927 { 928 if (sentMessageSize[rank] > 0) 929 delete [] sendBuffer2[rank]; 930 } 931 932 if (recvMessageSize[rank] > 0) 933 { 934 unpackIntersection(elements, recvBuffer2[rank]); 935 delete [] recvBuffer2[rank]; 936 } 937 } 938 delete [] sendBuffer2; 939 delete [] recvBuffer2; 940 delete [] sendBuffer; 941 delete [] recvBuffer; 942 943 delete [] nbSendNode; 944 delete [] nbRecvNode; 945 delete [] sentMessageSize; 946 delete [] recvMessageSize; 926 947 } 927 948 928 949 Mapper::~Mapper() 929 950 { 930 931 932 933 934 935 } 936 937 } 951 delete [] remapMatrix; 952 delete [] srcAddress; 953 delete [] srcRank; 954 delete [] dstAddress; 955 if (neighbourElements) delete [] neighbourElements; 956 } 957 958 } -
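The substantive change in mapper.cpp is the given_area plumbing: the caller may now supply its own cell areas, which are exchanged alongside the computed areas (the extra message uses tag 5, hence the request and status arrays growing from 4*mpiSize to 5*mpiSize), and the first-order weight contribution is rescaled by the ratio of supplied to computed area on both the source and the target side. A minimal numeric sketch of that rescaling, with purely illustrative values (this is not the full remap loop, and renormalization is omitted):

    // Hedged sketch of the weight correction introduced in this changeset.
    // Variable names follow the diff; the numbers are made up for illustration.
    #include <iostream>

    int main()
    {
      double supermeshArea = 0.02;  // (*it)->area : area of the intersection (super-mesh) cell
      double srcArea       = 0.10;  // area of the source cell as computed on the sphere
      double srcGivenArea  = 0.11;  // area supplied by the caller for the source cell
      double dstArea       = 0.08;  // e.area : computed area of the target cell
      double dstGivenArea  = 0.09;  // e.given_area : supplied area of the target cell

      // conservative (non-quantity) branch: rescale the raw weight so that fluxes
      // expressed with the caller's areas remain consistent with the computed ones
      double w = supermeshArea;
      w = w * srcGivenArea / srcArea * dstArea / dstGivenArea;

      // the matrix entry is later divided by the target area (renorm taken as 1 here)
      double matrixEntry = w / dstArea;
      std::cout << matrixEntry << "\n";
      return 0;
    }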
XIOS/dev/dev_trunk_omp/extern/remap/src/mapper.hpp
    (r1630 → r1646)
      void setVerbosity(verbosity v) {verbose=v ;}

    - void setSourceMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId=NULL) ;
    - void setTargetMesh(const double* boundsLon, const double* boundsLat, int nVertex, int nbCells, const double* pole, const long int* globalId=NULL) ;
    + void setSourceMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId=NULL) ;
    + void setTargetMesh(const double* boundsLon, const double* boundsLat, const double* area, int nVertex, int nbCells, const double* pole, const long int* globalId=NULL) ;
      void setSourceValue(const double* val) ;
      void getTargetValue(double* val) ;
XIOS/dev/dev_trunk_omp/extern/remap/src/meshutil.cpp
r1630 r1646 2 2 #include "elt.hpp" 3 3 #include "polyg.hpp" 4 #include "intersection_ym.hpp" 5 #include "earcut.hpp" 6 #include <vector> 4 7 5 8 namespace sphereRemap { 6 9 7 10 using namespace std; 11 12 double computePolygoneArea(Elt& a, const Coord &pole) 13 { 14 using N = uint32_t; 15 using Point = array<double, 2>; 16 vector<Point> vect_points; 17 vector< vector<Point> > polyline; 18 19 vector<Coord> dstPolygon ; 20 createGreatCirclePolygon(a, pole, dstPolygon) ; 21 22 int na=dstPolygon.size() ; 23 Coord *a_gno = new Coord[na]; 24 25 Coord OC=barycentre(a.vertex,a.n) ; 26 Coord Oz=OC ; 27 Coord Ox=crossprod(Coord(0,0,1),Oz) ; 28 // choose Ox not too small to avoid rounding error 29 if (norm(Ox)< 0.1) Ox=crossprod(Coord(0,1,0),Oz) ; 30 Ox=Ox*(1./norm(Ox)) ; 31 Coord Oy=crossprod(Oz,Ox) ; 32 double cos_alpha; 33 34 for(int n=0; n<na;n++) 35 { 36 cos_alpha=scalarprod(OC,dstPolygon[n]) ; 37 a_gno[n].x=scalarprod(dstPolygon[n],Ox)/cos_alpha ; 38 a_gno[n].y=scalarprod(dstPolygon[n],Oy)/cos_alpha ; 39 a_gno[n].z=scalarprod(dstPolygon[n],Oz)/cos_alpha ; // must be equal to 1 40 41 vect_points.push_back( array<double, 2>() ); 42 vect_points[n][0] = a_gno[n].x; 43 vect_points[n][1] = a_gno[n].y; 44 45 } 46 47 polyline.push_back(vect_points); 48 vector<N> indices_a_gno = mapbox::earcut<N>(polyline); 49 50 double area_a_gno=0 ; 51 for(int i=0;i<indices_a_gno.size()/3;++i) 52 { 53 Coord x0 = Ox * polyline[0][indices_a_gno[3*i]][0] + Oy* polyline[0][indices_a_gno[3*i]][1] + Oz ; 54 Coord x1 = Ox * polyline[0][indices_a_gno[3*i+1]][0] + Oy* polyline[0][indices_a_gno[3*i+1]][1] + Oz ; 55 Coord x2 = Ox * polyline[0][indices_a_gno[3*i+2]][0] + Oy* polyline[0][indices_a_gno[3*i+2]][1] + Oz ; 56 area_a_gno+=triarea(x0 * (1./norm(x0)),x1* (1./norm(x1)), x2* (1./norm(x2))) ; 57 } 58 59 vect_points.clear(); 60 polyline.clear(); 61 indices_a_gno.clear(); 62 return area_a_gno ; 63 } 64 8 65 9 66 void cptEltGeom(Elt& elt, const Coord &pole) … … 14 71 elt.area = airbar(elt.n, elt.vertex, elt.edge, elt.d, pole, gg); 15 72 elt.x = gg; 16 } 73 // overwrite area computation 74 75 elt.area = computePolygoneArea(elt, pole) ; 76 } 77 17 78 18 79 void cptAllEltsGeom(Elt *elt, int N, const Coord &pole) -
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_cascade.cpp
r1602 r1646 1 1 #include "mpi_cascade.hpp" 2 2 #include <iostream> 3 #ifdef _usingEP 3 4 using namespace ep_lib; 5 #endif 4 6 5 7 namespace sphereRemap { -
XIOS/dev/dev_trunk_omp/extern/remap/src/mpi_routing.cpp
r1602 r1646 5 5 #include "timerRemap.hpp" 6 6 #include <iostream> 7 #ifdef _usingEP 7 8 using namespace ep_lib; 9 #endif 8 10 9 11 namespace sphereRemap { -
XIOS/dev/dev_trunk_omp/extern/remap/src/parallel_tree.cpp
r1602 r1646 12 12 13 13 #include "parallel_tree.hpp" 14 #ifdef _usingEP 14 15 using namespace ep_lib; 16 #endif 15 17 16 18 namespace sphereRemap { -
XIOS/dev/dev_trunk_omp/extern/remap/src/polyg.cpp
    (r1630 → r1646)
      int packedPolygonSize(const Elt& e)
      {
    -   return sizeof(e.id) + sizeof(e.src_id) + sizeof(e.x) + sizeof(e.val) +
    +   return sizeof(e.id) + sizeof(e.src_id) + sizeof(e.x) + sizeof(e.val) + sizeof(e.given_area)+
          sizeof(e.n) + e.n*(sizeof(double)+sizeof(Coord));
      }
      ...
    [packPolygon]
      pos += sizeof(e.val);

    + *((double*) &(buffer[pos])) = e.given_area;
    + pos += sizeof(e.val);
    +
      *((int *) & (buffer[pos])) = e.n;
      pos += sizeof(e.n);
      ...
    [unpackPolygon]
      pos += sizeof(double);

    + e.given_area = *((double *) & (buffer[pos]));
    + pos += sizeof(double);
    +
      e.n = *((int *) & (buffer[pos]));
      pos += sizeof(int);
      ...
    [packIntersection / unpackIntersection: only blank lines are added around the existing e.area and Polyg handling]
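packPolygon and unpackPolygon have to advance through the buffer in exactly the same order and by the same sizes, and packedPolygonSize has to account for every field; note that the new pack step advances by sizeof(e.val) after writing e.given_area, which only works because both fields are double. A hypothetical, self-contained analogue of the pack/unpack symmetry (not the real Elt or buffer layout):

    // Hypothetical, minimal analogue of packPolygon/unpackPolygon for the two
    // double fields involved: writer and reader must advance the cursor by the
    // same sizes in the same order, or every later field is read shifted.
    #include <cstring>
    #include <cstdio>

    struct Cell { double val; double given_area; };

    static void pack(const Cell& c, char* buf, int& pos)
    {
      std::memcpy(buf + pos, &c.val, sizeof(c.val));               pos += sizeof(c.val);
      std::memcpy(buf + pos, &c.given_area, sizeof(c.given_area)); pos += sizeof(c.given_area);
    }

    static void unpack(Cell& c, const char* buf, int& pos)
    {
      std::memcpy(&c.val, buf + pos, sizeof(c.val));               pos += sizeof(c.val);
      std::memcpy(&c.given_area, buf + pos, sizeof(c.given_area)); pos += sizeof(c.given_area);
    }

    int main()
    {
      char buf[64];
      int wpos = 0, rpos = 0;
      Cell in{3.5, 0.42}, out{};
      pack(in, buf, wpos);
      unpack(out, buf, rpos);
      std::printf("val=%g given_area=%g\n", out.val, out.given_area);
      return 0;
    }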
XIOS/dev/dev_trunk_omp/extern/remap/src/timerRemap.cpp
    (r1602 → r1646)
      #include <map>
      #include <iostream>
    + #ifdef _usingEP
      using namespace ep_lib;
    + #endif

      namespace sphereRemap {
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_accumulate.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 102 103 103 104 } 105 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_allgather.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 135 136 136 137 } 138 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_allgatherv.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 121 122 122 123 } 124 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_allocate.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 85 86 86 87 } 88 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_allreduce.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_reduce.cpp … … 72 73 73 74 } 74 75 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_alltoall.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 64 65 65 66 67 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_barrier.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 51 52 } 52 53 53 54 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_barrier.hpp
r1603 r1646 2 2 #define EP_BARRIER_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 5 6 namespace ep_lib … … 59 60 60 61 } 61 62 #endif 62 63 63 64 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_bcast.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_bcast.cpp … … 78 79 79 80 } 81 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_buffer.hpp
r1603 r1646 1 1 #ifndef EP_BUFFER_HPP_INCLUDED 2 2 #define EP_BUFFER_HPP_INCLUDED 3 3 #ifdef _usingEP 4 4 5 5 namespace ep_lib … … 15 15 16 16 17 17 #endif 18 18 #endif // EP_BUFFER_HPP_INCLUDED 19 19 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_comm.hpp
r1603 r1646 1 1 #ifndef EP_COMM_HPP_INCLUDED 2 2 #define EP_COMM_HPP_INCLUDED 3 3 #ifdef _usingEP 4 4 #include "ep_message.hpp" 5 5 #include "ep_barrier.hpp" … … 63 63 64 64 65 65 #endif 66 66 #endif // EP_COMM_HPP_INCLUDED 67 67 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_compare.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 22 23 23 24 } 25 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_create.cpp
r1605 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_create.cpp … … 135 136 136 137 } //namespace ep_lib 138 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_declaration.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 … … 110 111 111 112 113 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_declaration.hpp
r1603 r1646 1 1 #ifndef EP_DECLARATION_HPP_INCLUDED 2 2 #define EP_DECLARATION_HPP_INCLUDED 3 4 #ifdef _usingEP 3 5 4 6 #undef MPI_INT … … 48 50 extern ep_lib::MPI_Info MPI_INFO_NULL; 49 51 52 #endif 50 53 51 54 #endif // EP_DECLARATION_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_dup.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 72 73 73 74 } 75 #endif 74 76 75 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_exscan.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_scan.cpp … … 363 364 364 365 } 366 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_fetch.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 22 23 23 24 } 25 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_finalize.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 23 24 } 24 25 25 26 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_fortran.cpp
r1605 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include "ep_lib_fortran.hpp" … … 86 87 87 88 //} 89 #endif 88 90 89 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_free.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 108 109 109 110 111 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_gather.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 126 127 } 127 128 } 129 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_gatherv.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 93 94 else MPI_Gather_local(&sendcount, 1, MPI_INT, local_recvcounts.data(), 0, comm); 94 95 95 96 97 //if(is_root) printf("rank %d =================local_recvcounts = %d %d\n", ep_rank, local_recvcounts[0], local_recvcounts[1]); 98 //if(!is_root && ep_rank_loc==0 && mpi_rank != root_mpi_rank) printf("rank %d =================local_recvcounts = %d %d\n", ep_rank, local_recvcounts[0], local_recvcounts[1]); 96 99 97 100 if(is_master) … … 108 111 else MPI_Gatherv_local(sendbuf, sendcount, sendtype, local_recvbuf, local_recvcounts.data(), local_displs.data(), 0, comm); 109 112 113 114 //if(is_root) printf("rank %d =================local_recvbuf = %d %d\n", ep_rank, static_cast<int*>(local_recvbuf)[0], static_cast<int*>(local_recvbuf)[1]); 115 //if(!is_root && ep_rank_loc==0 && mpi_rank != root_mpi_rank && ep_rank!=6) printf("rank %d =================local_recvbuf = %d %d\n", ep_rank, static_cast<int*>(local_recvbuf)[0], static_cast<int*>(local_recvbuf)[1]); 116 //if(!is_root && ep_rank_loc==0 && mpi_rank != root_mpi_rank && ep_rank==6) printf("rank %d =================local_recvbuf = %d %d %d\n", ep_rank, static_cast<int*>(local_recvbuf)[0], static_cast<int*>(local_recvbuf)[1], static_cast<int*>(local_recvbuf)[2]); 110 117 111 118 void* tmp_recvbuf; 112 119 int tmp_recvbuf_size = std::accumulate(recvcounts, recvcounts+ep_size, 0); 120 121 113 122 114 123 if(is_root) tmp_recvbuf = new void*[datasize * tmp_recvbuf_size]; … … 121 130 if(is_master) 122 131 { 132 int sendcount_mpi = 0; 133 for(int i=0; i<num_ep; i++) 134 { 135 sendcount_mpi += local_recvcounts[i]; 136 } 137 123 138 for(int i=0; i<ep_size; i++) 124 139 { … … 130 145 131 146 132 ::MPI_Gatherv(local_recvbuf, sendcount*num_ep, to_mpi_type(sendtype), tmp_recvbuf, mpi_recvcounts.data(), mpi_displs.data(), to_mpi_type(recvtype), root_mpi_rank, to_mpi_comm(comm->mpi_comm)); 147 ::MPI_Gatherv(local_recvbuf, sendcount_mpi, to_mpi_type(sendtype), tmp_recvbuf, mpi_recvcounts.data(), mpi_displs.data(), to_mpi_type(recvtype), root_mpi_rank, to_mpi_comm(comm->mpi_comm)); 148 //printf("****************** rank %d, sendcount*num_ep = %d, sendcount_mpi = %d\n", ep_rank, sendcount*num_ep, sendcount_mpi); 149 150 /*if(is_root) printf("rank %d =================tmp_recvbuf = %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d %d\n", ep_rank, static_cast<int*>(tmp_recvbuf)[0], static_cast<int*>(tmp_recvbuf)[1], static_cast<int*>(tmp_recvbuf)[2], 151 static_cast<int*>(tmp_recvbuf)[3], static_cast<int*>(tmp_recvbuf)[4], static_cast<int*>(tmp_recvbuf)[5], 152 static_cast<int*>(tmp_recvbuf)[6], static_cast<int*>(tmp_recvbuf)[7], static_cast<int*>(tmp_recvbuf)[8], 153 static_cast<int*>(tmp_recvbuf)[9], static_cast<int*>(tmp_recvbuf)[10], static_cast<int*>(tmp_recvbuf)[11], 154 static_cast<int*>(tmp_recvbuf)[12], static_cast<int*>(tmp_recvbuf)[13], static_cast<int*>(tmp_recvbuf)[14], 155 static_cast<int*>(tmp_recvbuf)[15], static_cast<int*>(tmp_recvbuf)[16]);*/ 133 156 } 134 157 … … 174 197 175 198 } 199 #endif -
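The ep_gatherv.cpp fix concerns the count the master endpoint passes to the underlying ::MPI_Gatherv: it forwards everything gathered locally from all endpoints of its process, so the MPI-level count must be the sum of the per-endpoint counts rather than sendcount*num_ep, which is only correct when every endpoint contributes the same amount. A standalone illustration of the difference with made-up counts (not XIOS code):

    // Illustrative only: why the MPI-level send count must be the sum of the
    // per-endpoint counts rather than sendcount*num_ep (assumed scenario).
    #include <numeric>
    #include <vector>
    #include <iostream>

    int main()
    {
      // three endpoints hosted by one MPI process gather 2, 3 and 2 items respectively
      std::vector<int> local_recvcounts = {2, 3, 2};
      int num_ep    = static_cast<int>(local_recvcounts.size());
      int sendcount = 2;                   // count of the endpoint issuing the MPI call

      int wrong = sendcount * num_ep;      // 6: mis-counts as soon as the counts differ
      int sendcount_mpi =
          std::accumulate(local_recvcounts.begin(), local_recvcounts.end(), 0);  // 7

      std::cout << wrong << " vs " << sendcount_mpi << "\n";
      return 0;
    }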
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_get.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 57 58 58 59 } 60 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_global.hpp
r1603 r1646 1 1 #ifndef EP_GLOBAL_HPP_INCLUDED 2 2 #define EP_GLOBAL_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 extern int MPI_MODE_NOPRECEDE; … … 9 10 10 11 11 12 #endif 12 13 #endif // EP_GLOBAL_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_info.hpp
r1603 r1646 2 2 #define EP_INFO_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 5 6 namespace ep_lib … … 21 22 } 22 23 23 24 #endif 24 25 25 26 #endif // EP_INFO_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_init.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 43 44 } 44 45 45 46 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_intercomm.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 354 355 if(is_real_involved) 355 356 { 357 #pragma omp critical (_mpi_call) 356 358 ::MPI_Intercomm_create(extracted_comm, local_leader_rank_in_extracted_comm, to_mpi_comm(peer_comm->mpi_comm), remote_leader_rank_in_peer_mpi, tag, &mpi_inter_comm); 357 359 ::MPI_Intercomm_merge(mpi_inter_comm, !priority, intracomm); … … 550 552 551 553 } 554 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 218 219 } 219 220 220 221 221 #endif 222 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib.hpp
r1603 r1646 1 1 #ifndef EP_LIB_HPP_INCLUDED 2 2 #define EP_LIB_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 #include "ep_type.hpp" … … 87 88 88 89 } 89 90 #endif 90 91 #endif // EP_LIB_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_collective.hpp
r1603 r1646 1 1 #ifndef EP_LIB_COLLECTIVE_HPP_INCLUDED 2 2 #define EP_LIB_COLLECTIVE_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 63 64 64 65 } 65 66 #endif 66 67 #endif // EP_LIB_COLLECTIVE_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_endpoint.hpp
r1603 r1646 1 1 #ifndef EP_LIB_ENDPOINT_HPP_INCLUDED 2 2 #define EP_LIB_ENDPOINT_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 23 24 24 25 } 25 26 #endif 26 27 #endif // EP_LIB_ENDPOINT_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_fortran.hpp
r1605 r1646 1 1 #ifndef EP_LIB_FORTRAN_HPP_INCLUDED 2 2 #define EP_LIB_FORTRAN_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 #include "ep_type.hpp" … … 10 11 ep_lib::MPI_Comm EP_Comm_f2c(void* comm); 11 12 //} 12 13 #endif 13 14 14 15 #endif // EP_LIB_FORTRAN_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_intercomm.hpp
r1603 r1646 1 1 #ifndef EP_LIB_INTERCOMM_HPP_INCLUDED 2 2 #define EP_LIB_INTERCOMM_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 34 35 } 35 36 36 37 #endif 37 38 #endif // EP_LIB_INTERCOMM_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_local.hpp
r1603 r1646 1 1 #ifndef EP_LIB_LOCAL_HPP_INCLUDED 2 2 #define EP_LIB_LOCAL_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 32 33 33 34 } 34 35 #endif 35 36 #endif // EP_LIB_LOCAL_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_mpi.hpp
r1603 r1646 1 1 #ifndef EP_LIB_MPI_HPP_INCLUDED 2 2 #define EP_LIB_MPI_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 33 34 34 35 } 36 #endif 35 37 36 38 #endif // EP_LIB_MPI_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_lib_win.hpp
r1603 r1646 1 1 #ifndef EP_LIB_WIN_HPP_INCLUDED 2 2 #define EP_LIB_WIN_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 namespace ep_lib … … 51 52 52 53 } 53 54 #endif 54 55 #endif // EP_LIB_COLLECTIVE_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_merge.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 104 105 105 106 } 107 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_message.cpp
r1628 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_message.cpp … … 237 238 238 239 } 239 240 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_message.hpp
r1603 r1646 1 #ifdef _usingEP 1 2 #ifndef EP_MESSAGE_HPP_INCLUDED 2 3 #define EP_MESSAGE_HPP_INCLUDED … … 25 26 26 27 #endif // EP_MESSAGE_HPP_INCLUDED 27 28 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_mpi.hpp
r1603 r1646 2 2 #define EP_MPI_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 #include "ep_type.hpp" 5 6 … … 15 16 MPI_Request* to_mpi_request_ptr(ep_lib::MPI_Request request); 16 17 MPI_Message* to_mpi_message_ptr(ep_lib::MPI_Message message); 18 #endif 17 19 18 20 #endif // EP_MPI_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_probe.cpp
r1628 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 265 266 266 267 268 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_put.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 59 60 60 61 } 62 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_rank.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 29 30 30 31 32 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_recv.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_recv.cpp … … 176 177 177 178 179 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_reduce.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_reduce.cpp … … 341 342 } 342 343 344 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_reduce_scatter.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_reduce.cpp … … 88 89 } 89 90 91 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_request.hpp
r1603 r1646 1 1 #ifndef EP_REQUEST_HPP_INCLUDED 2 2 #define EP_REQUEST_HPP_INCLUDED 3 #ifdef _usingEP 3 4 4 5 #include "ep_comm.hpp" … … 42 43 } 43 44 44 45 #endif 45 46 46 47 #endif // EP_REQUEST_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_scan.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_scan.cpp … … 528 529 529 530 } 531 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_scatter.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 132 133 133 134 } 135 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_scatterv.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_gather.cpp … … 153 154 154 155 } 156 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_send.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_send.hpp … … 202 203 203 204 } 204 205 206 207 208 209 205 #endif 206 207 208 209 210 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_size.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 42 43 } 43 44 44 45 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_split.cpp
r1605 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 321 322 322 323 } 324 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_status.hpp
r1603 r1646 2 2 #define EP_STATUS_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 5 6 namespace ep_lib … … 24 25 } 25 26 26 27 #endif 27 28 28 29 #endif // EP_STATUS_HPP_INCLUDED -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_test.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_test.cpp … … 113 114 } 114 115 116 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_type.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 38 39 39 40 41 #endif 40 42 41 43 42 44 43 45 44 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_type.hpp
r1603 r1646 2 2 #define EP_TYPE_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 5 6 #include <iostream> … … 91 92 92 93 93 94 #endif 94 95 #endif // EP_TYPE_HPP_INCLUDED 95 96 -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_wait.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 /*! 2 3 \file ep_wait.cpp … … 111 112 } 112 113 114 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_win.cpp
r1603 r1646 1 #ifdef _usingEP 1 2 #include "ep_lib.hpp" 2 3 #include <mpi.h> … … 180 181 181 182 183 #endif -
XIOS/dev/dev_trunk_omp/extern/src_ep_dev/ep_window.hpp
r1603 r1646 2 2 #define EP_WINDOW_HPP_INCLUDED 3 3 4 #ifdef _usingEP 4 5 5 6 namespace ep_lib … … 21 22 } 22 23 23 24 #endif 24 25 25 26 #endif // EP_WINDOW_HPP_INCLUDED -
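All of the extern/src_ep_dev sources and headers above receive the same treatment: their contents are wrapped in #ifdef _usingEP / #endif, so that without the endpoint (EP) build key they compile to empty translation units and plain MPI is used instead. A minimal, self-contained sketch of the pattern; only the _usingEP key and the ep_lib namespace come from the sources, everything else is illustrative:

// guard_sketch.cpp : compiles with or without -D_usingEP.
#include <iostream>

#ifdef _usingEP
namespace ep_lib
{
  // With the endpoint layer enabled, wrapper symbols are compiled in.
  int MPI_Comm_rank_stub() { return 0; }   // placeholder for a real EP wrapper
}
#endif

int main()
{
#ifdef _usingEP
  std::cout << "built with the EP (endpoint) layer: rank "
            << ep_lib::MPI_Comm_rank_stub() << std::endl;
#else
  std::cout << "built without _usingEP: the EP sources compile to empty units"
            << std::endl;
#endif
  return 0;
}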
XIOS/dev/dev_trunk_omp/inputs/COMPLETE/iodef.xml
r1129 r1646 19 19 20 20 <variable_group id="parameters" > 21 <variable id="info_level" type="int"> 100</variable>21 <variable id="info_level" type="int">50</variable> 22 22 <variable id="print_file" type="bool">true</variable> 23 23 </variable_group> -
XIOS/dev/dev_trunk_omp/inputs/REMAP/iodef.xml
r1610 r1646 183 183 <variable_group id="parameters" > 184 184 <variable id="using_server" type="bool">true</variable> 185 <variable id="info_level" type="int"> 50</variable>185 <variable id="info_level" type="int">200</variable> 186 186 <variable id="print_file" type="bool">true</variable> 187 187 </variable_group> -
XIOS/dev/dev_trunk_omp/inputs/iodef.xml
r1628 r1646 21 21 <field field_ref="field_A_zoom" name="field_B" /> 22 22 </file> 23 <file id="output1" name="output1" enabled=". FALSE.">23 <file id="output1" name="output1" enabled=".TRUE."> 24 24 <!-- <field field_ref="field_Domain" name="field_A" /> --> 25 25 <field field_ref="field_A" name="field_A" /> … … 79 79 <variable_group id="parameters" > 80 80 <variable id="using_server" type="bool">false</variable> 81 <variable id="info_level" type="int"> 50</variable>81 <variable id="info_level" type="int">80</variable> 82 82 <variable id="print_file" type="bool">true</variable> 83 83 </variable_group> -
XIOS/dev/dev_trunk_omp/src/array_new.hpp
r1290 r1646 532 532 virtual void fromString(const string& str) { istringstream iss(str); iss >> *this; initialized = true; } 533 533 virtual string toString(void) const { ostringstream oss; oss << *this; return oss.str(); } 534 535 virtual string dump(void) const 536 { 537 ostringstream oss; 538 oss << this->shape()<<" "; 539 if (this->shape().numElements() == 1 && this->shape().dataFirst()[0] == 1) 540 oss << this->dataFirst()[0]; 541 else 542 oss << this->dataFirst()[0] <<" ... "<< this->dataFirst()[this->numElements()-1]; 543 return oss.str(); 544 } 545 534 546 virtual void reset(void) { this->free(); initialized = false; } 535 547 virtual bool isEmpty(void) const { return !initialized; } -
XIOS/dev/dev_trunk_omp/src/attribute.hpp
r1158 r1646 41 41 virtual StdString toString(void) const = 0; 42 42 virtual void fromString(const StdString & str) = 0; 43 virtual StdString dump(void) const = 0; 43 44 virtual bool isEqual(const CAttribute& ) = 0; 44 45 -
XIOS/dev/dev_trunk_omp/src/attribute_array.hpp
r1219 r1646 55 55 virtual bool toBuffer (CBufferOut& buffer) const { return _toBuffer(buffer);} 56 56 virtual bool fromBuffer(CBufferIn& buffer) { return _fromBuffer(buffer); } 57 virtual string dump(void) const { return _dump();} 57 58 58 59 virtual void generateCInterface(ostream& oss,const string& className) ; … … 69 70 CArray<T_numtype, N_rank> inheritedValue ; 70 71 StdString _toString(void) const; 72 StdString _dump(void) const; 71 73 void _fromString(const StdString & str); 72 74 bool _toBuffer (CBufferOut& buffer) const; -
XIOS/dev/dev_trunk_omp/src/attribute_array_impl.hpp
r1219 r1646 129 129 } 130 130 131 template <typename T_numtype, int N_rank> 132 StdString CAttributeArray<T_numtype,N_rank>::_dump(void) const 133 { 134 StdOStringStream oss; 135 if (! isEmpty() && this->hasId() && (this->numElements()!=0)) 136 oss << this->getName() << "=\"" << CArray<T_numtype, N_rank>::dump() << "\""; 137 return (oss.str()); 138 } 139 140 131 141 template <typename T_numtype, int N_rank> 132 142 void CAttributeArray<T_numtype, N_rank>::_fromString(const StdString & str) -
XIOS/dev/dev_trunk_omp/src/attribute_enum.hpp
r1219 r1646 62 62 virtual StdString toString(void) const { return _toString();} 63 63 virtual void fromString(const StdString & str) { if (str==resetInheritanceStr) { reset(); _canInherite=false ;} else _fromString(str);} 64 virtual StdString dump(void) const { return _toString();} 64 65 65 66 virtual bool toBuffer (CBufferOut& buffer) const { return _toBuffer(buffer);} -
XIOS/dev/dev_trunk_omp/src/attribute_map.cpp
r1158 r1646 28 28 att.second->reset(); 29 29 } 30 } 31 32 ///-------------------------------------------------------------- 33 /*! 34 Dump of all non-empty attributes of an object 35 */ 36 StdString CAttributeMap::dumpXiosAttributes(void) const 37 { 38 int maxNbChar = 250; 39 StdString str; 40 typedef std::pair<StdString, CAttribute*> StdStrAttPair; 41 auto it = SuperClassMap::begin(), end = SuperClassMap::end(); 42 for (; it != end; it++) 43 { 44 const StdStrAttPair& att = *it; 45 if (!att.second->isEmpty()) 46 { 47 if (str.length() < maxNbChar) 48 { 49 str.append(att.second->dump()); 50 str.append(" "); 51 } 52 else if (str.length() == maxNbChar) 53 { 54 str.append("..."); 55 } 56 } 57 } 58 return str; 30 59 } 31 60 -
XIOS/dev/dev_trunk_omp/src/attribute_map.hpp
r1601 r1646 38 38 void duplicateAttributes(const CAttributeMap* const _parent); 39 39 void clearAllAttributes(void); 40 StdString dumpXiosAttributes(void) const; 40 41 41 42 void clearAttribute(const StdString& key); -
XIOS/dev/dev_trunk_omp/src/attribute_template.hpp
r1601 r1646 73 73 // virtual void toBinary (StdOStream & os) const; 74 74 // virtual void fromBinary(StdIStream & is); 75 virtual StdString dump(void) const { return _dump();} 75 76 76 77 virtual bool toBuffer (CBufferOut& buffer) const { return _toBuffer(buffer);} … … 97 98 bool isEqual_(const CAttributeTemplate& attr); 98 99 StdString _toString(void) const; 100 StdString _dump(void) const; 99 101 void _fromString(const StdString & str); 100 102 bool _toBuffer (CBufferOut& buffer) const; -
XIOS/dev/dev_trunk_omp/src/attribute_template_impl.hpp
r1601 r1646 199 199 CType<T>::fromString(str) ; 200 200 } 201 202 //--------------------------------------------------------------- 203 204 template <class T> 205 StdString CAttributeTemplate<T>::_dump(void) const 206 { 207 StdOStringStream oss; 208 if (!CType<T>::isEmpty() && this->hasId()) 209 oss << this->getName() << "=\"" << CType<T>::dump() << "\""; 210 return (oss.str()); 211 } 212 201 213 202 214 //--------------------------------------------------------------- -
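The additions to array_new.hpp, attribute*.hpp and attribute_map.cpp above give every attribute a dump() method and let CAttributeMap::dumpXiosAttributes() concatenate the non-empty ones, capped at roughly 250 characters, for use in error reports. A stand-alone sketch of how those pieces are meant to compose, with attribute names and values invented for illustration:

// dump_sketch.cpp : each non-empty attribute renders as name="value" and the map
// concatenates them up to ~250 characters before appending "...".
#include <iostream>
#include <map>
#include <string>

std::string dumpAttributes(const std::map<std::string, std::string>& attrs,
                           std::size_t maxNbChar = 250)
{
  std::string str;
  for (const auto& att : attrs)
  {
    if (att.second.empty()) continue;        // empty attributes are skipped
    if (str.length() < maxNbChar)
      str += att.first + "=\"" + att.second + "\" ";
    else                                     // cap the dump, as in the diff
    {
      str += "...";
      break;
    }
  }
  return str;
}

int main()
{
  std::map<std::string, std::string> attrs = { {"name", "field_A"},
                                               {"unit", "K"},
                                               {"long_name", ""} };
  std::cout << dumpAttributes(attrs) << std::endl;   // name="field_A" unit="K"
  return 0;
}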
XIOS/dev/dev_trunk_omp/src/buffer_client.cpp
r1601 r1646 9 9 10 10 11 #ifdef _usingEP 11 12 using namespace ep_lib; 13 #endif 12 14 13 15 namespace xios … … 31 33 retBuffer = new CBufferOut(buffer[current], bufferSize); 32 34 #pragma omp critical (_output) 33 info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 35 { 36 info(10) << "CClientBuffer: allocated 2 x " << bufferSize << " bytes for server " << serverRank << " with a maximum of " << maxBufferedEvents << " buffered events" << endl; 37 } 34 38 } 35 39 -
XIOS/dev/dev_trunk_omp/src/client.cpp
r1628 r1646 12 12 #include "buffer_client.hpp" 13 13 #include "string_tools.hpp" 14 #ifdef _usingEP 14 15 using namespace ep_lib; 16 #endif 15 17 16 18 namespace xios … … 226 228 MPI_Intercomm_create(contextComm,0,CXios::globalComm,serverLeader,10+globalRank,&contextInterComm) ; 227 229 #pragma omp critical (_output) 228 info(10)<<"Register new Context : "<<id<<endl ; 230 { 231 info(10)<<"Register new Context : "<<id<<endl ; 232 } 229 233 MPI_Comm inter ; 230 234 MPI_Intercomm_merge(contextInterComm,0,&inter) ; … … 303 307 } 304 308 #pragma omp critical (_output) 305 info(20) << "Client side context is finalized"<<endl ; 309 { 310 info(20) << "Client side context is finalized"<<endl ; 311 } 306 312 307 313 #pragma omp critical (_output) -
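In buffer_client.cpp and client.cpp above (and in several files further down), each info()/report() call is now enclosed in explicit braces under #pragma omp critical (_output), so that threads acting as endpoints cannot interleave their output on the shared log stream. A minimal OpenMP sketch of the same idea, using std::cout in place of the XIOS log streams (compile with -fopenmp):

// log_critical_sketch.cpp
#include <iostream>
#include <omp.h>

int main()
{
#pragma omp parallel num_threads(4)
  {
    // Without the named critical section, the chained insertions below could
    // interleave between threads; the braces make the whole statement one unit.
#pragma omp critical (_output)
    {
      std::cout << "thread " << omp_get_thread_num()
                << " : context registered" << std::endl;
    }
  }
  return 0;
}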
XIOS/dev/dev_trunk_omp/src/client_server_mapping.cpp
r1601 r1646 9 9 #include "client_server_mapping.hpp" 10 10 11 #ifdef _usingEP 11 12 using namespace ep_lib; 13 #endif 12 14 13 15 namespace xios { -
XIOS/dev/dev_trunk_omp/src/client_server_mapping_distributed.cpp
r1601 r1646 15 15 #include "context.hpp" 16 16 #include "context_client.hpp" 17 #ifdef _usingEP 17 18 using namespace ep_lib; 19 #endif 18 20 19 21 namespace xios -
XIOS/dev/dev_trunk_omp/src/config/domain_attribute.conf
r1630 r1646 58 58 59 59 DECLARE_ARRAY(double, 2, area) 60 DECLARE_ATTRIBUTE(double, radius) 60 61 61 62 DECLARE_ENUM4(type,rectilinear,curvilinear,unstructured, gaussian) -
XIOS/dev/dev_trunk_omp/src/config/interpolate_domain_attribute.conf
r1630 r1646 10 10 DECLARE_ATTRIBUTE(bool, write_weight) 11 11 DECLARE_ENUM2(read_write_convention, c, fortran) 12 DECLARE_ATTRIBUTE(bool, use_area) -
XIOS/dev/dev_trunk_omp/src/context_client.cpp
r1630 r1646 12 12 #include "cxios.hpp" 13 13 #include "server.hpp" 14 #ifdef _usingEP 14 15 using namespace ep_lib; 16 #endif 15 17 16 18 namespace xios … … 96 98 { 97 99 list<int> ranks = event.getRanks(); 98 100 #pragma omp critical (_output) 101 { 102 info(100)<<"Event "<<timeLine<<" of context "<<context->getId()<<endl ; 103 } 99 104 if (CXios::checkEventSync) 100 105 { … … 124 129 { 125 130 event.send(timeLine, sizes, buffList); 131 #pragma omp critical (_output) 132 { 133 info(100)<<"Event "<<timeLine<<" of context "<<context->getId()<<" sent"<<endl ; 134 } 126 135 127 136 checkBuffers(ranks); … … 140 149 for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++) 141 150 tmpBufferedEvent.buffers.push_back(new CBufferOut(*it)); 142 info(100)<<"DEBUG : temporaly event created : timeline "<<timeLine<<endl ; 151 #pragma omp critical (_output) 152 { 153 info(100)<<"DEBUG : temporaly event created : timeline "<<timeLine<<endl ; 154 } 143 155 event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers); 156 #pragma omp critical (_output) 157 { 158 info(100)<<"Event "<<timeLine<<" of context "<<context->getId()<<" sent"<<endl ; 159 } 144 160 } 145 161 } … … 167 183 (*itBuffer)->put((char*)(*it)->start(), (*it)->count()); 168 184 169 info(100)<<"DEBUG : temporaly event sent "<<endl ; 185 #pragma omp critical (_output) 186 { 187 info(100)<<"DEBUG : temporaly event sent "<<endl ; 188 } 170 189 checkBuffers(tmpBufferedEvent.ranks); 171 190 … … 341 360 if (ratio < minBufferSizeEventSizeRatio) minBufferSizeEventSizeRatio = ratio; 342 361 } 362 363 #ifdef _usingEP 343 364 MPI_Allreduce(&minBufferSizeEventSizeRatio, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 365 #elif _usingMPI 366 MPI_Allreduce(MPI_IN_PLACE, &minBufferSizeEventSizeRatio, 1, MPI_DOUBLE, MPI_MIN, intraComm); 367 #endif 344 368 345 369 if (minBufferSizeEventSizeRatio < 1.0) … … 426 450 { 427 451 #pragma omp critical (_output) 428 info(100)<<"DEBUG : Sent context Finalize event to rank "<<*itRank<<endl ; 452 { 453 info(100)<<"DEBUG : Sent context Finalize event to rank "<<*itRank<<endl ; 454 } 429 455 event.push(*itRank, 1, msg); 430 456 } … … 452 478 { 453 479 #pragma omp critical (_output) 454 report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 480 { 481 report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 455 482 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 483 } 456 484 totalBuf += itMap->second; 457 485 } 458 486 #pragma omp critical (_output) 459 report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 487 { 488 report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 489 } 460 490 461 491 //releaseBuffers(); // moved to CContext::finalize() -
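One hunk in context_client.cpp above splits the buffer-size MPI_Allreduce between the two build modes: the EP wrapper accepts the same pointer as send and receive buffer, while standard MPI requires MPI_IN_PLACE for an in-place reduction. A minimal MPI program showing the plain-MPI form (the value is invented; build with mpicxx and run under mpirun):

// allreduce_inplace_sketch.cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);

  double minRatio = 0.5;   // per-process value, invented for the example

  // Correct in-place reduction with plain MPI:
  MPI_Allreduce(MPI_IN_PLACE, &minRatio, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);

  int rank = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) std::printf("global minimum ratio = %g\n", minRatio);

  MPI_Finalize();
  return 0;
}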
XIOS/dev/dev_trunk_omp/src/context_server.cpp
r1601 r1646 18 18 #include <boost/functional/hash.hpp> 19 19 20 #ifdef _usingEP 20 21 using namespace ep_lib; 22 #endif 21 23 22 24 namespace xios … … 265 267 finished=true; 266 268 #pragma omp critical (_output) 267 info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 269 { 270 info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 271 } 268 272 context->finalize(); 269 273 std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), … … 273 277 rank = itMap->first; 274 278 #pragma omp critical (_output) 275 report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl 279 { 280 report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl 276 281 << " +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl; 282 } 277 283 totalBuf += itMap->second; 278 284 } 279 285 #pragma omp critical (_output) 280 report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 286 { 287 report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 288 } 281 289 } 282 290 else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event); -
XIOS/dev/dev_trunk_omp/src/cxios.cpp
r1601 r1646 11 11 #include "memtrack.hpp" 12 12 #include "registry.hpp" 13 #include "timer.hpp" 14 #ifdef _usingEP 13 15 using namespace ep_lib; 16 #endif 14 17 15 18 namespace xios … … 21 24 const string CXios::serverPrmFile="./xios_server1"; 22 25 const string CXios::serverSndFile="./xios_server2"; 26 27 bool CXios::xiosStack = true; 28 bool CXios::systemStack = false; 23 29 24 30 bool CXios::isClient ; … … 48 54 #pragma omp critical 49 55 { 50 std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl;56 //std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsing rootfile"<<std::endl; 51 57 parseFile(rootFile); 52 58 std::cout<<"thread "<<tmp_rank<<"("<<omp_get_thread_num()<<")"<<" parsed rootfile"<<std::endl; … … 71 77 printLogs2Files=getin<bool>("print_file",false); 72 78 79 xiosStack=getin<bool>("xios_stack",true) ; 80 systemStack=getin<bool>("system_stack",false) ; 81 if (xiosStack && systemStack) 82 { 83 xiosStack = false; 84 } 85 73 86 StdString bufMemory("memory"); 74 87 StdString bufPerformance("performance"); … … 90 103 checkEventSync = getin<bool>("check_event_sync", checkEventSync); 91 104 92 //globalComm=MPI_COMM_WORLD ; 105 #ifdef _usingMPI 106 globalComm=MPI_COMM_WORLD ; 107 #elif _usingEP 93 108 int num_ep; 94 if(isClient) 95 { 96 num_ep = omp_get_num_threads(); 97 } 98 99 if(isServer) 100 { 101 num_ep = 1; 102 } 109 110 if(isClient) num_ep = omp_get_num_threads(); 111 if(isServer) num_ep = 1; 103 112 104 113 MPI_Info info; … … 111 120 112 121 #pragma omp barrier 113 114 115 122 CXios::globalComm = passage[omp_get_thread_num()]; 123 #endif 116 124 } 117 125 … … 123 131 */ 124 132 void CXios::initClientSide(const string& codeId, MPI_Comm& localComm, MPI_Comm& returnComm) 133 TRY 125 134 { 126 135 isClient = true; … … 148 157 } 149 158 } 159 CATCH 150 160 151 161 void CXios::clientFinalize(void) … … 155 165 { 156 166 #pragma omp critical (_output) 157 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 167 { 168 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 169 } 158 170 globalRegistry->toFile("xios_registry.bin") ; 159 171 delete globalRegistry ; … … 232 244 if (CServer::getRank()==0) 233 245 { 234 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 246 #pragma omp critical (_output) 247 { 248 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 249 } 235 250 globalRegistry->toFile("xios_registry.bin") ; 236 251 delete globalRegistry ; … … 289 304 } 290 305 291 info(80)<<"Write data base Registry"<<endl<<globalRegistrySndServers.toString()<<endl ; 306 #pragma omp critical (_output) 307 { 308 info(80)<<"Write data base Registry"<<endl<<globalRegistrySndServers.toString()<<endl ; 309 } 292 310 globalRegistrySndServers.toFile("xios_registry.bin") ; 293 311 … … 296 314 delete globalRegistry; 297 315 } 316 CTimer::get("XIOS").suspend() ; 298 317 CServer::finalize(); 299 318 -
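The cxios.cpp hunk above selects the global communicator per build mode: with _usingMPI it is simply MPI_COMM_WORLD, while with _usingEP one endpoint communicator is created per OpenMP thread and each thread takes its own entry of the shared passage array after a barrier. A simplified, self-contained sketch of that hand-off pattern, with plain integers standing in for the EP communicator handles (compile with -fopenmp):

// endpoint_comm_sketch.cpp
#include <cstdio>
#include <vector>
#include <omp.h>

int main()
{
  std::vector<int> passage;                       // shared between all threads

#pragma omp parallel
  {
    const int num_ep = omp_get_num_threads();     // one endpoint per thread

#pragma omp single
    {
      passage.resize(num_ep);
      for (int i = 0; i < num_ep; ++i) passage[i] = 100 + i;  // "create" all handles
    }
    // implicit barrier at the end of single: handles exist before anyone reads them

    const int myComm = passage[omp_get_thread_num()];          // per-thread handle
#pragma omp critical
    std::printf("thread %d uses handle %d\n", omp_get_thread_num(), myComm);
  }
  return 0;
}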
XIOS/dev/dev_trunk_omp/src/cxios.hpp
r1601 r1646 33 33 static const string serverPrmFile; //!< Filename template for primary server in case of two server levels 34 34 static const string serverSndFile; //!< Filename template for secondary server in case of two server levels 35 36 static bool xiosStack; //!< Exception handling 37 static bool systemStack; //!< Exception handling 38 #pragma omp threadprivate(xiosStack, systemStack) 35 39 36 40 static bool isClient ; //!< Check if xios is client -
XIOS/dev/dev_trunk_omp/src/data_output.cpp
r1542 r1646 47 47 48 48 void CDataOutput::writeGrid(CDomain* domain, CAxis* axis) 49 TRY 49 50 { 50 51 this->writeDomain_(domain); 51 52 this->writeAxis_(axis); 52 53 } 54 CATCH 53 55 54 56 void CDataOutput::writeGrid(std::vector<CDomain*> domains, std::vector<CAxis*> axis) 57 TRY 55 58 { 56 59 int domSize = domains.size(); … … 59 62 for (int i = 0; i < aSize; ++i) this->writeAxis_(axis[i]); 60 63 } 64 CATCH 61 65 62 66 void CDataOutput::writeGrid(std::vector<CDomain*> domains, std::vector<CAxis*> axis, std::vector<CScalar*> scalars) 67 TRY 63 68 { 64 69 int domSize = domains.size(); … … 69 74 for (int i = 0; i < sSize; ++i) this->writeScalar_(scalars[i]); 70 75 } 76 CATCH 71 77 72 78 //---------------------------------------------------------------- 73 79 74 80 void CDataOutput::writeGrid(CDomain* domain) 81 TRY 75 82 { 76 83 this->writeDomain_(domain); 77 84 } 85 CATCH 78 86 79 87 void CDataOutput::writeTimeDimension(void) 88 TRY 80 89 { 81 90 this->writeTimeDimension_(); 82 91 } 92 CATCH 83 93 84 94 //---------------------------------------------------------------- 85 95 86 96 void CDataOutput::writeFieldTimeAxis(CField* field) 97 TRY 87 98 { 88 99 CContext* context = CContext::getCurrent() ; … … 91 102 this->writeTimeAxis_(field, calendar); 92 103 } 93 104 CATCH 105 94 106 void CDataOutput::writeField(CField* field) 107 TRY 95 108 { 96 109 this->writeField_(field); 97 110 } 111 CATCH 98 112 99 113 //---------------------------------------------------------------- 100 114 101 115 void CDataOutput::writeFieldGrid(CField* field) 116 TRY 102 117 { 103 118 this->writeGrid(field->getRelGrid(), 104 119 !field->indexed_output.isEmpty() && field->indexed_output); 105 120 } 106 121 CATCH 107 122 //---------------------------------------------------------------- 108 123 109 124 void CDataOutput::writeFieldData(CField* field) 125 TRY 110 126 { 111 127 // CGrid* grid = CGrid::get(field->grid_ref.getValue()); … … 113 129 this->writeFieldData_(field); 114 130 } 131 CATCH 115 132 116 133 ///---------------------------------------------------------------- -
XIOS/dev/dev_trunk_omp/src/dht_auto_indexing.cpp
r1601 r1646 8 8 */ 9 9 #include "dht_auto_indexing.hpp" 10 #ifdef _usingEP 10 11 using namespace ep_lib; 12 #endif 11 13 12 14 namespace xios -
XIOS/dev/dev_trunk_omp/src/distribution_client.cpp
r1593 r1646 15 15 , axisDomainOrder_() 16 16 , nLocal_(), nGlob_(), nBeginLocal_(), nBeginGlobal_() 17 , dataNIndex_(), dataDims_(), dataBegin_(), dataIndex_() , domainMasks_(), axisMasks_()17 , dataNIndex_(), dataDims_(), dataBegin_(), dataIndex_() 18 18 , gridMask_(), indexMap_() 19 19 , isDataDistributed_(true), axisNum_(0), domainNum_(0) … … 36 36 GlobalLocalMap void2 ; 37 37 std::vector<int> void3 ; 38 std::vector< int> void4 ;38 std::vector<bool> void4 ; 39 39 40 40 globalLocalDataSendToServerMap_.swap(void1) ; … … 61 61 // Then check mask of grid 62 62 int gridDim = domList.size() * 2 + axisList.size(); 63 grid->checkMask();64 63 switch (gridDim) { 65 64 case 0: … … 117 116 axisDomainOrder_ = axisDomainOrder; 118 117 119 // Each domain or axis has its mask, of course120 domainMasks_.resize(domainNum_);121 for (int i = 0; i < domainNum_;++i)122 {123 domainMasks_[i].resize(domList[i]->domainMask.numElements());124 domainMasks_[i] = domList[i]->domainMask;125 }126 127 axisMasks_.resize(axisNum_);128 for (int i = 0; i < axisNum_; ++i)129 {130 axisMasks_[i].resize(axisList[i]->mask.numElements());131 axisMasks_[i] = axisList[i]->mask;132 }133 134 118 // Because domain and axis can be in any order (axis1, domain1, axis2, axis3, ) 135 119 // their position should be specified. In axisDomainOrder, domain == true, axis == false … … 282 266 283 267 if ((iIdx >= nBeginLocal_[indexMap_[i]]) && (iIdx < nLocal_[indexMap_[i]]) && 284 (jIdx >= nBeginLocal_[indexMap_[i]+1]) && (jIdx < nLocal_[indexMap_[i]+1]) && 285 (domainMasks_[idxDomain](iIdx + jIdx*nLocal_[indexMap_[i]]))) 268 (jIdx >= nBeginLocal_[indexMap_[i]+1]) && (jIdx < nLocal_[indexMap_[i]+1])) 286 269 { 287 270 ++count; … … 325 308 elementIndexData_[i].resize(dataNIndex_[i]); 326 309 elementIndexData_[i] = false; 327 int iIdx = 0, count = 0 , localIndex = 0;310 int iIdx = 0, count = 0; 328 311 for (int j = 0; j < dataNIndex_[i]; ++j) 329 312 { 330 313 iIdx = getAxisIndex((dataIndex_[indexMap_[i]])(j), dataBegin_[indexMap_[i]], nLocal_[indexMap_[i]]); 331 314 if ((iIdx >= nBeginLocal_[indexMap_[i]]) && 332 (iIdx < nLocal_[indexMap_[i]]) && (axisMasks_[idxAxis](iIdx)))315 (iIdx < nLocal_[indexMap_[i]]) )//&& (axisMasks_[idxAxis](iIdx))) 333 316 { 334 317 ++count; … … 413 396 414 397 for (int i = 0; i < numElement_; ++i) ssize *= eachElementSize[i]; 415 while (idx < ssize) 416 { 417 for (int i = 0; i < numElement_-1; ++i) 418 { 419 if (idxLoop[i] == eachElementSize[i]) 420 { 421 idxLoop[i] = 0; 422 ++idxLoop[i+1]; 423 } 424 } 425 426 // Find out outer index 427 // Depending the inner-most element is axis or domain, 428 // The outer loop index begins correspondingly at one (1) or zero (0) 429 for (int i = 1; i < numElement_; ++i) 430 { 431 currentIndex[i] = elementLocalIndex_[i](idxLoop[i]); 432 } 433 434 // Inner most index 435 for (int i = 0; i < innerLoopSize; ++i) 436 { 437 int gridMaskIndex = 0; 438 currentIndex[0] = elementLocalIndex_[0](i); 439 440 // If defined, iterate on grid mask 441 if (!gridMask_.isEmpty()) 442 { 443 for (int k = 0; k < this->numElement_; ++k) 444 { 445 gridMaskIndex += (currentIndex[k])*elementNLocal_[k]; 446 } 447 if (gridMask_(gridMaskIndex)) ++indexLocalDataOnClientCount; 448 } 449 // If grid mask is not defined, iterate on elements' mask 450 else 451 { 452 bool maskTmp = true; 453 int idxDomain = 0, idxAxis = 0; 454 for (int elem = 0; elem < numElement_; ++elem) 455 { 456 if (2 == axisDomainOrder_(elem)) 457 { 458 maskTmp = maskTmp && domainMasks_[idxDomain](currentIndex[elem]); 459 ++idxDomain; 460 } 461 
else if (1 == axisDomainOrder_(elem)) 462 { 463 maskTmp = maskTmp && axisMasks_[idxAxis](currentIndex[elem]); 464 ++idxAxis; 465 } 466 } 467 if (maskTmp) ++indexLocalDataOnClientCount; 468 } 469 470 } 471 idxLoop[0] += innerLoopSize; 472 idx += innerLoopSize; 473 } 474 475 // Now allocate these arrays 476 localDataIndex_.resize(indexLocalDataOnClientCount); 477 localMaskIndex_.resize(indexLocalDataOnClientCount); 478 localMaskedDataIndex_.resize(indexLocalDataOnClientCount); 479 globalDataIndex_.rehash(std::ceil(indexLocalDataOnClientCount/globalDataIndex_.max_load_factor())); 480 globalLocalDataSendToServerMap_.rehash(std::ceil(indexLocalDataOnClientCount/globalLocalDataSendToServerMap_.max_load_factor())); 398 399 localDataIndex_.resize(ssize); 400 if (!gridMask_.isEmpty()) localMaskIndex_.resize(ssize); 401 localMaskedDataIndex_.resize(ssize); 402 globalDataIndex_.rehash(std::ceil(ssize/globalDataIndex_.max_load_factor())); 403 globalLocalDataSendToServerMap_.rehash(std::ceil(ssize/globalLocalDataSendToServerMap_.max_load_factor())); 404 481 405 482 406 // We need to loop with data index … … 535 459 if (isCurrentIndexDataCorrect) 536 460 { 537 int gridMaskIndex = 0; 538 for (int k = 0; k < this->numElement_; ++k) 461 bool maskTmp = true; 462 bool maskGridTmp = true; 463 size_t globalIndex = 0; 464 for (int k = 0; k < numElement_; ++k) 539 465 { 540 g ridMaskIndex += (currentIndex[k])*elementNLocal_[k];466 globalIndex += (currentGlobalIndex[k])*elementNGlobal_[k]; 541 467 } 542 543 bool maskTmp = true; 544 // If defined, apply grid mask 545 if (!gridMask_.isEmpty()) 468 globalDataIndex_[globalIndex] = indexLocalDataOnClientCount; 469 localDataIndex_[indexLocalDataOnClientCount] = countLocalData; 470 globalLocalDataSendToServerMap_[globalIndex] = indexLocalDataOnClientCount; 471 localMaskedDataIndex_[indexLocalDataOnClientCount] = indexLocalDataOnClientCount; 472 473 // Grid mask: unmasked values will be replaces by NaN and then all values will be sent 474 if (!gridMask_.isEmpty()) 546 475 { 547 maskTmp = gridMask_(gridMaskIndex); 476 int gridMaskIndex = 0; 477 for (int k = 0; k < this->numElement_; ++k) 478 { 479 gridMaskIndex += (currentIndex[k])*elementNLocal_[k]; 480 } 481 maskGridTmp = gridMask_(gridMaskIndex); 482 if (maskGridTmp) 483 localMaskIndex_[indexLocalDataOnClientCount] = true; 484 else 485 localMaskIndex_[indexLocalDataOnClientCount] = false; 548 486 } 549 // If grid mask is not defined, apply elements' mask 550 else 551 { 552 int idxDomain = 0, idxAxis = 0; 553 for (int elem = 0; elem < numElement_; ++elem) 554 { 555 if (2 == axisDomainOrder_(elem)) 556 { 557 maskTmp = maskTmp && domainMasks_[idxDomain](currentIndex[elem]); 558 ++idxDomain; 559 } 560 else if (1 == axisDomainOrder_(elem)) 561 { 562 maskTmp = maskTmp && axisMasks_[idxAxis](currentIndex[elem]); 563 ++idxAxis; 564 } 565 } 566 } 567 568 if (maskTmp) 569 { 570 size_t globalIndex = 0; 571 for (int k = 0; k < numElement_; ++k) 572 { 573 globalIndex += (currentGlobalIndex[k])*elementNGlobal_[k]; 574 } 575 globalDataIndex_[globalIndex] = indexLocalDataOnClientCount; 576 localDataIndex_[indexLocalDataOnClientCount] = countLocalData; 577 globalLocalDataSendToServerMap_[globalIndex] = indexLocalDataOnClientCount; 578 localMaskIndex_[indexLocalDataOnClientCount] = gridMaskIndex; 579 localMaskedDataIndex_[indexLocalDataOnClientCount] = indexLocalDataOnClientCount; 580 ++indexLocalDataOnClientCount; 581 } 487 488 ++indexLocalDataOnClientCount; 489 582 490 } 583 491 ++countLocalData; … … 586 494 } 587 495 else 
countLocalData+=innerLoopSize ; 588 496 589 497 idxLoop[0] += innerLoopSize; 590 498 idx += innerLoopSize; … … 614 522 const int& dataDim, const int& ni, int& j) 615 523 { 524 int i; 616 525 int tempI = dataIIndex + dataIBegin, 617 526 tempJ = (dataJIndex + dataJBegin); 618 527 if (ni == 0) 619 528 { 620 i nt i = 0;621 j = 0;529 i = -1; 530 j = -1; 622 531 return i; 623 532 } 624 int i = (dataDim == 1) ? (tempI) % ni 625 : (tempI) ; 626 j = (dataDim == 1) ? (tempI) / ni 627 : (tempJ) ; 628 533 if ((tempI < 0) || (tempJ < 0)) 534 { 535 i = -1; 536 j = -1; 537 return i; 538 } 539 else 540 { 541 i = (dataDim == 1) ? (tempI) % ni : (tempI) ; 542 j = (dataDim == 1) ? (tempI) / ni : (tempJ) ; 543 } 629 544 return i; 630 545 } … … 643 558 return -1; 644 559 } 645 int tempI = dataIndex + dataBegin;560 int tempI = dataIndex; 646 561 if ((tempI < 0) || (tempI > ni)) 647 562 return -1; … … 677 592 Return local mask index of client 678 593 */ 679 const std::vector< int>& CDistributionClient::getLocalMaskIndexOnClient()594 const std::vector<bool>& CDistributionClient::getLocalMaskIndexOnClient() 680 595 { 681 596 if (!isComputed_) createGlobalIndexSendToServer(); -
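The distribution_client.cpp changes above move element masking out of the index computation and make getDomainIndex/getAxisIndex report out-of-range data positions as -1 instead of wrapping them. A stand-alone version of the revised (i, j) arithmetic, with invented inputs:

// data_index_sketch.cpp : converts a 1-D data index to (i, j) on the local domain;
// invalid or negative positions now yield (-1, -1).
#include <cstdio>

int getDomainIndex(int dataIIndex, int dataJIndex, int dataIBegin, int dataJBegin,
                   int dataDim, int ni, int& j)
{
  const int tempI = dataIIndex + dataIBegin;
  const int tempJ = dataJIndex + dataJBegin;
  if (ni == 0 || tempI < 0 || tempJ < 0) { j = -1; return -1; }  // out of range
  const int i = (dataDim == 1) ? tempI % ni : tempI;
  j           = (dataDim == 1) ? tempI / ni : tempJ;
  return i;
}

int main()
{
  int j = 0;
  int i = getDomainIndex(/*dataIIndex=*/7, /*dataJIndex=*/0,
                         /*dataIBegin=*/0, /*dataJBegin=*/0,
                         /*dataDim=*/1, /*ni=*/4, j);
  std::printf("data index 7 on a 4-wide domain -> (i=%d, j=%d)\n", i, j);  // (3, 1)
  return 0;
}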
XIOS/dev/dev_trunk_omp/src/distribution_client.hpp
r1562 r1646 33 33 34 34 public: 35 /** Default constructor */ 35 /** Default constructor */ 36 36 CDistributionClient(int rank, CGrid* grid); 37 37 … … 44 44 GlobalLocalDataMap& getGlobalLocalDataSendToServer(); 45 45 GlobalLocalDataMap& getGlobalDataIndexOnClient(); 46 const std::vector< int>& getLocalMaskIndexOnClient();46 const std::vector<bool>& getLocalMaskIndexOnClient(); 47 47 const std::vector<int>& getLocalMaskedDataIndexOnClient(); 48 48 … … 83 83 GlobalLocalDataMap globalLocalDataSendToServerMap_; 84 84 GlobalLocalDataMap globalDataIndex_; 85 86 /*! Array holding masked data indexes. 87 * It includes: 88 * masking on data (data_i/j_index or data_ni/nj and data_ibegin) 89 * masking on grid elements (domains, axes or scalars) 90 * It DOES NOT include grid mask. 91 * The array size defines the data size entering the workflow. It is used by source filter of client or server1. 92 */ 85 93 std::vector<int> localDataIndex_; 86 std::vector<int> localMaskIndex_; 94 95 /*! Array holding grid mask. If grid mask is not defined, its size is zero. 96 * It is used by source filter of client for replacing unmasked data by NaN. 97 */ 98 std::vector<bool> localMaskIndex_; 99 87 100 std::vector<int> localMaskedDataIndex_; 88 101 … … 104 117 std::vector<CArray<int,1> > dataIndex_; //!< Data index 105 118 std::vector<CArray<int,1> > infoIndex_; //!< i_index, j_index 106 107 std::vector<CArray<bool,1> > domainMasks_; //!< Domain mask108 std::vector<CArray<bool,1> > axisMasks_; //!< Axis mask109 119 110 120 std::vector<int> indexMap_; //!< Mapping element index to dimension index -
XIOS/dev/dev_trunk_omp/src/event_client.cpp
r1601 r1646 53 53 { 54 54 #pragma omp critical(_output) 55 info(100)<<"Send event "<<timeLine<<" classId : "<<classId<<" typeId : "<<typeId<<endl ; 55 { 56 info(100)<<"Send event "<<timeLine<<" classId : "<<classId<<" typeId : "<<typeId<<endl ; 57 } 56 58 } 57 59 for (; itBuff != buffers.end(); ++itBuff, ++itSizes, ++itSenders, ++itMsg) -
XIOS/dev/dev_trunk_omp/src/event_scheduler.cpp
r1601 r1646 4 4 #include "tracer.hpp" 5 5 6 #ifdef _usingEP 6 7 using namespace ep_lib; 8 #endif 7 9 8 10 namespace xios -
XIOS/dev/dev_trunk_omp/src/exception.cpp
r828 r1646 5 5 #include "client.hpp" 6 6 #include "server.hpp" 7 #include "cxios.hpp"8 7 #include "log.hpp" 9 8 … … 12 11 /// ////////////////////// Définitions ////////////////////// /// 13 12 CException::CException(void) 14 : CObject(), desc_rethrow(true) 13 : CObject(), desc_rethrow(true), stream() 15 14 { /* Ne rien faire de plus */ } 16 15 17 CException::CException(const StdString & id)18 : CObject(id), desc_rethrow(true) 16 CException::CException(const std::string & id) 17 : CObject(id), desc_rethrow(true), stream() 19 18 { /* Ne rien faire de plus */ } 20 19 21 20 CException::CException(const CException & exception) 22 : std::basic_ios<char>()23 ,CObject(exception.getId())24 , StdOStringStream()25 , desc_rethrow(false)21 // : std::basic_ios<char>() 22 : CObject(exception.getId()) 23 // , StdOStringStream() 24 // , desc_rethrow(false) 26 25 { (*this) << exception.str(); } 27 26 28 27 CException::~CException(void) 29 28 { 30 if (desc_rethrow) 31 #ifdef __XIOS_NOABORT 32 { 33 throw (*this); 34 } 35 #else 36 { 37 error << this->getMessage() << std::endl; 38 MPI_Abort(CXios::globalComm, -1); //abort(); 39 } 40 #endif 29 // if (desc_rethrow) 30 //#ifdef __XIOS_NOABORT 31 // { 32 // throw (*this); 33 // } 34 //#else 35 // { 36 // error << this->getMessage() << std::endl; 37 // throw 4; 38 // MPI_Abort(CXios::globalComm, -1); //abort(); 39 // } 40 //#endif 41 41 } 42 42 43 43 //--------------------------------------------------------------- 44 44 45 StdString CException::getMessage(void) const45 std::string CException::getMessage(void) const 46 46 { 47 StdOStringStream oss; 48 oss << "> Error [" << this->getId() << "] : " << this->str(); 49 return (oss.str()); 47 // StdOStringStream oss; 48 // oss << "> Error [" << this->getId() << "] : " << this->str(); 49 // return (oss.str()); 50 return (stream.str()); 50 51 } 51 52 52 53 StdOStringStream & CException::getStream(void) 53 { return (*boost::polymorphic_cast<StdOStringStream*>(this)); } 54 // { return (*boost::polymorphic_cast<StdOStringStream*>(this)); } 55 { return stream; } 54 56 55 StdString CException::toString(void) const 56 { return (StdString(this->getMessage())); } 57 std::string CException::toString(void) const 58 // { return (std::string(this->getMessage())); } 59 { return stream.str(); } 57 60 58 void CException::fromString(const StdString & str) 59 { this->str(str); } 61 void CException::fromString(const std::string & str) 62 { } 63 // { this->str(str); } 60 64 61 65 //--------------------------------------------------------------- -
XIOS/dev/dev_trunk_omp/src/exception.hpp
r792 r1646 5 5 #include "xios_spl.hpp" 6 6 #include "object.hpp" 7 #include <iomanip> 8 #include <stdexcept> 7 9 8 10 namespace xios 9 11 { 10 12 /// ////////////////////// Déclarations ////////////////////// /// 13 11 14 class CException 12 15 : private CObject, public StdOStringStream … … 33 36 virtual void fromString(const StdString & str); 34 37 38 struct StackInfo 39 { 40 StdString file; 41 StdString function; 42 int line; 43 StdString info; 44 }; 45 46 std::list<StackInfo> stack; 47 35 48 private : 36 49 37 50 /// Propriétés /// 38 51 bool desc_rethrow; // throw destructor 52 StdOStringStream stream; 39 53 40 54 }; // CException … … 43 57 /// //////////////////////////// Macros //////////////////////////// /// 44 58 59 #define FILE_NAME (std::strrchr("/" __FILE__, '/') + 1) 60 61 #define FUNCTION_NAME (StdString(BOOST_CURRENT_FUNCTION).length() > 100 ? \ 62 StdString(BOOST_CURRENT_FUNCTION).substr(0,100).append("...)") : BOOST_CURRENT_FUNCTION) 63 45 64 #define INFO(x) \ 46 "In file \ '" __FILE__ "\',line " << __LINE__ << " -> " x << std::endl;65 "In file \""<< FILE_NAME <<"\", function \"" << BOOST_CURRENT_FUNCTION <<"\", line " << __LINE__ << " -> " x << std::endl; 47 66 48 67 #ifdef __XIOS_DEBUG … … 52 71 #endif 53 72 54 #define ERROR(id, x) xios::CException(id).getStream() << INFO(x) 73 #define ERROR(id, x) \ 74 { \ 75 xios::CException exc(id); \ 76 exc.getStream() << INFO(x); \ 77 error << exc.getMessage() << std::endl; \ 78 throw exc; \ 79 } 80 81 #ifdef __XIOS_EXCEPTION 82 #define TRY \ 83 { \ 84 int funcFirstLine = __LINE__; \ 85 try 86 #define CATCH \ 87 catch(CException& e) \ 88 { \ 89 CException::StackInfo stk; \ 90 stk.file = FILE_NAME; \ 91 stk.function = FUNCTION_NAME; \ 92 stk.line = funcFirstLine; \ 93 e.stack.push_back(stk); \ 94 if (CXios::xiosStack) \ 95 throw; \ 96 else \ 97 throw 0; \ 98 } \ 99 } 100 #define CATCH_DUMP_ATTR \ 101 catch(CException& e) \ 102 { \ 103 CException::StackInfo stk; \ 104 stk.info.append(StdString("Object id=\"" + this->getId()) + "\" object type=\"" + this->getName() + "\"\n"); \ 105 stk.info.append("*** XIOS attributes as defined in XML file(s) or via Fortran interface:\n"); \ 106 stk.info.append("[" + this->dumpXiosAttributes() + "]\n"); \ 107 stk.info.append("*** Additional information:\n"); \ 108 stk.info.append("[" + this->dumpClassAttributes() + "]\n"); \ 109 stk.file = FILE_NAME; \ 110 stk.function = FUNCTION_NAME; \ 111 stk.line = funcFirstLine; \ 112 e.stack.push_back(stk); \ 113 if (CXios::xiosStack) \ 114 throw; \ 115 else \ 116 throw 0; \ 117 } \ 118 } 119 #define CATCH_DUMP_STACK \ 120 catch(CException& e) \ 121 { \ 122 CException::StackInfo stk; \ 123 int i = 1; \ 124 stk.file = FILE_NAME; \ 125 stk.function = FUNCTION_NAME; \ 126 stk.line = funcFirstLine; \ 127 e.stack.push_back(stk); \ 128 for (auto itr = e.stack.crbegin(); itr!=e.stack.crend(); ++itr) \ 129 { \ 130 error << "("<< i <<") **************** "; \ 131 error << itr->function << std::endl; \ 132 error << itr->info << std::endl; \ 133 ++i; \ 134 } \ 135 error << left << " "; \ 136 error << left << std::setw(40) << "File " ; \ 137 error << left << std::setw(106) << "Function " ; \ 138 error << std::setw(5) << "Line " << std::endl; \ 139 i = e.stack.size(); \ 140 for (auto itr = e.stack.begin(); itr!=e.stack.end(); itr++) \ 141 { \ 142 StdOStringStream tmp; \ 143 tmp << "("<< i <<")"; \ 144 error << left << std::setw(6) << tmp.str(); \ 145 error << left << std::setw(40) << itr->file; \ 146 error << left << std::setw(106) << itr->function; \ 147 error << left << 
std::setw(5) << itr->line << std::endl; \ 148 --i; \ 149 } \ 150 throw; \ 151 } \ 152 } 153 #else 154 #define TRY 155 #define CATCH 156 #define CATCH_DUMP_ATTR 157 #define CATCH_DUMP_STACK 158 #endif 55 159 56 160 #endif // __XIOS_CException__ 161 -
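The new exception.hpp above turns ERROR into a throw and introduces the TRY / CATCH / CATCH_DUMP_ATTR / CATCH_DUMP_STACK macros, which record a (file, function, line) entry at every re-throw and print the accumulated trace at the outermost level. A simplified, self-contained illustration of that mechanism; it mirrors only the idea, not the real macros:

// exception_stack_sketch.cpp
#include <iostream>
#include <list>
#include <string>

struct StackInfo { std::string file, function; int line; };

struct Exception
{
  std::string msg;
  std::list<StackInfo> stack;
};

void inner()  { throw Exception{ "invalid attribute", {} }; }

void middle()
{
  try { inner(); }
  catch (Exception& e)                     // plays the role of CATCH
  {
    e.stack.push_back({ __FILE__, "middle", __LINE__ });
    throw;
  }
}

int main()
{
  try { middle(); }
  catch (Exception& e)                     // plays the role of CATCH_DUMP_STACK
  {
    e.stack.push_back({ __FILE__, "main", __LINE__ });
    std::cerr << "> Error : " << e.msg << "\n";
    int i = static_cast<int>(e.stack.size());
    for (const auto& s : e.stack)
      std::cerr << "(" << i-- << ") " << s.file << ":" << s.line
                << " in " << s.function << "\n";
  }
  return 0;
}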
XIOS/dev/dev_trunk_omp/src/filter/file_writer_filter.cpp
r1474 r1646 17 17 void CFileWriterFilter::onInputReady(std::vector<CDataPacketPtr> data) 18 18 { 19 const bool detectMissingValue = ( !field->detect_missing_value.isEmpty()20 && !field->default_value.isEmpty()21 && field->detect_missing_value == true);19 const bool detectMissingValue = ( !field->default_value.isEmpty() && 20 ( (!field->detect_missing_value.isEmpty() || field->detect_missing_value == true) 21 || field->hasGridMask()) ); 22 22 23 23 CArray<double, 1> dataArray = (detectMissingValue) ? data[0]->data.copy() : data[0]->data; -
XIOS/dev/dev_trunk_omp/src/filter/source_filter.cpp
r1250 r1646 7 7 namespace xios 8 8 { 9 CSourceFilter::CSourceFilter(CGarbageCollector& gc, CGrid* grid, bool compression, 9 CSourceFilter::CSourceFilter(CGarbageCollector& gc, CGrid* grid, 10 bool compression /*= true*/, bool mask /*= false*/, 10 11 const CDuration offset /*= NoneDu*/, bool manualTrigger /*= false*/, 11 12 bool hasMissingValue /*= false*/, … … 14 15 , grid(grid) 15 16 , compression(compression) 17 , mask(mask) 16 18 , offset(offset) 17 19 , hasMissingValue(hasMissingValue), defaultValue(defaultValue) … … 40 42 } 41 43 else 42 grid->inputField(data, packet->data); 43 44 45 46 // if (compression) grid->inputField(data, packet->data) ; 47 // else 48 // { 49 // // just make a flat copy 50 // CArray<double, N> data_tmp(data.copy()) ; // supress const attribute 51 // CArray<double,1> dataTmp2(data_tmp.dataFirst(),shape(data.numElements()),neverDeleteData) ; 52 // packet->data = dataTmp2 ; 53 // } 44 { 45 if (mask) 46 grid->maskField(data, packet->data); 47 else 48 grid->inputField(data, packet->data); 49 } 54 50 // Convert missing values to NaN 55 51 if (hasMissingValue) -
XIOS/dev/dev_trunk_omp/src/filter/source_filter.hpp
r1241 r1646 21 21 * \param gc the garbage collector associated with this filter 22 22 * \param grid the grid to which the data is attached 23 * \param compression 24 * \param mask 23 25 * \param offset the offset applied to the timestamp of all packets 24 26 * \param manualTrigger whether the output should be triggered manually … … 27 29 */ 28 30 CSourceFilter(CGarbageCollector& gc, CGrid* grid, 29 bool compression=true, 31 bool compression = true, 32 bool mask = false, 30 33 const CDuration offset = NoneDu, bool manualTrigger = false, 31 34 bool hasMissingValue = false, … … 61 64 62 65 private: 63 CGrid* grid; //!< The grid attached to the data the filter can accept64 const CDuration offset; //!< The offset applied to the timestamp of all packets66 CGrid* grid; //!< The grid attached to the data the filter can accept 67 const CDuration offset; //!< The offset applied to the timestamp of all packets 65 68 const bool hasMissingValue; 66 69 const double defaultValue; 67 const bool compression ; //!< indicate if the data need to be compressed : on client size : true, on server side : false 70 const bool compression ; //!< indicates if data need to be compressed : on client side : true, on server side : false 71 const bool mask ; //!< indicates whether grid mask should be applied (true for clients, false for servers) 68 72 }; // class CSourceFilter 69 73 } // namespace xios -
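source_filter.cpp/.hpp above add a mask flag: on the client side the grid mask is applied by the source filter and, together with the file_writer_filter.cpp change, unmasked points are kept in the stream as default/NaN values rather than being removed. A stand-alone sketch of that substitution, with invented data:

// mask_to_nan_sketch.cpp
#include <cstdio>
#include <limits>
#include <vector>

int main()
{
  const double defaultValue = std::numeric_limits<double>::quiet_NaN();
  std::vector<double> data     = { 1.0, 2.0, 3.0, 4.0 };
  std::vector<bool>   gridMask = { true, false, true, false };

  std::vector<double> packet(data.size());
  for (std::size_t i = 0; i < data.size(); ++i)
    packet[i] = gridMask[i] ? data[i] : defaultValue;   // masked points become NaN

  for (double v : packet) std::printf("%g ", v);
  std::printf("\n");                                    // 1 nan 3 nan
  return 0;
}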
XIOS/dev/dev_trunk_omp/src/filter/spatial_transform_filter.cpp
r1601 r1646 5 5 #include "context_client.hpp" 6 6 #include "timer.hpp" 7 #ifdef _usingEP 7 8 using namespace ep_lib; 9 #endif 8 10 9 11 namespace xios … … 197 199 198 200 CContextClient* client = CContext::getCurrent()->client; 201 int rank; 202 MPI_Comm_rank (client->intraComm, &rank); 199 203 200 204 // Get default value for output data … … 206 210 const std::list<CGridTransformation::RecvIndexGridDestinationMap>& listLocalIndexToReceive = gridTransformation->getLocalIndexToReceiveOnGridDest(); 207 211 const std::list<size_t>& listNbLocalIndexToReceive = gridTransformation->getNbLocalIndexToReceiveOnGridDest(); 208 const std::list<std::vector<bool> >& listLocalIndexMaskOnDest = gridTransformation->getLocalMaskIndexOnGridDest();209 212 const std::vector<CGenericAlgorithmTransformation*>& listAlgos = gridTransformation->getAlgos(); 210 213 … … 215 218 std::list<CGridTransformation::RecvIndexGridDestinationMap>::const_iterator itListRecv = listLocalIndexToReceive.begin(); 216 219 std::list<size_t>::const_iterator itNbListRecv = listNbLocalIndexToReceive.begin(); 217 std::list<std::vector<bool> >::const_iterator itLocalMaskIndexOnDest = listLocalIndexMaskOnDest.begin();218 220 std::vector<CGenericAlgorithmTransformation*>::const_iterator itAlgo = listAlgos.begin(); 219 221 220 for (; itListSend != iteListSend; ++itListSend, ++itListRecv, ++itNbListRecv, ++it LocalMaskIndexOnDest, ++itAlgo)222 for (; itListSend != iteListSend; ++itListSend, ++itListRecv, ++itNbListRecv, ++itAlgo) 221 223 { 222 224 CArray<double,1> dataCurrentSrc(dataCurrentDest); … … 228 230 int idxSendBuff = 0; 229 231 std::vector<double*> sendBuff(localIndexToSend.size()); 232 double* sendBuffRank; 230 233 for (itSend = itbSend; itSend != iteSend; ++itSend, ++idxSendBuff) 231 234 { 235 int destRank = itSend->first; 232 236 if (0 != itSend->second.numElements()) 233 sendBuff[idxSendBuff] = new double[itSend->second.numElements()]; 237 { 238 if (rank != itSend->first) 239 sendBuff[idxSendBuff] = new double[itSend->second.numElements()]; 240 else 241 sendBuffRank = new double[itSend->second.numElements()]; 242 } 234 243 } 235 244 … … 242 251 const CArray<int,1>& localIndex_p = itSend->second; 243 252 int countSize = localIndex_p.numElements(); 244 for (int idx = 0; idx < countSize; ++idx) 245 { 246 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 247 } 248 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position++]); 253 if (destRank != rank) 254 { 255 for (int idx = 0; idx < countSize; ++idx) 256 { 257 sendBuff[idxSendBuff][idx] = dataCurrentSrc(localIndex_p(idx)); 258 } 259 MPI_Isend(sendBuff[idxSendBuff], countSize, MPI_DOUBLE, destRank, 12, client->intraComm, &sendRecvRequest[position++]); 260 261 } 262 else 263 { 264 for (int idx = 0; idx < countSize; ++idx) 265 { 266 sendBuffRank[idx] = dataCurrentSrc(localIndex_p(idx)); 267 } 268 } 249 269 } 250 270 … … 254 274 iteRecv = localIndexToReceive.end(); 255 275 int recvBuffSize = 0; 256 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) recvBuffSize += itRecv->second.size(); //(recvBuffSize < itRecv->second.size()) 257 //? itRecv->second.size() : recvBuffSize; 276 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) 277 { 278 if (itRecv->first != rank ) 279 recvBuffSize += itRecv->second.size(); 280 } 281 //(recvBuffSize < itRecv->second.size()) ? 
itRecv->second.size() : recvBuffSize; 258 282 double* recvBuff; 283 259 284 if (0 != recvBuffSize) recvBuff = new double[recvBuffSize]; 260 285 int currentBuff = 0; … … 262 287 { 263 288 int srcRank = itRecv->first; 264 int countSize = itRecv->second.size(); 265 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position++]); 266 currentBuff += countSize; 289 if (srcRank != rank) 290 { 291 int countSize = itRecv->second.size(); 292 MPI_Irecv(recvBuff + currentBuff, countSize, MPI_DOUBLE, srcRank, 12, client->intraComm, &sendRecvRequest[position++]); 293 currentBuff += countSize; 294 } 267 295 } 268 296 std::vector<MPI_Status> status(sendRecvRequest.size()); 269 MPI_Waitall(sendRecvRequest.size(), &sendRecvRequest[0], &status[0]); 297 MPI_Waitall(position, &sendRecvRequest[0], &status[0]); 298 299 270 300 271 301 dataCurrentDest.resize(*itNbListRecv); 272 const std::vector<bool>& localMaskDest = *itLocalMaskIndexOnDest; 273 for (int i = 0; i < localMaskDest.size(); ++i) 274 if (localMaskDest[i]) dataCurrentDest(i) = 0.0; 275 else dataCurrentDest(i) = defaultValue; 302 dataCurrentDest = 0.0; 276 303 277 304 std::vector<bool> localInitFlag(dataCurrentDest.numElements(), true); … … 280 307 for (itRecv = itbRecv; itRecv != iteRecv; ++itRecv) 281 308 { 282 int countSize = itRecv->second.size();283 309 const std::vector<std::pair<int,double> >& localIndex_p = itRecv->second; 284 (*itAlgo)->apply(localIndex_p, 285 recvBuff+currentBuff, 286 dataCurrentDest, 287 localInitFlag, 288 ignoreMissingValue,firstPass); 289 290 currentBuff += countSize; 310 int srcRank = itRecv->first; 311 if (srcRank != rank) 312 { 313 int countSize = itRecv->second.size(); 314 (*itAlgo)->apply(localIndex_p, 315 recvBuff+currentBuff, 316 dataCurrentDest, 317 localInitFlag, 318 ignoreMissingValue,firstPass); 319 currentBuff += countSize; 320 } 321 else 322 { 323 (*itAlgo)->apply(localIndex_p, 324 sendBuffRank, 325 dataCurrentDest, 326 localInitFlag, 327 ignoreMissingValue,firstPass); 328 } 329 291 330 firstPass=false ; 292 331 } … … 298 337 { 299 338 if (0 != itSend->second.numElements()) 300 delete [] sendBuff[idxSendBuff]; 339 { 340 if (rank != itSend->first) 341 delete [] sendBuff[idxSendBuff]; 342 else 343 delete [] sendBuffRank; 344 } 301 345 } 302 346 if (0 != recvBuffSize) delete [] recvBuff; -
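The spatial_transform_filter.cpp hunks above stop posting MPI sends and receives to the local rank: values whose destination is the calling rank are copied into a dedicated buffer and applied directly, and MPI_Waitall is called on the number of requests actually posted. A minimal MPI sketch of that skip-self pattern (data and tag invented; build with mpicxx):

// self_exchange_sketch.cpp
#include <mpi.h>
#include <vector>
#include <cstdio>

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  std::vector<double> sendValue(size, rank + 1.0);   // one value per destination
  std::vector<double> recvValue(size, 0.0);
  std::vector<MPI_Request> requests;
  requests.reserve(2 * size);

  for (int dest = 0; dest < size; ++dest)
  {
    if (dest == rank) { recvValue[rank] = sendValue[rank]; continue; }   // local copy
    requests.emplace_back();
    MPI_Isend(&sendValue[dest], 1, MPI_DOUBLE, dest, 12, MPI_COMM_WORLD, &requests.back());
    requests.emplace_back();
    MPI_Irecv(&recvValue[dest], 1, MPI_DOUBLE, dest, 12, MPI_COMM_WORLD, &requests.back());
  }

  MPI_Waitall(static_cast<int>(requests.size()), requests.data(), MPI_STATUSES_IGNORE);
  if (rank == 0) std::printf("rank 0 received %zu values\n", recvValue.size());

  MPI_Finalize();
  return 0;
}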
XIOS/dev/dev_trunk_omp/src/filter/store_filter.cpp
r1474 r1646 48 48 { 49 49 std::map<Time, CDataPacketPtr>::const_iterator it ; 50 info(0)<<"Impossible to get the packet with timestamp = " << timestamp<<std::endl<<"Available timestamp are : "<<std::endl ; 51 for(it=packets.begin();it!=packets.end();++it) info(0)<<it->first<<" "; 52 info(0)<<std::endl ; 50 #pragma omp critical (_output) 51 { 52 info(0)<<"Impossible to get the packet with timestamp = " << timestamp<<std::endl<<"Available timestamp are : "<<std::endl ; 53 } 54 for(it=packets.begin();it!=packets.end();++it) 55 { 56 #pragma omp critical (_output) 57 { 58 info(0)<<it->first<<" "; 59 } 60 } 61 #pragma omp critical (_output) 62 { 63 info(0)<<std::endl ; 64 } 53 65 ERROR("CConstDataPacketPtr CStoreFilter::getPacket(Time timestamp) const", 54 66 << "Impossible to get the packet with timestamp = " << timestamp); -
XIOS/dev/dev_trunk_omp/src/generate_fortran_interface.cpp
r1558 r1646 317 317 file.open((path+"ireorder_domain_attr.F90").c_str()); 318 318 reorderDomain.generateFortranInterface(file); 319 320 file.open((path+"extract_domain_interface_attr.F90").c_str());321 extractDomain.generateFortran2003Interface(file);322 file.close();323 324 file.open((path+"icextract_domain_attr.cpp").c_str());325 extractDomain.generateCInterface(file);326 file.close();327 328 file.open((path+"iextract_domain_attr.F90").c_str());329 extractDomain.generateFortranInterface(file);330 331 319 file.close(); 332 320 -
XIOS/dev/dev_trunk_omp/src/generate_interface_impl.hpp
r1158 r1646 215 215 string fortranKindC=getStrFortranKindC<T>(); 216 216 217 oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ") BIND(C)" << iendl; 217 // oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ") BIND(C)" << iendl; 218 int indent = oss.iword(iendl.index); 219 string str = "SUBROUTINE cxios_set_" + className + "_" + name + "(" + className + "_hdl, " + name + ") BIND(C)"; 220 if ((str.length() + indent) >132) 221 { 222 oss << str.substr(0,130-indent) ; 223 oss << "&" << endl; 224 oss << "&" << str.substr(130-indent,str.length()) ; 225 } 226 else 227 { 228 oss << str; 229 } 230 oss << iendl; 218 231 oss << " USE ISO_C_BINDING" << iendl; 219 232 oss << " INTEGER (kind = C_INTPTR_T), VALUE :: " << className << "_hdl" << iendl; … … 231 244 void CInterface::AttributeFortran2003Interface<string>(ostream& oss, const string& className, const string& name) 232 245 { 233 oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ", " << name << "_size) BIND(C)" << iendl; 246 // oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ", " << name << "_size) BIND(C)" << iendl; 247 int indent = oss.iword(iendl.index); 248 string str ="SUBROUTINE cxios_set_" + className + "_" + name + "(" + className + "_hdl, " + name + ", " + name + "_size) BIND(C)"; 249 if ((str.length() + indent) >132) 250 { 251 oss << str.substr(0,130-indent) ; 252 oss << "&" << endl; 253 oss << "&" << str.substr(130-indent,str.length()) ; 254 } 255 else 256 { 257 oss << str; 258 } 259 oss << iendl; 234 260 oss << " USE ISO_C_BINDING" << iendl; 235 261 oss << " INTEGER (kind = C_INTPTR_T), VALUE :: " << className << "_hdl" << iendl; … … 238 264 oss << "END SUBROUTINE cxios_set_" << className << "_" << name << std::endl; 239 265 oss << iendl; 240 oss << "SUBROUTINE cxios_get_" << className << "_" << name << "(" << className << "_hdl, " << name << ", " << name << "_size) BIND(C)" << iendl; 266 // oss << "SUBROUTINE cxios_get_" << className << "_" << name << "(" << className << "_hdl, " << name << ", " << name << "_size) BIND(C)" << iendl; 267 str = "SUBROUTINE cxios_get_" + className + "_" + name + "(" + className + "_hdl, " + name + ", " + name + "_size) BIND(C)"; 268 if ((str.length() + indent) >132) 269 { 270 oss << str.substr(0,130-indent) ; 271 oss << "&" << endl; 272 oss << "&" << str.substr(130-indent,str.length()) ; 273 } 274 else 275 { 276 oss << str; 277 } 278 oss << iendl; 241 279 oss << " USE ISO_C_BINDING" << iendl; 242 280 oss << " INTEGER (kind = C_INTPTR_T), VALUE :: " << className << "_hdl" << iendl; … … 247 285 248 286 template <> 249 void CInterface::AttributeFortran2003Interface<CDate>(ostream& oss, const string& className, const string& name) 250 { 251 oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ") BIND(C)" << iendl; 252 oss << " USE ISO_C_BINDING" << iendl; 253 oss << " USE IDATE" << iendl; 254 oss << " INTEGER (kind = C_INTPTR_T), VALUE :: " << className << "_hdl" << iendl; 255 oss << " TYPE(txios(date)), VALUE :: " << name << iendl; 256 oss << "END SUBROUTINE cxios_set_" << className << "_" << name << std::endl; 287 void CInterface::AttributeFortran2003Interface<CDuration>(ostream& oss, const string& className, const string& name) 288 { 289 // oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ") 
BIND(C)" << iendl; 290 string str = "SUBROUTINE cxios_set_" + className + "_" + name + "(" + className + "_hdl, " + name + ") BIND(C)"; 291 int indent = oss.iword(iendl.index); 292 if ((str.length() + indent) >132) 293 { 294 oss << str.substr(0,130-indent) ; 295 oss << "&" << endl; 296 oss << "&" << str.substr(130-indent,str.length()) ; 297 } 298 else 299 { 300 oss << str; 301 } 257 302 oss << iendl; 258 oss << "SUBROUTINE cxios_get_" << className << "_" << name << "(" << className << "_hdl, " << name << ") BIND(C)" << iendl;259 oss << " USE ISO_C_BINDING" << iendl;260 oss << " USE IDATE" << iendl;261 oss << " INTEGER (kind = C_INTPTR_T), VALUE :: " << className << "_hdl" << iendl;262 oss << " TYPE(txios(date)) :: " << name << iendl;263 oss << "END SUBROUTINE cxios_get_" << className << "_" << name << std::endl;264 }265 266 template <>267 void CInterface::AttributeFortran2003Interface<CDuration>(ostream& oss, const string& className, const string& name)268 {269 oss << "SUBROUTINE cxios_set_" << className << "_" << name << "(" << className << "_hdl, " << name << ") BIND(C)" << iendl;270 303 oss << " USE ISO_C_BINDING" << iendl; 271 304 oss << " USE IDURATION" << iendl; -
XIOS/dev/dev_trunk_omp/src/interface/c/icaxis.cpp
r1542 r1646 28 28 29 29 void cxios_axis_handle_create (XAxisPtr * _ret, const char * _id, int _id_len) 30 TRY 30 31 { 31 32 std::string id; … … 35 36 CTimer::get("XIOS").suspend() ; 36 37 } 38 CATCH_DUMP_STACK 37 39 38 40 void cxios_axisgroup_handle_create (XAxisGroupPtr * _ret, const char * _id, int _id_len) 41 TRY 39 42 { 40 43 std::string id; … … 44 47 CTimer::get("XIOS").suspend() ; 45 48 } 49 CATCH_DUMP_STACK 46 50 47 51 // -------------------- Vérification des identifiants ----------------------- 48 52 49 53 void cxios_axis_valid_id (bool * _ret, const char * _id, int _id_len) 54 TRY 50 55 { 51 56 std::string id; … … 56 61 CTimer::get("XIOS").suspend() ; 57 62 } 63 CATCH_DUMP_STACK 58 64 59 65 void cxios_axisgroup_valid_id (bool * _ret, const char * _id, int _id_len) 66 TRY 60 67 { 61 68 std::string id; … … 67 74 68 75 } 76 CATCH_DUMP_STACK 69 77 70 78 } // extern "C" -
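From icaxis.cpp onward, every extern "C" wrapper in this changeset gains a TRY before its body and a CATCH_DUMP_STACK after it, so that C++ exceptions are caught and reported before they can cross the C/Fortran boundary. The macro definitions themselves are not part of these hunks; the sketch below only shows the general shape such a pair can take as a function-try-block, and the reporting inside the catch clauses is an assumption, not the actual XIOS implementation.

    #include <cstdlib>
    #include <iostream>
    #include <stdexcept>

    // Illustrative stand-ins (not the XIOS definitions). TRY turns the function
    // body into a function-try-block; CATCH_DUMP_STACK reports the failure and
    // aborts instead of letting an exception escape through the C interface.
    #define TRY try
    #define CATCH_DUMP_STACK                                                  \
      catch (const std::exception& e)                                         \
      { std::cerr << "error: " << e.what() << std::endl; std::abort(); }      \
      catch (...)                                                             \
      { std::cerr << "unknown error" << std::endl; std::abort(); }

    // Hypothetical wrapper, mirroring the pattern used in the hunks above.
    void cxios_example_handle_create(int* ret, const char* id, int id_len)
    TRY
    {
      if (id == nullptr || id_len <= 0) throw std::runtime_error("invalid identifier");
      *ret = 1;   // placeholder for the real handle lookup
    }
    CATCH_DUMP_STACK

    int main()
    {
      int handle = 0;
      cxios_example_handle_create(&handle, "axis_A", 6);
      std::cout << handle << std::endl;
    }

Written this way, the existing wrapper bodies stay untouched: only the TRY and CATCH_DUMP_STACK lines are added around them, which is exactly what the remaining interface/c hunks in this changeset do.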
XIOS/dev/dev_trunk_omp/src/interface/c/iccalendar.cpp
r1542 r1646 10 10 { 11 11 void cxios_update_calendar(int step) 12 TRY 12 13 { 13 14 CTimer::get("XIOS").resume(); … … 19 20 CTimer::get("XIOS").suspend(); 20 21 } 22 CATCH_DUMP_STACK 21 23 22 24 void cxios_get_current_date(cxios_date* current_date_c) 25 TRY 23 26 { 24 27 CTimer::get("XIOS").resume(); … … 37 40 CTimer::get("XIOS").suspend(); 38 41 } 42 CATCH_DUMP_STACK 39 43 40 44 int cxios_get_year_length_in_seconds(int year) 45 TRY 41 46 { 42 47 CTimer::get("XIOS").resume(); … … 50 55 return length; 51 56 } 57 CATCH_DUMP_STACK 52 58 53 59 int cxios_get_day_length_in_seconds() 60 TRY 54 61 { 55 62 CTimer::get("XIOS").resume(); … … 63 70 return length; 64 71 } 72 CATCH_DUMP_STACK 65 73 } -
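Each of these wrappers also brackets its body with CTimer::get("XIOS").resume() and CTimer::get("XIOS").suspend(). With the new TRY/CATCH_DUMP_STACK wrapping, an exception can now leave a body between those two calls; one common way to keep the pair balanced on every exit path is a small RAII guard. The sketch below only illustrates that idea with a generic Timer stand-in, not the actual XIOS CTimer class, and the changeset itself keeps the explicit resume()/suspend() calls.

    #include <iostream>
    #include <string>

    // Generic stand-in for a resumable timer (the real XIOS CTimer is richer).
    struct Timer
    {
      std::string name;
      void resume()  { std::cout << name << " resumed\n"; }
      void suspend() { std::cout << name << " suspended\n"; }
    };

    // RAII guard: resume on entry, suspend on every exit path (return or throw).
    class TimerGuard
    {
    public:
      explicit TimerGuard(Timer& t) : timer_(t) { timer_.resume(); }
      ~TimerGuard() { timer_.suspend(); }
      TimerGuard(const TimerGuard&) = delete;
      TimerGuard& operator=(const TimerGuard&) = delete;
    private:
      Timer& timer_;
    };

    // Hypothetical counterpart to cxios_get_day_length_in_seconds.
    int day_length_in_seconds_sketch(Timer& xiosTimer)
    {
      TimerGuard guard(xiosTimer);   // replaces the manual resume()/suspend() pair
      return 86400;                  // placeholder for the calendar query
    }

    int main()
    {
      Timer t{"XIOS"};
      std::cout << day_length_in_seconds_sketch(t) << "\n";
    }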
XIOS/dev/dev_trunk_omp/src/interface/c/iccalendar_wrapper.cpp
r1542 r1646 29 29 30 30 void cxios_calendar_wrapper_handle_create(XCalendarWrapperPtr* _ret, const char* _id, int _id_len) 31 TRY 31 32 { 32 33 std::string id; … … 36 37 CTimer::get("XIOS").suspend(); 37 38 } 39 CATCH_DUMP_STACK 38 40 39 41 void cxios_get_current_calendar_wrapper(XCalendarWrapperPtr* _ret) 42 TRY 40 43 { 41 44 CTimer::get("XIOS").resume(); … … 43 46 CTimer::get("XIOS").suspend(); 44 47 } 48 CATCH_DUMP_STACK 45 49 46 50 // -------------------- Vérification des identifiants ----------------------- 47 51 48 52 void cxios_calendar_wrapper_valid_id(bool* _ret, const char* _id, int _id_len) 53 TRY 49 54 { 50 55 std::string id; … … 54 59 CTimer::get("XIOS").suspend(); 55 60 } 61 CATCH_DUMP_STACK 56 62 57 63 // ----------------------- Custom getters and setters ----------------------- 58 64 59 65 void cxios_set_calendar_wrapper_date_start_date(XCalendarWrapperPtr calendarWrapper_hdl, cxios_date start_date_c) 66 TRY 60 67 { 61 68 CTimer::get("XIOS").resume(); … … 70 77 CTimer::get("XIOS").suspend(); 71 78 } 79 CATCH_DUMP_STACK 72 80 73 81 void cxios_get_calendar_wrapper_date_start_date(XCalendarWrapperPtr calendarWrapper_hdl, cxios_date* start_date_c) 82 TRY 74 83 { 75 84 CTimer::get("XIOS").resume(); … … 83 92 CTimer::get("XIOS").suspend(); 84 93 } 94 CATCH_DUMP_STACK 85 95 86 96 void cxios_set_calendar_wrapper_date_time_origin(XCalendarWrapperPtr calendarWrapper_hdl, cxios_date time_origin_c) 97 TRY 87 98 { 88 99 CTimer::get("XIOS").resume(); … … 97 108 CTimer::get("XIOS").suspend(); 98 109 } 110 CATCH_DUMP_STACK 99 111 100 112 void cxios_get_calendar_wrapper_date_time_origin(XCalendarWrapperPtr calendarWrapper_hdl, cxios_date* time_origin_c) 113 TRY 101 114 { 102 115 CTimer::get("XIOS").resume(); … … 110 123 CTimer::get("XIOS").suspend(); 111 124 } 125 CATCH_DUMP_STACK 112 126 113 127 // ----------------------- Calendar creation and update ---------------------- 114 128 115 129 void cxios_create_calendar(XCalendarWrapperPtr calendarWrapper_hdl) 130 TRY 116 131 { 117 132 CTimer::get("XIOS").resume(); … … 119 134 CTimer::get("XIOS").suspend(); 120 135 } 136 CATCH_DUMP_STACK 121 137 122 138 void cxios_update_calendar_timestep(XCalendarWrapperPtr calendarWrapper_hdl) 139 TRY 123 140 { 124 141 CTimer::get("XIOS").resume(); … … 126 143 CTimer::get("XIOS").suspend(); 127 144 } 145 CATCH_DUMP_STACK 128 146 } // extern "C" -
XIOS/dev/dev_trunk_omp/src/interface/c/iccompute_connectivity_domain.cpp
r1542 r1646 25 25 // ------------------------ Création des handle ----------------------------- 26 26 void cxios_compute_connectivity_domain_handle_create(XComConDomainPtr * _ret, const char * _id, int _id_len) 27 TRY 27 28 { 28 29 std::string id; … … 32 33 CTimer::get("XIOS").suspend() ; 33 34 } 35 CATCH_DUMP_STACK 34 36 35 37 // -------------------- Vérification des identifiants ----------------------- 36 38 void cxios_compute_connectivity_domain_valid_id(bool * _ret, const char * _id, int _id_len) 39 TRY 37 40 { 38 41 std::string id; … … 43 46 CTimer::get("XIOS").suspend() ; 44 47 } 48 CATCH_DUMP_STACK 45 49 } // extern "C" -
XIOS/dev/dev_trunk_omp/src/interface/c/iccontext.cpp
r1542 r1646 32 32 33 33 void cxios_context_handle_create (XContextPtr * _ret, const char * _id, int _id_len) 34 TRY 34 35 { 35 36 std::string id; … … 54 55 // Lever une exeception ici 55 56 } 57 CATCH_DUMP_STACK 56 58 57 59 // ------------------------ Changements de contextes ------------------------ 58 60