Changeset 1460
- Timestamp: 03/22/18 10:43:20
- Location: XIOS/dev/branch_openmp
- Files: 31 added, 164 edited
XIOS/dev/branch_openmp/arch/arch-X64_ADA.fcm
(r1287 → r1460) The -std=c++11 switch is dropped from %DEBUG_CFLAGS: the line now reads "%DEBUG_CFLAGS -DBZ_DEBUG -g -fno-inline", with "#-std=c++11" kept on the following line as a commented-out reminder. %PROD_CFLAGS, %DEV_CFLAGS and %BASE_FFLAGS are unchanged.
XIOS/dev/branch_openmp/extern/remap/src/gridRemap.hpp
(r1335 → r1460) The global grid declarations are commented out: "extern CRemapGrid srcGrid;" and "extern CRemapGrid tgtGrid;" now appear only as comments after readPole().
XIOS/dev/branch_openmp/extern/remap/src/mapper.cpp
(r1341 → r1460) Mostly a re-indentation of cptOffsetsFromLengths(), setSourceMesh(), setTargetMesh(), Mapper::computeWeights() and Mapper::remap(); the code structure is unchanged. The substantive edits are:
- the commented-out debug print of elementList[rank].size() in Mapper::remap() is deleted, while new commented-out debug couts are added around the gradient packing and next to mpiRoute.getTotalSourceElement() ("NRECV");
- all MPI_Issend/MPI_Irecv exchanges of values, areas, gradients and neighbour ids in Mapper::remap() now use message tag 0, where the previous revision used distinct tags 0 to 4; the "//ym --> attention taille GloId" remarks on the GloId message sizes are kept;
- a commented-out consistency check ("neighbour not found") is added after the neighbour search, and a few blank lines move around the MPI_Waitall pairs and the intersect_ym() call.
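Since the remap exchange now distinguishes the four message streams only by posting order, it leans on MPI's non-overtaking guarantee: for a fixed sender, receiver, communicator and tag, messages are matched in the order they were posted. A minimal, self-contained illustration of that property (plain MPI, not XIOS code; run with at least two ranks):

    // Illustration only: two messages on the same tag are received in post order.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      double value = 1.5, area = 2.5;          // stand-ins for sendValue / sendArea
      if (rank == 0)
      {
        MPI_Request req[2];
        MPI_Issend(&value, 1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &req[0]);
        MPI_Issend(&area,  1, MPI_DOUBLE, 1, 0, MPI_COMM_WORLD, &req[1]);
        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
      }
      else if (rank == 1)
      {
        double recvValue, recvArea;
        MPI_Request req[2];
        // Same tag on both receives: the first posted receive matches the first send.
        MPI_Irecv(&recvValue, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &req[0]);
        MPI_Irecv(&recvArea,  1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD, &req[1]);
        MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
        std::printf("value=%g area=%g\n", recvValue, recvArea);  // value=1.5 area=2.5
      }
      MPI_Finalize();
      return 0;
    }

One plausible motivation, given the rest of this changeset, is to keep the point-to-point traffic uniform for the endpoint (EP) MPI wrapper used by this branch, but the changeset itself does not state the reason.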
XIOS/dev/branch_openmp/extern/remap/src/node.cpp
(r1328 → r1460) Whitespace only: a blank line is inserted before the final "return q;".
XIOS/dev/branch_openmp/extern/remap/src/parallel_tree.cpp
(r1355 → r1460) Commented-out blocks are added around the existing logic without changing any executable statement: an old leaf "unpack" loop, an MPI_Allreduce/MPI_Finalize error path next to the MPI_Abort(MPI_COMM_WORLD,-1) that is actually used, and two alternative sample-node selection loops are all kept as comments.
XIOS/dev/branch_openmp/extern/remap/src/parallel_tree.hpp
(r1328 → r1460) Whitespace only: a blank line is added between the includes and the sphereRemap namespace.
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_declaration.cpp
(r1373 → r1460) The endpoint (EP) wrapper gains two standard handles: MPI_UINT64_T_STD (the real MPI_UINT64_T, saved before the #undef) and MPI_LOR_STD, together with the matching "#undef MPI_UINT64_T" and "#undef MPI_LOR". The corresponding extern declarations and the ep_lib handles MPI_UINT64_T and MPI_LOR (pointing at the *_STD objects) are added, and a stale commented-out block of #undef directives is removed.
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_declaration.hpp
(r1369 → r1460) Mirrors the .cpp change: "#undef MPI_UINT64_T" and "#undef MPI_LOR" are added, plus extern declarations for ep_lib::MPI_Datatype MPI_UINT64_T and ep_lib::MPI_Op MPI_LOR.
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_reduce.cpp
(r1365 → r1460) Adds a lor_op<T> functor and a reduce_lor<T> helper (a std::transform over the endpoint buffers), adds MPI_UINT64_T branches to the existing max, min and sum reductions, and appends a logical-or reduction block that calls reduce_lor<int> and requires MPI_INT data. Note that, as committed, this new block is guarded by "if(op == MPI_SUM)" rather than MPI_LOR, which looks like an oversight.
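The new reduce_lor helper is just an element-wise std::transform over the buffers contributed by each endpoint; a standalone sketch of the same operation (outside the EP library, with made-up input values) is:

    // Standalone sketch of the element-wise logical-or reduction added here
    // (the real code runs over comm.my_buffer->void_buffer[i] for each endpoint).
    #include <algorithm>
    #include <cassert>

    template<typename T>
    T lor_op(T a, T b) { return a || b; }

    template<typename T>
    void reduce_lor(const T* buffer, T* recvbuf, int count)
    {
      // recvbuf[k] = buffer[k] || recvbuf[k] for k in [0, count)
      std::transform(buffer, buffer + count, recvbuf, recvbuf, lor_op<T>);
    }

    int main()
    {
      int contribution[3] = {0, 1, 0};
      int result[3]       = {0, 0, 1};
      reduce_lor(contribution, result, 3);
      assert(result[0] == 0 && result[1] == 1 && result[2] == 1);
      return 0;
    }

int is used in the example because, as committed, the logical-or path only accepts MPI_INT.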
XIOS/dev/branch_openmp/inputs/TOY/iodef.xml
(r1118 → r1460) The xios context gains a new variable group "server" with a single entry, <variable id="using_server2" type="bool">false</variable>, placed before the existing "buffer" group.
XIOS/dev/branch_openmp/src/array_new.hpp
(r1334 → r1460) The element-wise array comparison now returns true immediately when both arrays are empty (nbThis==0 && nbArr==0), before constructing the blitz iterators.
XIOS/dev/branch_openmp/src/attribute.hpp
(r1112 → r1460) CAttribute gains a new virtual predicate, doSend(), defaulting to true, which says whether the attribute should be sent to other clients or servers.
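Presumably this flag is what the extra "false" argument appearing in the attribute .conf files below (domain_attribute.conf, grid_attribute.conf, ...) maps onto, so that purely local attributes are skipped when attributes are marshalled to the servers. The changeset does not show the generated code, so the sketch below is purely illustrative (CLocalOnlyAttribute, CSendableAttribute and the marshalling loop are invented):

    // Illustrative only: how a no-send attribute could interact with doSend().
    #include <iostream>
    #include <string>
    #include <vector>

    struct CAttribute
    {
      virtual ~CAttribute() {}
      virtual bool isPublic() const { return true; }
      // New in r1460: should the attribute be sent to other clients or servers?
      virtual bool doSend() const { return true; }
      virtual std::string getName() const = 0;
    };

    // Stand-in for an attribute declared with the extra "false" argument.
    struct CLocalOnlyAttribute : CAttribute
    {
      bool doSend() const override { return false; }
      std::string getName() const override { return "ibegin"; }
    };

    struct CSendableAttribute : CAttribute
    {
      std::string getName() const override { return "ni_glo"; }
    };

    int main()
    {
      std::vector<CAttribute*> attrs = { new CSendableAttribute, new CLocalOnlyAttribute };
      for (CAttribute* a : attrs)
        if (a->doSend())                    // local-only attributes are skipped
          std::cout << "send " << a->getName() << "\n";
      for (CAttribute* a : attrs) delete a;
      return 0;
    }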
XIOS/dev/branch_openmp/src/attribute_array.hpp
(r1112 → r1460) The public overload isEqual(const CAttributeArray&) is removed; only isEqual(const CAttribute&) stays public, and a private helper isEqual_(const CAttributeArray&) is added.
XIOS/dev/branch_openmp/src/attribute_array_impl.hpp
(r1112 → r1460) isEqual(const CAttribute&) now dynamic_casts its argument to CAttributeArray and forwards to the new isEqual_, which compares inherited values: two attributes are equal if neither has an inherited value, or if both have one and the values match. The old direct comparison of the underlying CArray objects is dropped.
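The comparison rule introduced here, and repeated for the enum and template attributes below, is: equal when neither side has an inherited value, unequal when exactly one does, and compared by value when both do. A reduced sketch of that logic outside the XIOS class hierarchy (AttrLike is an invented stand-in, not an XIOS type):

    // Reduced sketch of the new isEqual_ semantics (not the actual XIOS types).
    #include <cassert>

    template<class T>
    struct AttrLike
    {
      bool set = false;
      T value{};

      bool hasInheritedValue() const { return set; }
      T getInheritedValue() const { return value; }

      bool isEqual_(const AttrLike& other) const
      {
        if (!hasInheritedValue() && !other.hasInheritedValue())
          return true;                                  // both empty -> equal
        if (hasInheritedValue() && other.hasInheritedValue())
          return getInheritedValue() == other.getInheritedValue();
        return false;                                   // one set, one empty
      }
    };

    int main()
    {
      AttrLike<int> a, b;
      assert(a.isEqual_(b));        // neither set
      a.set = true; a.value = 3;
      assert(!a.isEqual_(b));       // only one set
      b.set = true; b.value = 3;
      assert(a.isEqual_(b));        // both set, same value
      return 0;
    }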
XIOS/dev/branch_openmp/src/attribute_enum.hpp
(r1328 → r1460) Same refactoring as for CAttributeArray: the public isEqual(const CAttributeEnum&) overload becomes a private isEqual_, leaving only isEqual(const CAttribute&) in the public interface.
XIOS/dev/branch_openmp/src/attribute_enum_impl.hpp
(r1328 → r1460) isEqual(const CAttribute&) dynamic_casts to CAttributeEnum<T> and calls isEqual_, which applies the same inherited-value comparison as the array attribute: equal when neither side has an inherited value, or when both do and the values match.
XIOS/dev/branch_openmp/src/attribute_map.cpp
(r1117 → r1460) Whitespace only: a blank line is removed just before generateCInterface().
XIOS/dev/branch_openmp/src/attribute_template.hpp
(r1117 → r1460) The isEqual_(const CAttributeTemplate&) helper moves from the public section to the private one; the public interface keeps isEqual(const CAttribute&).
XIOS/dev/branch_openmp/src/buffer_client.cpp
(r1360 → r1460) remain(), isBufferFree() and getBuffer() now take and return StdSize instead of int, and the ERROR messages are updated to the new signatures, so buffer sizes and offsets are no longer limited to the range of int.
XIOS/dev/branch_openmp/src/buffer_client.hpp
(r1355 → r1460) Matching header change: the size arguments and return types of isBufferFree(), getBuffer() and remain() become StdSize, and the members count, bufferedEvents, maxEventSize, maxBufferedEvents, bufferSize and estimatedMaxEventSize are declared as StdSize instead of int.
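StdSize is XIOS's size typedef (an unsigned, pointer-sized integer in practice), so the change lifts the 2^31-1 byte ceiling that int imposed on buffer sizes and counters. A small illustration of the arithmetic problem int causes for buffers above 2 GiB (the values are invented, not taken from the changeset):

    // Illustrative only: a 3 GiB buffer size overflows int but fits in std::size_t.
    #include <cstddef>
    #include <iostream>

    int main()
    {
      std::size_t bufferSize = 3ULL * 1024 * 1024 * 1024;   // 3 GiB
      std::size_t count      = 512ULL * 1024 * 1024;        // bytes already used

      std::size_t remainOk = bufferSize - count;            // well-defined, 2.5 GiB
      int remainBad = static_cast<int>(bufferSize) - static_cast<int>(count);
      // remainBad is truncated/negative on typical platforms

      std::cout << "StdSize remain: " << remainOk  << "\n";
      std::cout << "int     remain: " << remainBad << "\n";
      return 0;
    }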
XIOS/dev/branch_openmp/src/calendar.cpp
(r1342 → r1460) CCalendar gains a step counter: every constructor now initialises step(0), a getStep() accessor is added, and update(int step) stores the step before recomputing currentDate.
XIOS/dev/branch_openmp/src/calendar.hpp
(r591 → r1460) Header side of the same change: declaration of "int getStep(void) const" and a new private member "int step".
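A reduced mimic of the new bookkeeping, detached from the real CCalendar/CDate types (everything below is an invented stand-in; it only shows that update() records the step that getStep() later returns):

    // Not the real CCalendar: minimal model of the step bookkeeping added in r1460.
    #include <cassert>

    class CalendarLike
    {
    public:
      CalendarLike() : step(0), currentDate(0) {}
      int getStep() const { return step; }          // new accessor
      int update(int s)                             // stores the step, then advances the date
      {
        step = s;
        currentDate = initDate + s * timestep;
        return currentDate;
      }
    private:
      int step;
      int currentDate;
      static const int initDate = 0;
      static const int timestep = 3600;             // stand-in for the real CDuration
    };

    int main()
    {
      CalendarLike cal;
      cal.update(42);
      assert(cal.getStep() == 42);
      return 0;
    }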
XIOS/dev/branch_openmp/src/client.cpp
(r1374 → r1460) Client initialisation is reworked:
- the static member rank is renamed rank_ (rank in the model intraComm) and an accessor MPI_Comm& getInterComm() is added;
- Doxygen-style comments are added on initialize() (creates the client intraComm and the client/server interComm for the code id) and registerContext() (sends the context-creation request to the server);
- the OASIS branch of initialize(), previously commented out, is reinstated: it duplicates or retrieves the local communicator, calls oasis_get_intercomm(), receives the server leader on rank 0 and broadcasts it over the intraComm;
- registerContext() now takes the attached-mode branch when "CXios::isServer && !context->hasServer" (client and server contexts are built in the same process), and otherwise sends the registration message to the server leader and creates the context intercommunicator as before;
- finalize() re-enables the performance and memory report (init/finalize time, blocking time, ratio, minimum required buffer size);
- the per-process info/error stream file name uses the global MPI rank when OASIS is used and getRank() otherwise.
XIOS/dev/branch_openmp/src/client.hpp
(r1342 → r1460) Header side: rank becomes rank_ (documented as the rank in the model intraComm), getRank() is re-documented as returning the global rank without OASIS and the intraComm rank with OASIS, and "static ep_lib::MPI_Comm& getInterComm()" is declared.
XIOS/dev/branch_openmp/src/client_client_dht_template_impl.hpp
(r1373 → r1460) Whitespace only: blank lines removed after a sendInfoToClients() call.
XIOS/dev/branch_openmp/src/client_server_mapping.cpp
(r1365 → r1460) Adds a non-const accessor CClientServerMapping::getGlobalIndexOnServer() returning a reference to indexGlobalOnServer_.
XIOS/dev/branch_openmp/src/client_server_mapping.hpp
(r1328 → r1460) The pure virtual computeServerIndexMapping() now takes the number of servers as an explicit argument (const CArray<size_t,1>&, int nbServer), and the new non-const getGlobalIndexOnServer() overload is declared alongside the const one.
XIOS/dev/branch_openmp/src/client_server_mapping_distributed.cpp
(r1328 → r1460) computeServerIndexMapping(globalIndexOnClient, nbServer) uses the new nbServer argument (the size of the server intracomm) instead of querying CContext::getCurrent()->client->serverSize; the documentation comment gains the new parameter.
XIOS/dev/branch_openmp/src/client_server_mapping_distributed.hpp
(r1134 → r1460) Header updated to the new signature: computeServerIndexMapping(const CArray<size_t,1>& globalIndexOnClientSendToServer, int nbServer).
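A call site therefore has to pass the server count explicitly now. The commented sketch below is illustrative only (clientServerMap, globalIndexOnClient and client are placeholders; the actual callers are among the other files edited in this changeset):

    // Before r1460 the mapping object looked up the server count itself via
    // CContext::getCurrent()->client->serverSize; now the caller supplies it:
    //
    //   int nbServer = client->serverSize;
    //   clientServerMap->computeServerIndexMapping(globalIndexOnClient, nbServer);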
XIOS/dev/branch_openmp/src/config/axis_attribute.conf
(r1050 → r1460) New axis attributes: formula, formula_term, formula_bounds, formula_term_bounds, bounds_name and dim_name (all StdString), plus an axis_type enumeration (X, Y, Z, T) next to the existing positive enumeration.
XIOS/dev/branch_openmp/src/config/axis_attribute_private.conf
(r1205 → r1460) Adds private local-zoom attributes: the array zoom_index and the scalars zoom_begin and zoom_n.
XIOS/dev/branch_openmp/src/config/domain_attribute.conf
(r1045 → r1460) Adds naming attributes lon_name, lat_name, bounds_lon_name, bounds_lat_name, dim_i_name and dim_j_name. All purely local attributes and arrays (ibegin/ni, jbegin/nj, i_index/j_index, mask_1d/mask_2d, the data_* sizes and index arrays, the local lonvalue/latvalue arrays and the bounds_lon/bounds_lat arrays) are now declared with an extra "false" argument, presumably mapping onto the new doSend() flag in attribute.hpp so they are not sent to servers.
XIOS/dev/branch_openmp/src/config/domain_attribute_private.conf
(r1064 → r1460) Comment wording is corrected (the curvilinear and unstructured longitude/latitude arrays are LOCAL values to be read from a file), and private local-zoom attributes are added: zoom_ibegin, zoom_ni, zoom_jbegin, zoom_nj and the index arrays zoom_i_index and zoom_j_index (declared with the "false" no-send flag).
XIOS/dev/branch_openmp/src/config/file_attribute.conf
(r1205 → r1460) Whitespace only: alignment of the cyclic attribute declaration.
XIOS/dev/branch_openmp/src/config/grid_attribute.conf
(r932 → r1460) The mask_1d .. mask_7d arrays are declared with the extra "false" (no-send) argument, and a one-element mask_0d is added for scalar grids (the comment notes it is meaningless and only serves a coherence purpose).
XIOS/dev/branch_openmp/src/config/interpolate_domain_attribute.conf
(r1114 → r1460) Adds a detect_missing_value boolean and a read_write_convention enumeration (c, fortran) for the interpolation weight files.
XIOS/dev/branch_openmp/src/config/node_type.conf
r976 r1460 67 67 #endif //__XIOS_CReduceDomainToAxis__ 68 68 69 #ifdef __XIOS_CReduceAxisToAxis__ 70 DECLARE_NODE(ReduceAxisToAxis, reduce_axis_to_axis) 71 #endif //__XIOS_CReduceAxisToAxis__ 72 69 73 #ifdef __XIOS_CExtractDomainToAxis__ 70 74 DECLARE_NODE(ExtractDomainToAxis, extract_domain) … … 87 91 #endif //__XIOS_CReduceDomainToScalar__ 88 92 93 #ifdef __XIOS_CTemporalSplitting__ 94 DECLARE_NODE(TemporalSplitting, temporal_splitting) 95 #endif //__XIOS_CTemporalSplitting__ 96 97 #ifdef __XIOS_CDuplicateScalarToAxis__ 98 DECLARE_NODE(DuplicateScalarToAxis, duplicate_scalar_to_axis) 99 #endif //__XIOS_CDuplicateScalarToAxis__ 100 101 #ifdef __XIOS_CReduceScalarToScalar__ 102 DECLARE_NODE(ReduceScalarToScalar, reduce_scalar_to_scalar) 103 #endif //__XIOS_CReduceScalarToScalar_ 104 105 #ifdef __XIOS_CReorderDomain__ 106 DECLARE_NODE(ReorderDomain, reorder_domain) 107 #endif //__XIOS_CReduceScalarToScalar_ 108 89 109 #ifdef __XIOS_CContext__ 90 110 DECLARE_NODE_PAR(Context, context) -
XIOS/dev/branch_openmp/src/config/reduce_domain_to_axis_attribute.conf
r980 r1460 3 3 /* Direction to apply operation (i, j) */ 4 4 DECLARE_ENUM2(direction, iDir, jDir) 5 DECLARE_ATTRIBUTE(bool, local) -
XIOS/dev/branch_openmp/src/config/reduce_domain_to_scalar_attribute.conf
r976 r1460 1 1 DECLARE_ENUM4(operation, min, max, sum, average) 2 DECLARE_ATTRIBUTE(bool, local) -
XIOS/dev/branch_openmp/src/config/scalar_attribute.conf
r1045 r1460 7 7 /* LOCAL and GLOBAL*/ 8 8 DECLARE_ATTRIBUTE(double, value) 9 DECLARE_ATTRIBUTE(StdString, bounds_name) 10 DECLARE_ARRAY(double, 1 , bounds) 9 11 10 12 DECLARE_ATTRIBUTE(StdString, scalar_ref) 11 13 DECLARE_ATTRIBUTE(int, prec) 14 15 DECLARE_ENUM4(axis_type, X, Y, Z, T) 16 DECLARE_ENUM2(positive, up, down) 17 DECLARE_ATTRIBUTE(StdString, label) -
XIOS/dev/branch_openmp/src/context_client.cpp
r1356 r1460 11 11 #include "timer.hpp" 12 12 #include "cxios.hpp" 13 #include "server.hpp" 13 14 using namespace ep_lib; 14 15 … … 19 20 \param [in] intraComm_ communicator of group client 20 21 \param [in] interComm_ communicator of group server 21 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used on case of attached mode)22 \cxtSer [in] cxtSer Pointer to context of server side. (It is only used in case of attached mode). 22 23 */ 23 24 CContextClient::CContextClient(CContext* parent, MPI_Comm intraComm_, MPI_Comm interComm_, CContext* cxtSer) … … 40 41 else MPI_Comm_size(interComm, &serverSize); 41 42 43 computeLeader(clientRank, clientSize, serverSize, ranksServerLeader, ranksServerNotLeader); 44 45 timeLine = 0; 46 } 47 48 void CContextClient::computeLeader(int clientRank, int clientSize, int serverSize, 49 std::list<int>& rankRecvLeader, 50 std::list<int>& rankRecvNotLeader) 51 { 52 if ((0 == clientSize) || (0 == serverSize)) return; 53 42 54 if (clientSize < serverSize) 43 55 { … … 55 67 56 68 for (int i = 0; i < serverByClient; i++) 57 rank sServerLeader.push_back(rankStart + i);58 59 rank sServerNotLeader.resize(0);69 rankRecvLeader.push_back(rankStart + i); 70 71 rankRecvNotLeader.resize(0); 60 72 } 61 73 else … … 67 79 { 68 80 if (clientRank % (clientByServer + 1) == 0) 69 rank sServerLeader.push_back(clientRank / (clientByServer + 1));81 rankRecvLeader.push_back(clientRank / (clientByServer + 1)); 70 82 else 71 rank sServerNotLeader.push_back(clientRank / (clientByServer + 1));83 rankRecvNotLeader.push_back(clientRank / (clientByServer + 1)); 72 84 } 73 85 else … … 75 87 int rank = clientRank - (clientByServer + 1) * remain; 76 88 if (rank % clientByServer == 0) 77 rank sServerLeader.push_back(remain + rank / clientByServer);89 rankRecvLeader.push_back(remain + rank / clientByServer); 78 90 else 79 ranksServerNotLeader.push_back(remain + rank / clientByServer); 80 } 81 } 82 83 timeLine = 0; 91 rankRecvNotLeader.push_back(remain + rank / clientByServer); 92 } 93 } 84 94 } 85 95 … … 92 102 list<int> ranks = event.getRanks(); 93 103 104 if (CXios::checkEventSync) 105 { 106 int typeId, classId, typeId_in, classId_in, timeLine_out; 107 typeId_in=event.getTypeId() ; 108 classId_in=event.getClassId() ; 109 MPI_Allreduce(&timeLine,&timeLine_out, 1, MPI_UINT64_T, MPI_SUM, intraComm) ; 110 MPI_Allreduce(&typeId_in,&typeId, 1, MPI_INT, MPI_SUM, intraComm) ; 111 MPI_Allreduce(&classId_in,&classId, 1, MPI_INT, MPI_SUM, intraComm) ; 112 if (typeId/clientSize!=event.getTypeId() || classId/clientSize!=event.getClassId() || timeLine_out/clientSize!=timeLine) 113 { 114 ERROR("void CContextClient::sendEvent(CEventClient& event)", 115 << "Event are not coherent between client."); 116 } 117 } 118 94 119 if (!event.isEmpty()) 95 120 { 96 121 list<int> sizes = event.getSizes(); 97 122 98 // We force the getBuffers call to be non-blocking on theservers123 // We force the getBuffers call to be non-blocking on classical servers 99 124 list<CBufferOut*> buffList; 100 bool couldBuffer = getBuffers(ranks, sizes, buffList, !CXios::isClient); 125 bool couldBuffer = getBuffers(ranks, sizes, buffList, (!CXios::isClient && (CServer::serverLevel == 0) )); 126 // bool couldBuffer = getBuffers(ranks, sizes, buffList, CXios::isServer ); 101 127 102 128 if (couldBuffer) … … 119 145 for (list<int>::const_iterator it = sizes.begin(); it != sizes.end(); it++) 120 146 tmpBufferedEvent.buffers.push_back(new CBufferOut(*it)); 121 147 info(100)<<"DEBUG : temporaly event created : timeline 
"<<timeLine<<endl ; 122 148 event.send(timeLine, tmpBufferedEvent.sizes, tmpBufferedEvent.buffers); 123 149 } … … 146 172 (*itBuffer)->put((char*)(*it)->start(), (*it)->count()); 147 173 174 info(100)<<"DEBUG : temporaly event sent "<<endl ; 148 175 checkBuffers(tmpBufferedEvent.ranks); 149 176 … … 187 214 * \return whether the already allocated buffers could be used 188 215 */ 189 bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers, bool nonBlocking /*= false*/) 216 bool CContextClient::getBuffers(const list<int>& serverList, const list<int>& sizeList, list<CBufferOut*>& retBuffers, 217 bool nonBlocking /*= false*/) 190 218 { 191 219 list<int>::const_iterator itServer, itSize; … … 216 244 { 217 245 checkBuffers(); 218 context->server->listen(); 246 if (CServer::serverLevel == 0) 247 context->server->listen(); 248 249 else if (CServer::serverLevel == 1) 250 { 251 context->server->listen(); 252 for (int i = 0; i < context->serverPrimServer.size(); ++i) 253 context->serverPrimServer[i]->listen(); 254 CServer::contextEventLoop(false) ; // avoid dead-lock at finalize... 255 } 256 257 else if (CServer::serverLevel == 2) 258 context->server->listen(); 259 219 260 } 220 261 } while (!areBuffersFree && !nonBlocking); 262 221 263 CTimer::get("Blocking time").suspend(); 222 264 … … 257 299 map<int,CClientBuffer*>::iterator itBuff; 258 300 bool pending = false; 259 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) pending |= itBuff->second->checkBuffer(); 301 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 302 pending |= itBuff->second->checkBuffer(); 260 303 return pending; 261 304 } 262 305 263 306 //! Release all buffers 264 void CContextClient::releaseBuffers( void)307 void CContextClient::releaseBuffers() 265 308 { 266 309 map<int,CClientBuffer*>::iterator itBuff; 267 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) delete itBuff->second; 310 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 311 { 312 delete itBuff->second; 313 } 314 buffers.clear(); 268 315 } 269 316 … … 362 409 363 410 /*! 
364 Finalize context client and do some reports 365 */ 366 void CContextClient::finalize(void) 367 { 368 map<int,CClientBuffer*>::iterator itBuff; 369 bool stop = false; 370 371 CTimer::get("Blocking time").resume(); 372 while (hasTemporarilyBufferedEvent()) 373 { 374 checkBuffers(); 375 sendTemporarilyBufferedEvent(); 376 } 377 CTimer::get("Blocking time").suspend(); 378 379 CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE); 380 if (isServerLeader()) 381 { 382 CMessage msg; 383 const std::list<int>& ranks = getRanksServerLeader(); 384 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 385 event.push(*itRank, 1, msg); 386 sendEvent(event); 387 } 388 else sendEvent(event); 389 390 CTimer::get("Blocking time").resume(); 391 while (!stop) 392 { 393 checkBuffers(); 394 if (hasTemporarilyBufferedEvent()) 395 sendTemporarilyBufferedEvent(); 396 397 stop = true; 398 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest(); 399 } 400 CTimer::get("Blocking time").suspend(); 401 402 std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 403 iteMap = mapBufferSize_.end(), itMap; 404 StdSize totalBuf = 0; 405 for (itMap = itbMap; itMap != iteMap; ++itMap) 406 { 407 //report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 408 // << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 409 totalBuf += itMap->second; 410 } 411 //report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 412 413 releaseBuffers(); 414 } 411 * Finalize context client and do some reports. Function is non-blocking. 
412 */ 413 void CContextClient::finalize(void) 414 { 415 map<int,CClientBuffer*>::iterator itBuff; 416 bool stop = false; 417 418 CTimer::get("Blocking time").resume(); 419 while (hasTemporarilyBufferedEvent()) 420 { 421 checkBuffers(); 422 sendTemporarilyBufferedEvent(); 423 } 424 CTimer::get("Blocking time").suspend(); 425 426 CEventClient event(CContext::GetType(), CContext::EVENT_ID_CONTEXT_FINALIZE); 427 if (isServerLeader()) 428 { 429 CMessage msg; 430 const std::list<int>& ranks = getRanksServerLeader(); 431 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 432 { 433 #pragma omp critical (_output) 434 info(100)<<"DEBUG : Sent context Finalize event to rank "<<*itRank<<endl ; 435 event.push(*itRank, 1, msg); 436 } 437 sendEvent(event); 438 } 439 else sendEvent(event); 440 441 CTimer::get("Blocking time").resume(); 442 // while (!stop) 443 { 444 checkBuffers(); 445 if (hasTemporarilyBufferedEvent()) 446 sendTemporarilyBufferedEvent(); 447 448 stop = true; 449 // for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) stop &= !itBuff->second->hasPendingRequest(); 450 } 451 CTimer::get("Blocking time").suspend(); 452 453 std::map<int,StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 454 iteMap = mapBufferSize_.end(), itMap; 455 456 StdSize totalBuf = 0; 457 for (itMap = itbMap; itMap != iteMap; ++itMap) 458 { 459 #pragma omp critical (_output) 460 report(10) << " Memory report : Context <" << context->getId() << "> : client side : memory used for buffer of each connection to server" << endl 461 << " +) To server with rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 462 totalBuf += itMap->second; 463 } 464 #pragma omp critical (_output) 465 report(0) << " Memory report : Context <" << context->getId() << "> : client side : total memory used for buffer " << totalBuf << " bytes" << endl; 466 467 //releaseBuffers(); // moved to CContext::finalize() 468 } 469 470 471 /*! 472 */ 473 bool CContextClient::havePendingRequests(void) 474 { 475 bool pending = false; 476 map<int,CClientBuffer*>::iterator itBuff; 477 for (itBuff = buffers.begin(); itBuff != buffers.end(); itBuff++) 478 pending |= itBuff->second->hasPendingRequest(); 479 return pending; 480 } 481 482 415 483 } -
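The check_event_sync guard added above relies on a simple collective argument: if every client pushes the same event at the same timeline, summing the event type, class id and timeline over the client communicator and dividing by the number of clients gives back the local values. The stand-alone sketch below illustrates that test with plain MPI; the helper name and the value 42 are illustrative only and not part of the XIOS API.

#include <mpi.h>
#include <iostream>

// Hypothetical helper: true when the sum of localValue over comm equals
// size * localValue, i.e. when this rank's value matches the average.
// Like the XIOS check, it catches most divergences but not ones that cancel out.
bool agreesWithOtherRanks(int localValue, MPI_Comm comm)
{
  int size = 0, sum = 0;
  MPI_Comm_size(comm, &size);
  MPI_Allreduce(&localValue, &sum, 1, MPI_INT, MPI_SUM, comm);
  return sum == localValue * size;
}

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  int eventType = 42;                      // would be event.getTypeId() on a real client
  if (!agreesWithOtherRanks(eventType, MPI_COMM_WORLD))
    std::cerr << "Events are not coherent between clients" << std::endl;
  MPI_Finalize();
  return 0;
}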
XIOS/dev/branch_openmp/src/context_client.hpp
r1205 r1460 40 40 bool checkBuffers(void); 41 41 void releaseBuffers(void); 42 bool havePendingRequests(void); 42 43 43 44 bool isServerLeader(void) const; … … 47 48 48 49 bool isAttachedModeEnabled() const; 49 50 50 bool hasTemporarilyBufferedEvent() const { return !tmpBufferedEvent.isEmpty(); }; 51 51 52 static void computeLeader(int clientRank, int clientSize, int serverSize, 53 std::list<int>& rankRecvLeader, 54 std::list<int>& rankRecvNotLeader); 55 52 56 // Close and finalize context client 53 void closeContext(void); 57 // void closeContext(void); Never been implemented. 54 58 void finalize(void); 55 59 … … 106 110 std::list<int> ranksServerNotLeader; 107 111 108 public: // Some function should be removed in the future109 // void registerEvent(CEventClient& event);110 // list<CBufferOut*> newEvent(CEventClient& event,list<int>& sizes);111 // bool locked;112 // set<int> connectedServer;113 114 112 }; 115 113 } -
XIOS/dev/branch_openmp/src/context_server.cpp
r1342 r1460 29 29 MPI_Comm_size(intraComm,&intraCommSize); 30 30 MPI_Comm_rank(intraComm,&intraCommRank); 31 31 32 interComm=interComm_; 32 33 int flag; … … 34 35 if (flag) MPI_Comm_remote_size(interComm,&commSize); 35 36 else MPI_Comm_size(interComm,&commSize); 37 36 38 currentTimeLine=0; 37 39 scheduled=false; 38 40 finished=false; 39 40 41 boost::hash<string> hashString; 41 hashId=hashString(context->getId()); 42 43 } 42 if (CServer::serverLevel == 1) 43 hashId=hashString(context->getId() + boost::lexical_cast<string>(context->clientPrimServer.size())); 44 else 45 hashId=hashString(context->getId()); 46 } 47 44 48 void CContextServer::setPendingEvent(void) 45 49 { … … 142 146 } 143 147 148 144 149 void CContextServer::checkPendingRequest(void) 145 150 { … … 182 187 map<size_t,CEventServer*>::iterator it; 183 188 189 CTimer::get("Process request").resume(); 184 190 while(count>0) 185 191 { … … 195 201 count=buffer.remain(); 196 202 } 197 203 CTimer::get("Process request").suspend(); 198 204 } 199 205 … … 241 247 } 242 248 243 244 249 void CContextServer::dispatchEvent(CEventServer& event) 245 250 { … … 249 254 int rank; 250 255 list<CEventServer::SSubEvent>::iterator it; 251 CContext::setCurrent(context->getId()); 256 StdString ctxId = context->getId(); 257 CContext::setCurrent(ctxId); 258 StdSize totalBuf = 0; 252 259 253 260 if (event.classId==CContext::GetType() && event.type==CContext::EVENT_ID_CONTEXT_FINALIZE) … … 255 262 finished=true; 256 263 #pragma omp critical (_output) 257 info(20)<<"Server Side context <"<<context->getId()<<"> finalized"<<endl; 264 info(20)<<" CContextServer: Receive context <"<<context->getId()<<"> finalize."<<endl; 265 context->finalize(); 258 266 std::map<int, StdSize>::const_iterator itbMap = mapBufferSize_.begin(), 259 iteMap = mapBufferSize_.end(), itMap; 260 StdSize totalBuf = 0; 267 iteMap = mapBufferSize_.end(), itMap; 261 268 for (itMap = itbMap; itMap != iteMap; ++itMap) 262 269 { 263 //report(10)<< " Memory report : Context <"<<context->getId()<<"> : server side : memory used for buffer of each connection to client" << endl 264 // << " +) With client of rank " << itMap->first << " : " << itMap->second << " bytes " << endl; 270 rank = itMap->first; 271 #pragma omp critical (_output) 272 report(10)<< " Memory report : Context <"<<ctxId<<"> : server side : memory used for buffer of each connection to client" << endl 273 << " +) With client of rank " << rank << " : " << itMap->second << " bytes " << endl; 265 274 totalBuf += itMap->second; 266 275 } 267 context->finalize();268 //report(0)<< " Memory report : Context <"<<context->getId()<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl;276 #pragma omp critical (_output) 277 report(0)<< " Memory report : Context <"<<ctxId<<"> : server side : total memory used for buffer "<<totalBuf<<" bytes"<<endl; 269 278 } 270 279 else if (event.classId==CContext::GetType()) CContext::dispatchEvent(event); -
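The hashId change above makes a level-1 server schedule a context under a different identifier than a plain server, by folding the number of primary-server clients into the hash. A reduced sketch of that computation, using the same boost calls as the sources (the helper name is illustrative):

#include <boost/functional/hash.hpp>
#include <boost/lexical_cast.hpp>
#include <cstddef>
#include <string>

// Illustrative helper: one hash per (context, pool count) pair at level 1,
// plain hash of the context id otherwise, so the event scheduler keeps the
// two event flows separate.
std::size_t contextHash(const std::string& contextId, int nbPrimServerClients, bool isLevelOne)
{
  boost::hash<std::string> hashString;
  if (isLevelOne)
    return hashString(contextId + boost::lexical_cast<std::string>(nbPrimServerClients));
  return hashString(contextId);
}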
XIOS/dev/branch_openmp/src/context_server.hpp
r1328 r1460 21 21 void processRequest(int rank, char* buff,int count) ; 22 22 void processEvents(void) ; 23 bool hasFinished(void); 23 24 void dispatchEvent(CEventServer& event) ; 24 25 void setPendingEvent(void) ; 25 26 bool hasPendingEvent(void) ; 26 bool hasFinished(void);27 27 28 28 ep_lib::MPI_Comm intraComm ; … … 45 45 bool scheduled ; /*!< event of current timeline is alreading scheduled ? */ 46 46 size_t hashId ; 47 47 48 ~CContextServer() ; 48 49 -
XIOS/dev/branch_openmp/src/cxios.cpp
r1355 r1460 19 19 const string CXios::clientFile="./xios_client"; 20 20 const string CXios::serverFile="./xios_server"; 21 const string CXios::serverPrmFile="./xios_server1"; 22 const string CXios::serverSndFile="./xios_server2"; 21 23 22 24 bool CXios::isClient ; … … 25 27 bool CXios::usingOasis ; 26 28 bool CXios::usingServer = false; 29 bool CXios::usingServer2 = false; 30 int CXios::ratioServer2 = 50; 31 int CXios::nbPoolsServer2 = 1; 27 32 double CXios::bufferSizeFactor = 1.0; 28 33 const double CXios::defaultBufferSizeFactor = 1.0; 29 34 StdSize CXios::minBufferSize = 1024 * sizeof(double); 35 StdSize CXios::maxBufferSize = std::numeric_limits<int>::max() ; 30 36 bool CXios::printLogs2Files; 31 37 bool CXios::isOptPerformance = true; 32 38 CRegistry* CXios::globalRegistry = 0; 33 double CXios::recvFieldTimeout = 10.0; 34 39 double CXios::recvFieldTimeout = 300.0; 40 bool CXios::checkEventSync=false ; 41 35 42 //! Parse configuration file and create some objects from it 36 43 void CXios::initialize() … … 57 64 usingOasis=getin<bool>("using_oasis",false) ; 58 65 usingServer=getin<bool>("using_server",false) ; 66 usingServer2=getin<bool>("using_server2",false) ; 67 ratioServer2=getin<int>("ratio_server2",50); 68 nbPoolsServer2=getin<int>("number_pools_server2",1); 59 69 info.setLevel(getin<int>("info_level",0)) ; 60 70 report.setLevel(getin<int>("info_level",50)); … … 73 83 bufferSizeFactor = getin<double>("buffer_size_factor", defaultBufferSizeFactor); 74 84 minBufferSize = getin<int>("min_buffer_size", 1024 * sizeof(double)); 75 recvFieldTimeout = getin<double>("recv_field_timeout", 10.0); 85 maxBufferSize = getin<int>("max_buffer_size", std::numeric_limits<int>::max()); 86 recvFieldTimeout = getin<double>("recv_field_timeout", recvFieldTimeout); 76 87 if (recvFieldTimeout < 0.0) 77 88 ERROR("CXios::parseXiosConfig()", "recv_field_timeout cannot be negative."); 89 90 checkEventSync = getin<bool>("check_event_sync", checkEventSync); 78 91 79 92 //globalComm=MPI_COMM_WORLD ; … … 112 125 { 113 126 isClient = true; 114 127 115 128 initialize() ; 116 129 … … 179 192 isClient = false; 180 193 isServer = true; 181 194 182 195 initServer(); 183 196 184 197 // Initialize all aspects MPI 185 198 CServer::initialize(); 186 if (CServer::getRank()==0 ) globalRegistry = new CRegistry(CServer::intraComm) ;199 if (CServer::getRank()==0 && CServer::serverLevel != 1) globalRegistry = new CRegistry(CServer::intraComm) ; 187 200 188 201 if (printLogs2Files) 189 202 { 190 CServer::openInfoStream(serverFile); 191 CServer::openErrorStream(serverFile); 203 if (CServer::serverLevel == 0) 204 { 205 CServer::openInfoStream(serverFile); 206 CServer::openErrorStream(serverFile); 207 } 208 else if (CServer::serverLevel == 1) 209 { 210 CServer::openInfoStream(serverPrmFile); 211 CServer::openErrorStream(serverPrmFile); 212 } 213 else 214 { 215 CServer::openInfoStream(serverSndFile); 216 CServer::openErrorStream(serverSndFile); 217 } 192 218 } 193 219 else … … 201 227 202 228 // Finalize 203 if (CServer::getRank()==0) 204 { 205 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 206 globalRegistry->toFile("xios_registry.bin") ; 207 delete globalRegistry ; 208 } 229 if (CServer::serverLevel == 0) 230 { 231 if (CServer::getRank()==0) 232 { 233 info(80)<<"Write data base Registry"<<endl<<globalRegistry->toString()<<endl ; 234 globalRegistry->toFile("xios_registry.bin") ; 235 delete globalRegistry ; 236 } 237 } 238 else 239 { 240 // If using two server levels: 241 // (1) merge registries on 
each pool 242 // (2) send merged registries to the first pool 243 // (3) merge received registries on the first pool 244 if (CServer::serverLevel == 2) 245 { 246 vector<int>& secondaryServerGlobalRanks = CServer::getSecondaryServerGlobalRanks(); 247 int firstPoolGlobalRank = secondaryServerGlobalRanks[0]; 248 int rankGlobal; 249 MPI_Comm_rank(globalComm, &rankGlobal); 250 251 // Merge registries defined on each pools 252 CRegistry globalRegistrySndServers (CServer::intraComm); 253 254 // All pools (except the first): send globalRegistry to the first pool 255 for (int i=1; i<secondaryServerGlobalRanks.size(); i++) 256 { 257 if (rankGlobal == secondaryServerGlobalRanks[i]) 258 { 259 globalRegistrySndServers.mergeRegistry(*globalRegistry) ; 260 int registrySize = globalRegistrySndServers.size(); 261 MPI_Send(®istrySize,1,MPI_LONG,firstPoolGlobalRank,15,CXios::globalComm) ; 262 CBufferOut buffer(registrySize) ; 263 globalRegistrySndServers.toBuffer(buffer) ; 264 MPI_Send(buffer.start(),registrySize,MPI_CHAR,firstPoolGlobalRank,15,CXios::globalComm) ; 265 } 266 } 267 268 // First pool: receive globalRegistry of all secondary server pools, merge and write the resultant registry 269 if (rankGlobal == firstPoolGlobalRank) 270 { 271 MPI_Status status; 272 char* recvBuff; 273 274 globalRegistrySndServers.mergeRegistry(*globalRegistry) ; 275 276 for (int i=1; i< secondaryServerGlobalRanks.size(); i++) 277 { 278 int rank = secondaryServerGlobalRanks[i]; 279 int registrySize = 0; 280 MPI_Recv(®istrySize, 1, MPI_LONG, rank, 15, CXios::globalComm, &status); 281 recvBuff = new char[registrySize]; 282 MPI_Recv(recvBuff, registrySize, MPI_CHAR, rank, 15, CXios::globalComm, &status); 283 CBufferIn buffer(recvBuff, registrySize) ; 284 CRegistry recvRegistry; 285 recvRegistry.fromBuffer(buffer) ; 286 globalRegistrySndServers.mergeRegistry(recvRegistry) ; 287 delete[] recvBuff; 288 } 289 290 info(80)<<"Write data base Registry"<<endl<<globalRegistrySndServers.toString()<<endl ; 291 globalRegistrySndServers.toFile("xios_registry.bin") ; 292 293 } 294 } 295 delete globalRegistry; 296 } 209 297 CServer::finalize(); 210 298 -
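For orientation, ratio_server2 is documented as the percentage of server processes dedicated to the second level. The split itself happens in the server initialization code, which is not part of this excerpt; the sketch below only shows the arithmetic such a percentage implies, with an assumed integer division.

#include <utility>

// Hypothetical helper: divide nbServers processes between the two levels
// according to a percentage such as ratio_server2 = 50.
std::pair<int, int> splitServerPools(int nbServers, int ratioServer2)
{
  int secondLevel = (nbServers * ratioServer2) / 100;  // processes for the secondary pools
  int firstLevel  = nbServers - secondLevel;           // remaining processes for level one
  return {firstLevel, secondLevel};
}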
XIOS/dev/branch_openmp/src/cxios.hpp
r1331 r1460 27 27 28 28 public: 29 static const string rootFile ; //!< Configuration filename 30 static const string xiosCodeId ; //!< Identity for XIOS 31 static const string clientFile; //!< Filename template for client 32 static const string serverFile; //!< Filename template for server 33 //#pragma omp threadprivate(rootFile, xiosCodeId, clientFile, serverFile) 29 static const string rootFile ; //!< Configuration filename 30 static const string xiosCodeId ; //!< Identity for XIOS 31 static const string clientFile; //!< Filename template for client 32 static const string serverFile; //!< Filename template for server 33 static const string serverPrmFile; //!< Filename template for primary server in case of two server levels 34 static const string serverSndFile; //!< Filename template for secondary server in case of two server levels 34 35 35 36 37 38 36 static bool isClient ; //!< Check if xios is client 37 #pragma omp threadprivate(isClient) 38 static bool isServer ; //!< Check if xios is server 39 #pragma omp threadprivate(isServer) 39 40 40 41 41 static ep_lib::MPI_Comm globalComm ; //!< Global communicator 42 #pragma omp threadprivate(globalComm) 42 43 43 static bool printLogs2Files; //!< Printing out logs into files 44 #pragma omp threadprivate(printLogs2Files) 45 static bool usingOasis ; //!< Using Oasis 46 #pragma omp threadprivate(usingOasis) 47 static bool usingServer ; //!< Using server (server mode) 48 #pragma omp threadprivate(usingServer) 49 static double bufferSizeFactor; //!< Factor used to tune the buffer size 50 #pragma omp threadprivate(bufferSizeFactor) 51 static const double defaultBufferSizeFactor; //!< Default factor value 52 static StdSize minBufferSize; //!< Minimum buffer size 53 #pragma omp threadprivate(minBufferSize) 54 static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible) 55 #pragma omp threadprivate(isOptPerformance) 56 static CRegistry* globalRegistry ; //!< global registry which is wrote by the root process of the servers 57 #pragma omp threadprivate(globalRegistry) 58 static double recvFieldTimeout; //!< Time to wait for data before issuing an error when receiving a field 59 #pragma omp threadprivate(recvFieldTimeout) 44 static bool printLogs2Files; //!< Printing out logs into files 45 #pragma omp threadprivate(printLogs2Files) 46 static bool usingOasis ; //!< Using Oasis 47 #pragma omp threadprivate(usingOasis) 48 static bool usingServer ; //!< Using server (server mode) 49 #pragma omp threadprivate(usingServer) 50 static bool usingServer2 ; //!< Using secondary server (server mode). IMPORTANT: Use this variable ONLY in CServer::initialize(). 
51 #pragma omp threadprivate(usingServer2) 52 static int ratioServer2 ; //!< Percentage of server processors dedicated to secondary server 53 #pragma omp threadprivate(ratioServer2) 54 static int nbPoolsServer2 ; //!< Number of pools created on the secondary server 55 #pragma omp threadprivate(nbPoolsServer2) 56 static double bufferSizeFactor; //!< Factor used to tune the buffer size 57 #pragma omp threadprivate(bufferSizeFactor) 58 static const double defaultBufferSizeFactor; //!< Default factor value 59 static StdSize minBufferSize; //!< Minimum buffer size 60 #pragma omp threadprivate(minBufferSize) 61 static StdSize maxBufferSize; //!< Maximum buffer size 62 #pragma omp threadprivate(maxBufferSize) 63 static bool isOptPerformance; //!< Check if buffer size is for performance (as large as possible) 64 #pragma omp threadprivate(isOptPerformance) 65 static CRegistry* globalRegistry ; //!< global registry which is written by the root process of the servers 66 #pragma omp threadprivate(globalRegistry) 67 static double recvFieldTimeout; //!< Time to wait for data before issuing an error when receiving a field 68 #pragma omp threadprivate(recvFieldTimeout) 69 static bool checkEventSync; //!< For debugging, check that events are coherent and synchronous on the client side 60 70 61 71 public: 62 63 72 //! Setting xios to use server mode 73 static void setUsingServer(); 64 74 65 66 75 //! Setting xios NOT to use server mode 76 static void setNotUsingServer(); 67 77 68 69 78 //! Initialize server (if any) 79 static void initServer(); 70 80 71 81 private: -
XIOS/dev/branch_openmp/src/declare_attribute.hpp
r778 r1460 3 3 4 4 /// ///////////////////////////// Macros ///////////////////////////// /// 5 #define DECLARE_ATTRIBUTE(type, name )\5 #define DECLARE_ATTRIBUTE(type, name, ...) \ 6 6 class name##_attr : public CAttributeTemplate<type> \ 7 7 { \ 8 8 public : \ 9 name##_attr(void) 9 name##_attr(void) \ 10 10 : CAttributeTemplate<type> \ 11 11 (#name, *CAttributeMap::Current) \ … … 13 13 type operator=(const type & value) \ 14 14 { return (CAttributeTemplate<type>::operator=(value)); } \ 15 virtual ~name##_attr(void) \ 15 virtual bool doSend() const { return helper(__VA_ARGS__); } \ 16 bool helper(bool returnTrue=true) const { return returnTrue; } \ 17 virtual ~name##_attr(void) \ 16 18 { /* Ne rien faire de plus */ } \ 17 19 } name; 18 20 19 #define DECLARE_ATTRIBUTE_PRIVATE(type, name )\20 class name##_attr : public CAttributeTemplate<type> \21 #define DECLARE_ATTRIBUTE_PRIVATE(type, name, ...) \ 22 class name##_attr : public CAttributeTemplate<type> \ 21 23 { \ 22 24 public : \ 23 name##_attr(void) 25 name##_attr(void) \ 24 26 : CAttributeTemplate<type> \ 25 27 (#name, *CAttributeMap::Current) \ … … 29 31 virtual bool isPublic() const \ 30 32 { return false; } \ 33 virtual bool doSend() const { return helper(__VA_ARGS__); } \ 34 bool helper(bool returnTrue=true) const { return returnTrue; } \ 31 35 virtual ~name##_attr(void) \ 32 36 { /* Ne rien faire de plus */ } \ 33 37 } name; 34 38 35 #define DECLARE_ARRAY(T_num, T_rank, name )\39 #define DECLARE_ARRAY(T_num, T_rank, name, ...) \ 36 40 class name##_attr : public CAttributeArray<T_num, T_rank> \ 37 41 { \ … … 39 43 using CAttributeArray<T_num, T_rank>::operator = ; \ 40 44 name##_attr(void) : CAttributeArray<T_num, T_rank> (#name, *CAttributeMap::Current) {} \ 45 virtual bool doSend() const { return helper(__VA_ARGS__); } \ 46 bool helper(bool returnTrue=true) const { return returnTrue; } \ 41 47 virtual ~name##_attr(void) {} \ 42 48 } name; 43 49 44 #define DECLARE_ARRAY_PRIVATE(T_num, T_rank, name )\50 #define DECLARE_ARRAY_PRIVATE(T_num, T_rank, name, ...) \ 45 51 class name##_attr : public CAttributeArray<T_num, T_rank> \ 46 52 { \ … … 50 56 virtual bool isPublic() const \ 51 57 { return false; } \ 58 virtual bool doSend() const { return helper(__VA_ARGS__); } \ 59 bool helper(bool returnTrue=true) const { return returnTrue; } \ 52 60 virtual ~name##_attr(void) {} \ 53 61 } name; 54 62 55 #define DECLARE_CLASS_ENUM(name )\56 class name##_attr : public CAttributeEnum<Enum_##name> 63 #define DECLARE_CLASS_ENUM(name, ...) 
\ 64 class name##_attr : public CAttributeEnum<Enum_##name> \ 57 65 { \ 58 66 public : \ 59 67 name##_attr(void) : CAttributeEnum<Enum_##name>(#name, *CAttributeMap::Current) { } \ 60 virtual ~name##_attr(void) {} \ 68 virtual bool doSend() const { return helper(__VA_ARGS__); } \ 69 bool helper(bool returnTrue=true) const { return returnTrue; } \ 70 virtual ~name##_attr(void) {} \ 61 71 } name; 62 72 … … 71 81 DECLARE_CLASS_ENUM(name) 72 82 73 #define DECLARE_ENUM3(name,arg1,arg2,arg3) 83 #define DECLARE_ENUM3(name,arg1,arg2,arg3) \ 74 84 class Enum_##name \ 75 85 { \ 76 86 public: \ 77 enum t_enum { arg1=0, arg2, arg3} ; 87 enum t_enum { arg1=0, arg2, arg3} ; \ 78 88 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3 } ; return enumStr ; } \ 79 89 int getSize(void) const { return 3 ; } \ … … 81 91 DECLARE_CLASS_ENUM(name) 82 92 83 #define DECLARE_ENUM4(name,arg1,arg2,arg3,arg4) 93 #define DECLARE_ENUM4(name,arg1,arg2,arg3,arg4) \ 84 94 class Enum_##name \ 85 95 { \ 86 96 public: \ 87 enum t_enum { arg1=0, arg2, arg3,arg4} ; 97 enum t_enum { arg1=0, arg2, arg3,arg4} ; \ 88 98 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3,#arg4 } ; return enumStr ; } \ 89 99 int getSize(void) const { return 4 ; } \ … … 91 101 DECLARE_CLASS_ENUM(name) 92 102 93 #define DECLARE_ENUM5(name,arg1,arg2,arg3,arg4,arg5) 103 #define DECLARE_ENUM5(name,arg1,arg2,arg3,arg4,arg5) \ 94 104 class Enum_##name \ 95 105 { \ 96 106 public: \ 97 enum t_enum { arg1=0, arg2, arg3,arg4,arg5} ; 107 enum t_enum { arg1=0, arg2, arg3,arg4,arg5} ; \ 98 108 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3,#arg4,#arg5 } ; return enumStr ; } \ 99 109 int getSize(void) const { return 5 ; } \ … … 101 111 DECLARE_CLASS_ENUM(name) 102 112 103 #define DECLARE_ENUM6(name,arg1,arg2,arg3,arg4,arg5,arg6) 113 #define DECLARE_ENUM6(name,arg1,arg2,arg3,arg4,arg5,arg6) \ 104 114 class Enum_##name \ 105 115 { \ 106 116 public: \ 107 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6} ; 117 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6} ; \ 108 118 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3,#arg4,#arg5,#arg6 } ; return enumStr ; } \ 109 119 int getSize(void) const { return 6 ; } \ … … 111 121 DECLARE_CLASS_ENUM(name) 112 122 113 #define DECLARE_ENUM7(name,arg1,arg2,arg3,arg4,arg5,arg6,arg7) 123 #define DECLARE_ENUM7(name,arg1,arg2,arg3,arg4,arg5,arg6,arg7) \ 114 124 class Enum_##name \ 115 125 { \ 116 126 public: \ 117 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6,arg7} ; 127 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6,arg7} ; \ 118 128 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3,#arg4,#arg5,#arg6,#arg7 } ; return enumStr ; } \ 119 129 int getSize(void) const { return 7 ; } \ … … 125 135 { \ 126 136 public: \ 127 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6,arg7,arg8} ; 137 enum t_enum { arg1=0, arg2, arg3,arg4,arg5,arg6,arg7,arg8} ; \ 128 138 const char** getStr(void) const { static const char * enumStr[] = { #arg1, #arg2, #arg3,#arg4,#arg5,#arg6,#arg7,#arg8 } ; return enumStr ; } \ 129 139 int getSize(void) const { return 8 ; } \ -
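The reworked macros rely on a small variadic trick: the optional last argument is forwarded to a helper whose single parameter defaults to true, so DECLARE_ATTRIBUTE(int, ni) keeps the old behaviour while DECLARE_ATTRIBUTE(int, ni, false) turns doSend() off. The stripped-down example below shows the same idiom outside the XIOS class hierarchy; note that an empty __VA_ARGS__ is a compiler extension before C++20, which the original macros also depend on.

#include <iostream>

// Same idiom, reduced to a free-standing example: __VA_ARGS__ is either empty
// (helper() then returns its default, true) or a single bool literal.
#define DECLARE_FLAG(name, ...)                                         \
  struct name##_flag                                                    \
  {                                                                     \
    bool doSend() const { return helper(__VA_ARGS__); }                 \
    bool helper(bool returnTrue = true) const { return returnTrue; }    \
  } name;

DECLARE_FLAG(alwaysSent)        // doSend() == true
DECLARE_FLAG(neverSent, false)  // doSend() == false

int main()
{
  std::cout << alwaysSent.doSend() << " " << neverSent.doSend() << std::endl;  // prints "1 0"
  return 0;
}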
XIOS/dev/branch_openmp/src/declare_ref_func.hpp
r996 r1460 51 51 SuperClassAttribute::setAttributes(refer_ptr, apply); \ 52 52 } \ 53 if (this->hasAttribute("name") && this->name.isEmpty()) \53 if (this->hasAttribute("name") && this->name.isEmpty()) \ 54 54 this->name.setValue(this->get##type##OutputName()); \ 55 55 } \ … … 71 71 bool C##type::hasDirect##type##Reference(void) const \ 72 72 { \ 73 return !this->name_##_ref.isEmpty(); \ 73 return (!this->name_##_ref.isEmpty() && \ 74 C##type::has(this->name_##_ref)); \ 74 75 } \ 75 76 \ … … 89 90 } \ 90 91 \ 91 const StdString C##type::get##type##OutputName(void) const \92 const StdString C##type::get##type##OutputName(void) const \ 92 93 { \ 93 if (!this->name.isEmpty()) \ 94 return this->name; \ 95 else if (hasDirect##type##Reference()) \ 94 if (!this->name.isEmpty()) return this->name; \ 95 else if (this->hasAutoGeneratedId() && hasDirect##type##Reference()) \ 96 96 { \ 97 97 const C##type* refer_ptr = this, *tmp_ptr; \ 98 98 StdString nameRef = this->name_##_ref; \ 99 std::set<const C##type*> tmpRefObjects; 99 std::set<const C##type*> tmpRefObjects; \ 100 100 while (refer_ptr->hasAutoGeneratedId() && \ 101 101 (C##type::has(nameRef))) \ -
XIOS/dev/branch_openmp/src/distribution.cpp
r887 r1460 18 18 } 19 19 20 const CArray<size_t,1>& CDistribution:: getGlobalIndex() const 21 { 22 return globalIndex_; 23 } 24 20 25 CDistribution::~CDistribution() 21 26 { /* Nothing to do */ } -
XIOS/dev/branch_openmp/src/distribution.hpp
r930 r1460 35 35 int getDims() const; //! Get dimension size 36 36 int getRank() const; //! Get rank of current process 37 37 const CArray<size_t,1>& getGlobalIndex() const; 38 38 39 protected: 39 40 virtual void createGlobalIndex() {}; -
XIOS/dev/branch_openmp/src/distribution_client.cpp
r932 r1460 10 10 11 11 namespace xios { 12 13 CDistributionClient::CDistributionClient(int rank, int dims, const CArray<size_t,1>& globalIndex)14 : CDistribution(rank, dims, globalIndex)15 , axisDomainOrder_()16 , nLocal_(), nGlob_(), nBeginLocal_(), nBeginGlobal_(),nZoomBegin_(), nZoomEnd_()17 , dataNIndex_(), dataDims_(), dataBegin_(), dataIndex_(), domainMasks_(), axisMasks_()18 , gridMask_(), indexMap_()19 , isDataDistributed_(true), axisNum_(0), domainNum_(0)20 , localDataIndex_(), localMaskIndex_()21 , globalLocalDataSendToServerMap_()22 , infoIndex_(), isComputed_(false)23 , elementLocalIndex_(), elementGlobalIndex_(), elementIndexData_()24 , elementZoomMask_(), elementNLocal_(), elementNGlobal_()25 {26 }27 12 28 13 CDistributionClient::CDistributionClient(int rank, CGrid* grid) … … 46 31 { /* Nothing to do */ } 47 32 33 void CDistributionClient::partialClear() 34 { 35 GlobalLocalMap void1 ; 36 GlobalLocalMap void2 ; 37 std::vector<int> void3 ; 38 std::vector<int> void4 ; 39 40 globalLocalDataSendToServerMap_.swap(void1) ; 41 globalDataIndex_.swap(void2) ; 42 localDataIndex_.swap(void3); 43 localMaskIndex_.swap(void4) ; 44 } 45 48 46 /*! 49 47 Read information of a grid to generate distribution. … … 123 121 for (int i = 0; i < domainNum_;++i) 124 122 { 125 domainMasks_[i].resize(domList[i]-> mask_1d.numElements());126 domainMasks_[i] = domList[i]-> mask_1d;123 domainMasks_[i].resize(domList[i]->domainMask.numElements()); 124 domainMasks_[i] = domList[i]->domainMask; 127 125 } 128 126 … … 197 195 nBeginLocal_.at(indexMap_[idx]+1) = 0; 198 196 nBeginGlobal_.at(indexMap_[idx]+1) = domList[domIndex]->jbegin; 199 nZoomBegin_.at((indexMap_[idx]+1)) = domList[domIndex]->global_zoom_jbegin;200 nZoomEnd_.at((indexMap_[idx]+1)) = domList[domIndex]-> global_zoom_jbegin + domList[domIndex]->global_zoom_nj-1;197 nZoomBegin_.at((indexMap_[idx]+1)) = 0; //domList[domIndex]->global_zoom_jbegin; 198 nZoomEnd_.at((indexMap_[idx]+1)) = domList[domIndex]->nj_glo.getValue()- 1; //domList[domIndex]->global_zoom_jbegin + domList[domIndex]->global_zoom_nj-1; 201 199 202 200 dataBegin_.at(indexMap_[idx]+1) = domList[domIndex]->data_jbegin.getValue(); … … 209 207 nBeginLocal_.at(indexMap_[idx]) = 0; 210 208 nBeginGlobal_.at(indexMap_[idx]) = domList[domIndex]->ibegin; 211 nZoomBegin_.at((indexMap_[idx])) = domList[domIndex]->global_zoom_ibegin;212 nZoomEnd_.at((indexMap_[idx])) = domList[domIndex]-> global_zoom_ibegin + domList[domIndex]->global_zoom_ni-1;209 nZoomBegin_.at((indexMap_[idx])) = 0; // domList[domIndex]->global_zoom_ibegin; 210 nZoomEnd_.at((indexMap_[idx])) = domList[domIndex]->ni_glo.getValue() -1; //domList[domIndex]->global_zoom_ibegin + domList[domIndex]->global_zoom_ni-1; 213 211 214 212 dataBegin_.at(indexMap_[idx]) = domList[domIndex]->data_ibegin.getValue(); … … 231 229 nBeginLocal_.at(indexMap_[idx]) = 0; 232 230 nBeginGlobal_.at(indexMap_[idx]) = axisList[axisIndex]->begin.getValue(); 233 nZoomBegin_.at((indexMap_[idx])) = axisList[axisIndex]->global_zoom_begin;234 nZoomEnd_.at((indexMap_[idx])) = axisList[axisIndex]-> global_zoom_begin + axisList[axisIndex]->global_zoom_n-1;231 nZoomBegin_.at((indexMap_[idx])) = 0; //axisList[axisIndex]->global_zoom_begin; 232 nZoomEnd_.at((indexMap_[idx])) = axisList[axisIndex]->n_glo.getValue() - 1; //axisList[axisIndex]->global_zoom_begin + axisList[axisIndex]->global_zoom_n-1; 235 233 236 234 dataBegin_.at(indexMap_[idx]) = axisList[axisIndex]->data_begin.getValue(); … … 493 491 localDataIndex_.resize(indexLocalDataOnClientCount); 494 492 
localMaskIndex_.resize(indexSend2ServerCount); 493 localMaskedDataIndex_.resize(indexSend2ServerCount); 494 globalDataIndex_.rehash(std::ceil(indexLocalDataOnClientCount/globalDataIndex_.max_load_factor())); //globalLocalDataSendToServerMap_.reserve(indexSend2ServerCount); 495 495 globalLocalDataSendToServerMap_.rehash(std::ceil(indexSend2ServerCount/globalLocalDataSendToServerMap_.max_load_factor())); //globalLocalDataSendToServerMap_.reserve(indexSend2ServerCount); 496 496 … … 558 558 if (gridMask_(gridMaskIndex)) 559 559 { 560 size_t globalIndex = 0; 561 for (int k = 0; k < numElement_; ++k) 562 { 563 globalIndex += (currentGlobalIndex[k])*elementNGlobal_[k]; 564 } 565 globalDataIndex_[globalIndex] = indexLocalDataOnClientCount; 560 566 localDataIndex_[indexLocalDataOnClientCount] = countLocalData; 561 567 bool isIndexOnServer = true; … … 567 573 if (isIndexOnServer) 568 574 { 569 size_t globalIndex = 0;570 for (int k = 0; k < numElement_; ++k)571 {572 globalIndex += (currentGlobalIndex[k])*elementNGlobal_[k];573 }574 575 globalLocalDataSendToServerMap_[globalIndex] = indexLocalDataOnClientCount; 575 576 localMaskIndex_[indexSend2ServerCount] = gridMaskIndex; 577 localMaskedDataIndex_[indexSend2ServerCount] = indexLocalDataOnClientCount; 576 578 ++indexSend2ServerCount; 577 579 } … … 642 644 } 643 645 646 CDistributionClient::GlobalLocalDataMap& CDistributionClient::getGlobalDataIndexOnClient() 647 { 648 if (!isComputed_) createGlobalIndexSendToServer(); 649 return globalDataIndex_; 650 } 651 644 652 /*! 645 653 Return local data index of client … … 660 668 } 661 669 670 /*! 671 Return local mask index of client 672 */ 673 const std::vector<int>& CDistributionClient::getLocalMaskedDataIndexOnClient() 674 { 675 if (!isComputed_) createGlobalIndexSendToServer(); 676 return localMaskedDataIndex_; 677 } 678 662 679 } // namespace xios -
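partialClear() above releases the large index maps by swapping them with freshly constructed empties instead of calling clear(), because clear() usually keeps the allocated capacity. A generic sketch of the idiom; the template helper is illustrative and not part of XIOS.

#include <cstddef>
#include <unordered_map>
#include <vector>

// Swap-to-free: the temporary takes over the old storage and releases it when it
// goes out of scope, whereas clear() may leave the capacity allocated.
template <typename Container>
void releaseMemory(Container& c)
{
  Container empty;
  c.swap(empty);
}

int main()
{
  std::unordered_map<std::size_t, int> globalLocalMap;
  std::vector<int> localIndex(1 << 20, 0);
  releaseMemory(globalLocalMap);
  releaseMemory(localIndex);
  return 0;
}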
XIOS/dev/branch_openmp/src/distribution_client.hpp
r930 r1460 33 33 34 34 public: 35 /** Default constructor */ 36 CDistributionClient(int rank, int dims, const CArray<size_t,1>& globalIndex = CArray<size_t,1>()); 35 /** Default constructor */ 37 36 CDistributionClient(int rank, CGrid* grid); 38 37 … … 44 43 virtual const std::vector<int>& getLocalDataIndexOnClient(); 45 44 GlobalLocalDataMap& getGlobalLocalDataSendToServer(); 45 GlobalLocalDataMap& getGlobalDataIndexOnClient(); 46 46 const std::vector<int>& getLocalMaskIndexOnClient(); 47 const std::vector<int>& getLocalMaskedDataIndexOnClient(); 47 48 48 49 std::vector<int> getNGlob() { return nGlob_; } … … 55 56 const int& dataDim, const int& ni, int& j); 56 57 static int getAxisIndex(const int& dataIndex, const int& dataBegin, const int& ni); 58 59 void partialClear(void) ; //! clear heavy sized attibutes 57 60 58 61 protected: … … 79 82 //!< LocalData index on client 80 83 GlobalLocalDataMap globalLocalDataSendToServerMap_; 84 GlobalLocalDataMap globalDataIndex_; 81 85 std::vector<int> localDataIndex_; 82 86 std::vector<int> localMaskIndex_; 87 std::vector<int> localMaskedDataIndex_; 83 88 84 89 private: -
XIOS/dev/branch_openmp/src/distribution_server.cpp
r1205 r1460 11 11 12 12 namespace xios { 13 14 CDistributionServer::CDistributionServer(int rank, int dims, const CArray<size_t,1>& globalIndex)15 : CDistribution(rank, dims, globalIndex), nGlobal_(), nZoomSize_(), nZoomBegin_(), globalLocalIndexMap_()16 {17 }18 19 CDistributionServer::CDistributionServer(int rank, const std::vector<int>& nZoomBegin,20 const std::vector<int>& nZoomSize, const std::vector<int>& nGlobal)21 : CDistribution(rank, nGlobal.size()), nGlobal_(nGlobal), nZoomSize_(nZoomSize), nZoomBegin_(nZoomBegin), globalLocalIndexMap_()22 {23 createGlobalIndex();24 }25 13 26 14 CDistributionServer::CDistributionServer(int rank, const std::vector<int>& nZoomBegin, … … 37 25 const std::vector<CArray<int,1> >& globalIndexElements, 38 26 const CArray<int,1>& elementOrder, 39 const std::vector<int>& nZoomBegin Server,40 const std::vector<int>& nZoomSize Server,27 const std::vector<int>& nZoomBegin, 28 const std::vector<int>& nZoomSize, 41 29 const std::vector<int>& nZoomBeginGlobal, 42 30 const std::vector<int>& nGlobal) 43 31 : CDistribution(rank, nGlobal.size()), nGlobal_(nGlobal), nZoomBeginGlobal_(nZoomBeginGlobal), 44 nZoomSize_(nZoomSize Server), nZoomBegin_(nZoomBeginServer), globalLocalIndexMap_()32 nZoomSize_(nZoomSize), nZoomBegin_(nZoomBegin), globalLocalIndexMap_() 45 33 { 46 34 createGlobalIndex(globalIndexElements, elementOrder); … … 184 172 /*! 185 173 Compute local index for writing data on server 186 \param [in] globalIndex global index received from client187 \return local index of written data 188 */ 189 CArray<size_t,1> CDistributionServer::computeLocalIndex(const CArray<size_t,1>& globalIndex) 190 { 191 int ssize = globalIndex.numElements();192 CArray<size_t,1> localIndex( ssize);174 \param [in] globalIndex Global index received from client 175 */ 176 void CDistributionServer::computeLocalIndex(CArray<size_t,1>& globalIndex) 177 { 178 size_t ssize = globalIndex.numElements(); 179 size_t localIndexSize = std::min(globalIndex_.numElements(), ssize); 180 CArray<size_t,1> localIndex(localIndexSize); 193 181 GlobalLocalMap::const_iterator ite = globalLocalIndexMap_.end(), it; 194 for (int idx = 0; idx < ssize; ++idx) 182 int i = 0; 183 for (size_t idx = 0; idx < ssize; ++idx) 195 184 { 196 185 it = globalLocalIndexMap_.find(globalIndex(idx)); 197 186 if (ite != it) 198 localIndex(idx) = it->second; 199 } 200 201 return localIndex; 202 } 203 204 /*! 205 Compute local index for writing data on server 206 \param [in] globalIndex Global index received from client 207 */ 208 void CDistributionServer::computeLocalIndex(CArray<size_t,1>& globalIndex) 209 { 210 int ssize = globalIndex.numElements(); 211 CArray<size_t,1> localIndex(ssize); 212 GlobalLocalMap::const_iterator ite = globalLocalIndexMap_.end(), it; 213 for (int idx = 0; idx < ssize; ++idx) 214 { 215 it = globalLocalIndexMap_.find(globalIndex(idx)); 216 if (ite != it) 217 localIndex(idx) = it->second; 187 { 188 localIndex(i) = it->second; 189 ++i; 190 } 218 191 } 219 192 … … 234 207 } 235 208 209 /*! 210 Get the size of grid index in server (e.x: sizeGrid *= size of each dimensiion) 211 */ 212 int CDistributionServer::getGridSize() const 213 { 214 return globalLocalIndexMap_.size(); 215 } 236 216 237 217 const std::vector<int>& CDistributionServer::getZoomBeginGlobal() const … … 249 229 return nZoomSize_; 250 230 } 231 232 void CDistributionServer::partialClear(void) 233 { 234 GlobalLocalMap void1 ; 235 globalLocalIndexMap_.swap(void1) ; 236 } 237 251 238 } // namespace xios -
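The reworked computeLocalIndex() now keeps only the global indexes present in the server's global-to-local map and packs them contiguously, so the result can be shorter than the input. A compact sketch of the same lookup with standard containers; the function and parameter names are illustrative.

#include <cstddef>
#include <unordered_map>
#include <vector>

// Translate global indexes into local ones, silently dropping indexes this
// server does not own; the output is therefore at most as long as the input.
std::vector<std::size_t> toLocalIndexes(const std::vector<std::size_t>& globalIndexes,
                                        const std::unordered_map<std::size_t, std::size_t>& globalToLocal)
{
  std::vector<std::size_t> local;
  local.reserve(globalIndexes.size());
  for (std::size_t g : globalIndexes)
  {
    auto it = globalToLocal.find(g);
    if (it != globalToLocal.end()) local.push_back(it->second);
  }
  return local;
}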
XIOS/dev/branch_openmp/src/distribution_server.hpp
r1205 r1460 45 45 const std::vector<int>& getZoomBeginServer() const; 46 46 const std::vector<int>& getZoomSizeServer() const; 47 48 virtual CArray<size_t,1> computeLocalIndex(const CArray<size_t,1>& globalIndex); 47 const GlobalLocalMap& getGlobalLocalIndex() const { return globalLocalIndexMap_; } 48 int getGridSize() const; 49 50 virtual void computeLocalIndex(CArray<size_t,1>& globalIndex); 50 51 virtual void computeGlobalIndex(CArray<int,1>& indexes) const; 52 virtual void partialClear(void); //! clear heavy-sized attributes 51 53 52 54 protected: … 54 56 void createGlobalIndex(const std::vector<CArray<int,1> >& globalIndexElements, 55 57 const CArray<int,1>& elementOrder); 56 58 57 59 protected: 58 60 GlobalLocalMap globalLocalIndexMap_; -
XIOS/dev/branch_openmp/src/event_client.cpp
r814 r1460 5 5 #include "type.hpp" 6 6 #include "mpi.hpp" 7 #include "cxios.hpp" 7 8 8 9 namespace xios … … 49 50 std::list<CMessage*>::iterator itMsg = messages.begin(); 50 51 52 if (CXios::checkEventSync) 53 { 54 #pragma omp critical(_output) 55 info(100)<<"Send event "<<timeLine<<" classId : "<<classId<<" typeId : "<<typeId<<endl ; 56 } 51 57 for (; itBuff != buffers.end(); ++itBuff, ++itSizes, ++itSenders, ++itMsg) 52 58 { -
XIOS/dev/branch_openmp/src/event_client.hpp
r731 r1460 21 21 std::list<int> getRanks(void); 22 22 std::list<int> getSizes(void); 23 23 int getClassId(void) { return classId; } 24 int getTypeId(void) { return typeId; } 25 24 26 private: 25 27 int classId; -
XIOS/dev/branch_openmp/src/event_scheduler.cpp
r1365 r1460 2 2 #include "xios_spl.hpp" 3 3 #include "mpi.hpp" 4 #include "tracer.hpp" 4 5 5 6 using namespace ep_lib; … … 82 83 { 83 84 85 traceOff() ; 84 86 SPendingRequest* sentRequest=new SPendingRequest ; 85 87 sentRequest->buffer[0]=timeLine ; … … 89 91 pendingSentParentRequest.push(sentRequest) ; 90 92 MPI_Isend(sentRequest->buffer,3, MPI_UNSIGNED_LONG, parent[lev], 0, communicator, &sentRequest->request) ; 93 traceOn() ; 91 94 } 92 95 … … 104 107 void CEventScheduler::checkEvent(void) 105 108 { 109 traceOff() ; 106 110 checkChildRequest() ; 107 111 checkParentRequest() ; 108 109 } 110 111 112 traceOn() ; 113 114 } 112 115 113 116 void CEventScheduler::checkParentRequest(void) -
XIOS/dev/branch_openmp/src/filter/file_writer_filter.cpp
r1328 r1460 37 37 } 38 38 39 bool CFileWriterFilter::mustAutoTrigger() const 40 { 41 return true; 42 } 43 39 44 bool CFileWriterFilter::isDataExpected(const CDate& date) const 40 45 { -
XIOS/dev/branch_openmp/src/filter/file_writer_filter.hpp
r1119 r1460 24 24 25 25 /*! 26 * Tests if the filter must auto-trigger. 27 * 28 * \return true if the filter must auto-trigger 29 */ 30 bool virtual mustAutoTrigger() const; 31 32 /*! 26 33 * Tests whether data is expected for the specified date. 27 34 * -
XIOS/dev/branch_openmp/src/filter/filter.cpp
r1119 r1460 41 41 } 42 42 43 bool CFilter::mustAutoTrigger() const 44 { 45 return COutputPin::mustAutoTrigger(); 46 } 47 43 48 bool CFilter::isDataExpected(const CDate& date) const 44 49 { -
XIOS/dev/branch_openmp/src/filter/filter.hpp
r1119 r1460 49 49 50 50 /*! 51 * Tests if the filter must auto-trigger. 52 * 53 * \return true if the filter must auto-trigger 54 */ 55 bool virtual mustAutoTrigger() const; 56 57 /*! 51 58 * Tests whether data is expected for the specified date. 52 59 * -
XIOS/dev/branch_openmp/src/filter/input_pin.hpp
r1119 r1460 60 60 61 61 /*! 62 * Tests if the pin must auto-trigger. 63 * 64 * \return true if the pin must auto-trigger 65 */ 66 bool virtual mustAutoTrigger() const = 0; 67 68 /*! 62 69 * Tests whether data is expected for the specified date. 63 70 * -
XIOS/dev/branch_openmp/src/filter/output_pin.cpp
r1119 r1460 66 66 } 67 67 68 bool COutputPin::mustAutoTrigger() const 69 { 70 std::vector<std::pair<boost::shared_ptr<CInputPin>, size_t> >::const_iterator it, itEnd; 71 for (it = outputs.begin(), itEnd = outputs.end(); it != itEnd; ++it) 72 { 73 if (it->first->mustAutoTrigger()) 74 return true; 75 } 76 77 return false; 78 } 79 68 80 void COutputPin::setOutputTriggers() 69 81 { -
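The new COutputPin::mustAutoTrigger() simply asks every connected input pin and returns true as soon as one of them requires auto-triggering, so the request propagates backwards through the filter graph. A minimal stand-alone version of the pattern with placeholder types (these are not the XIOS classes):

#include <memory>
#include <vector>

struct InputPin
{
  virtual bool mustAutoTrigger() const = 0;
  virtual ~InputPin() = default;
};

struct OutputPin
{
  std::vector<std::shared_ptr<InputPin>> outputs;

  // True as soon as one downstream consumer needs auto-triggering.
  bool mustAutoTrigger() const
  {
    for (const auto& pin : outputs)
      if (pin && pin->mustAutoTrigger()) return true;
    return false;
  }
};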
XIOS/dev/branch_openmp/src/filter/output_pin.hpp
r1119 r1460 44 44 */ 45 45 bool virtual canBeTriggered() const; 46 47 /*! 48 * Tests if the pin must auto-trigger. 49 * 50 * \return true if the pin must auto-trigger 51 */ 52 bool virtual mustAutoTrigger() const; 46 53 47 54 /*! -
XIOS/dev/branch_openmp/src/filter/source_filter.cpp
r1328 r1460 7 7 namespace xios 8 8 { 9 CSourceFilter::CSourceFilter(CGarbageCollector& gc, CGrid* grid, 9 CSourceFilter::CSourceFilter(CGarbageCollector& gc, CGrid* grid, bool compression, 10 10 const CDuration offset /*= NoneDu*/, bool manualTrigger /*= false*/, 11 11 bool hasMissingValue /*= false*/, … … 13 13 : COutputPin(gc, manualTrigger) 14 14 , grid(grid) 15 , compression(compression) 15 16 , offset(offset) 16 17 , hasMissingValue(hasMissingValue), defaultValue(defaultValue) … … 31 32 packet->status = CDataPacket::NO_ERROR; 32 33 33 packet->data.resize(grid->storeIndex_client.numElements()); 34 grid->inputField(data, packet->data); 34 packet->data.resize(grid->storeIndex_client.numElements()); 35 36 if (compression) 37 { 38 packet->data = defaultValue; 39 grid->uncompressField(data, packet->data); 40 } 41 else 42 grid->inputField(data, packet->data); 35 43 44 45 46 // if (compression) grid->inputField(data, packet->data) ; 47 // else 48 // { 49 // // just make a flat copy 50 // CArray<double, N> data_tmp(data.copy()) ; // supress const attribute 51 // CArray<double,1> dataTmp2(data_tmp.dataFirst(),shape(data.numElements()),neverDeleteData) ; 52 // packet->data = dataTmp2 ; 53 // } 36 54 // Convert missing values to NaN 37 55 if (hasMissingValue) … … 65 83 packet->timestamp = date; 66 84 packet->status = CDataPacket::NO_ERROR; 67 68 // if (data.size() != grid->storeIndex_toSrv.size()) 85 69 86 if (data.size() != grid->storeIndex_fromSrv.size()) 70 87 ERROR("CSourceFilter::streamDataFromServer(CDate date, const std::map<int, CArray<double, 1> >& data)", … … 75 92 std::map<int, CArray<double, 1> >::const_iterator it, itEnd = data.end(); 76 93 for (it = data.begin(); it != itEnd; it++) 77 { 78 // CArray<int,1>& index = grid->storeIndex_toSrv[it->first]; 94 { 79 95 CArray<int,1>& index = grid->storeIndex_fromSrv[it->first]; 80 96 for (int n = 0; n < index.numElements(); n++) -
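After copying or uncompressing the field, the source filter converts points equal to the default value into NaN so that missing data propagates through the arithmetic filters. A reduced sketch of that conversion using a plain std::vector instead of CArray; the helper name is illustrative.

#include <limits>
#include <vector>

// Replace the sentinel missingValue by quiet NaN; the comparison is exact on
// purpose, since the sentinel is a fixed value written by the model rather
// than the result of a computation.
void missingToNaN(std::vector<double>& data, double missingValue)
{
  const double nan = std::numeric_limits<double>::quiet_NaN();
  for (double& v : data)
    if (v == missingValue) v = nan;
}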
XIOS/dev/branch_openmp/src/filter/source_filter.hpp
r1205 r1460 27 27 */ 28 28 CSourceFilter(CGarbageCollector& gc, CGrid* grid, 29 bool compression=true, 29 30 const CDuration offset = NoneDu, bool manualTrigger = false, 30 31 bool hasMissingValue = false, … 64 65 const bool hasMissingValue; 65 66 const double defaultValue; 67 const bool compression ; //!< indicates whether the data needs to be compressed: true on the client side, false on the server side 66 68 }; // class CSourceFilter 67 69 } // namespace xios -
XIOS/dev/branch_openmp/src/filter/spatial_transform_filter.cpp
r1328 r1460 4 4 #include "context.hpp" 5 5 #include "context_client.hpp" 6 #include "timer.hpp" 6 7 using namespace ep_lib; 7 8 … … 29 30 size_t inputCount = 1 + (auxInputs.empty() ? 0 : auxInputs.size()); 30 31 double defaultValue = (hasMissingValue) ? std::numeric_limits<double>::quiet_NaN() : 0.0; 31 boost::shared_ptr<CSpatialTransformFilter> filter(new CSpatialTransformFilter(gc, engine, defaultValue, inputCount)); 32 32 33 34 const CGridTransformationSelector::ListAlgoType& algoList = gridTransformation->getAlgoList() ; 35 CGridTransformationSelector::ListAlgoType::const_iterator it ; 36 37 bool isSpatialTemporal=false ; 38 for (it=algoList.begin();it!=algoList.end();++it) if (it->second.first == TRANS_TEMPORAL_SPLITTING) isSpatialTemporal=true ; 39 40 boost::shared_ptr<CSpatialTransformFilter> filter ; 41 if( isSpatialTemporal) filter = boost::shared_ptr<CSpatialTransformFilter>(new CSpatialTemporalFilter(gc, engine, gridTransformation, defaultValue, inputCount)); 42 else filter = boost::shared_ptr<CSpatialTransformFilter>(new CSpatialTransformFilter(gc, engine, defaultValue, inputCount)); 43 44 33 45 if (!lastFilter) 34 46 lastFilter = filter; … … 58 70 onOutputReady(outputPacket); 59 71 } 72 73 74 75 76 77 CSpatialTemporalFilter::CSpatialTemporalFilter(CGarbageCollector& gc, CSpatialTransformFilterEngine* engine, CGridTransformation* gridTransformation, double outputValue, size_t inputSlotsCount) 78 : CSpatialTransformFilter(gc, engine, outputValue, inputSlotsCount), record(0) 79 { 80 const CGridTransformationSelector::ListAlgoType& algoList = gridTransformation->getAlgoList() ; 81 CGridTransformationSelector::ListAlgoType::const_iterator it ; 82 83 int pos ; 84 for (it=algoList.begin();it!=algoList.end();++it) 85 if (it->second.first == TRANS_TEMPORAL_SPLITTING) 86 { 87 pos=it->first ; 88 if (pos < algoList.size()-1) 89 ERROR("SpatialTemporalFilter::CSpatialTemporalFilter(CGarbageCollector& gc, CSpatialTransformFilterEngine* engine, CGridTransformation* gridTransformation, double outputValue, size_t inputSlotsCount))", 90 "temporal splitting operation must be the last of whole transformation on same grid") ; 91 } 92 93 CGrid* grid=gridTransformation->getGridDestination() ; 94 95 CAxis* axis = grid->getAxis(gridTransformation->getElementPositionInGridDst2AxisPosition().find(pos)->second) ; 96 97 nrecords = axis->index.numElements() ; 98 } 99 100 101 void CSpatialTemporalFilter::onInputReady(std::vector<CDataPacketPtr> data) 102 { 103 CSpatialTransformFilterEngine* spaceFilter = static_cast<CSpatialTransformFilterEngine*>(engine); 104 CDataPacketPtr outputPacket = spaceFilter->applyFilter(data, outputDefaultValue); 105 106 if (outputPacket) 107 { 108 size_t nelements=outputPacket->data.numElements() ; 109 if (!tmpData.numElements()) 110 { 111 tmpData.resize(nelements); 112 tmpData=outputDefaultValue ; 113 } 114 115 nelements/=nrecords ; 116 size_t offset=nelements*record ; 117 for(size_t i=0;i<nelements;++i) tmpData(i+offset) = outputPacket->data(i) ; 118 119 record ++ ; 120 if (record==nrecords) 121 { 122 record=0 ; 123 CDataPacketPtr packet = CDataPacketPtr(new CDataPacket); 124 packet->date = data[0]->date; 125 packet->timestamp = data[0]->timestamp; 126 packet->status = data[0]->status; 127 packet->data.resize(tmpData.numElements()); 128 packet->data = tmpData; 129 onOutputReady(packet); 130 tmpData.resize(0) ; 131 } 132 } 133 } 134 60 135 61 136 CSpatialTransformFilterEngine::CSpatialTransformFilterEngine(CGridTransformation* gridTransformation) … … 122 197 void 
CSpatialTransformFilterEngine::apply(const CArray<double, 1>& dataSrc, CArray<double,1>& dataDest) 123 198 { 199 CTimer::get("CSpatialTransformFilterEngine::apply").resume(); 200 124 201 CContextClient* client = CContext::getCurrent()->client; 125 202 … … 235 312 236 313 dataDest = dataCurrentDest; 314 315 CTimer::get("CSpatialTransformFilterEngine::apply").suspend() ; 237 316 } 238 317 } // namespace xios -
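CSpatialTemporalFilter above fills one slice of tmpData per incoming flux and only emits a packet once all nrecords slices are present. Its bookkeeping reduces to the accumulate-and-flush pattern sketched below with plain std::vector and an illustrative class name; each pushed field is assumed to have exactly fieldSize elements.

#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

class RecordAccumulator
{
public:
  RecordAccumulator(std::size_t fieldSize, std::size_t nrecords,
                    std::function<void(const std::vector<double>&)> onComplete)
    : buffer(fieldSize * nrecords), fieldSize(fieldSize), nrecords(nrecords),
      record(0), onComplete(std::move(onComplete)) {}

  // Copy one field into the slice [record*fieldSize, (record+1)*fieldSize);
  // once the last slice is written, hand the full buffer over and start again.
  void push(const std::vector<double>& field)
  {
    std::copy(field.begin(), field.end(), buffer.begin() + record * fieldSize);
    if (++record == nrecords)
    {
      onComplete(buffer);
      record = 0;
    }
  }

private:
  std::vector<double> buffer;
  std::size_t fieldSize, nrecords, record;
  std::function<void(const std::vector<double>&)> onComplete;
};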
XIOS/dev/branch_openmp/src/filter/spatial_transform_filter.hpp
r1334 r1460 3 3 4 4 #include "filter.hpp" 5 5 6 namespace xios 6 7 { … 48 49 double outputDefaultValue; 49 50 }; // class CSpatialTransformFilter 51 52 53 /*! 54 * A specific spatial filter for the temporal_splitting transformation scalar -> axis. An incoming flux is stored in an additional dimension given by the destination axis. 55 * Each time a flux is received, the storing index (record) is increased. When it reaches the size of the axis (nrecords), a new flux is generated and the record is reset to 0. 56 */ 57 58 class CSpatialTemporalFilter : public CSpatialTransformFilter 59 { 60 public: 61 /*! 62 * Constructs a filter wrapping the specified spatial transformation. 63 * 64 * \param gc the associated garbage collector 65 * \param engine the engine defining the spatial transformation 66 * \param [in] gridTransformation the associated transformations 67 * \param outputValue default value of output pin 68 * \param [in] inputSlotsCount number of inputs; by default there is only one, for the source field 69 */ 70 CSpatialTemporalFilter(CGarbageCollector& gc, CSpatialTransformFilterEngine* engine, CGridTransformation* gridTransformation, double outputValue, size_t inputSlotsCount = 1); 71 72 73 protected: 74 /*! 75 Overriding this function to process transformations with auxiliary inputs 76 */ 77 void virtual onInputReady(std::vector<CDataPacketPtr> data); 78 //! Current record in the filter 79 int record ; 80 //! Maximum number of records 81 int nrecords; 82 //! Temporary storage for output flux 83 CArray<double, 1> tmpData; 84 85 86 }; // class CSpatialTemporalFilter 87 50 88 51 89 /*! -
XIOS/dev/branch_openmp/src/filter/store_filter.cpp
r1328 r1460 111 111 } 112 112 113 bool CStoreFilter::mustAutoTrigger() const 114 { 115 return false; 116 } 117 113 118 bool CStoreFilter::isDataExpected(const CDate& date) const 114 119 { -
XIOS/dev/branch_openmp/src/filter/store_filter.hpp
r1205 r1460 53 53 54 54 /*! 55 * Tests if the filter must auto-trigger. 56 * 57 * \return true if the filter must auto-trigger 58 */ 59 bool virtual mustAutoTrigger() const; 60 61 /*! 55 62 * Tests whether data is expected for the specified date. 56 63 * -
XIOS/dev/branch_openmp/src/filter/temporal_filter.cpp
r1328 r1460 5 5 namespace xios 6 6 { 7 static func::CFunctor* createFunctor(const std::string& opId, bool ignoreMissingValue, double missingValue,CArray<double, 1>& tmpData);7 static func::CFunctor* createFunctor(const std::string& opId, bool ignoreMissingValue, CArray<double, 1>& tmpData); 8 8 9 9 CTemporalFilter::CTemporalFilter(CGarbageCollector& gc, const std::string& opId, 10 10 const CDate& initDate, const CDuration samplingFreq, const CDuration samplingOffset, const CDuration opFreq, 11 bool ignoreMissingValue /*= false*/ , double missingValue /*= 0.0*/)11 bool ignoreMissingValue /*= false*/) 12 12 : CFilter(gc, 1, this) 13 , functor(createFunctor(opId, ignoreMissingValue, missingValue,tmpData))13 , functor(createFunctor(opId, ignoreMissingValue, tmpData)) 14 14 , isOnceOperation(functor->timeType() == func::CFunctor::once) 15 15 , isInstantOperation(functor->timeType() == func::CFunctor::instant) … … 17 17 , samplingOffset(samplingOffset) 18 18 , opFreq(opFreq) 19 , nextSamplingDate(initDate + this->samplingOffset + initDate.getRelCalendar().getTimeStep()) 20 , nextOperationDate(initDate + this->samplingOffset + opFreq) 19 , offsetMonth({0, this->samplingOffset.month, 0, 0, 0, 0, 0}) 20 , offsetAllButMonth({this->samplingOffset.year, 0 , this->samplingOffset.day, 21 this->samplingOffset.hour, this->samplingOffset.minute, 22 this->samplingOffset.second, this->samplingOffset.timestep}) 23 , initDate(initDate) 24 , nextSamplingDate(initDate + (this->samplingOffset + initDate.getRelCalendar().getTimeStep())) 25 , nbOperationDates(1) 26 // , nextOperationDate(initDate + opFreq + this->samplingOffset) 21 27 , isFirstOperation(true) 22 28 { … … 35 41 { 36 42 usePacket = (data[0]->date >= nextSamplingDate); 37 outputResult = (data[0]->date + samplingFreq > nextOperationDate); 43 // outputResult = (data[0]->date + samplingFreq > nextOperationDate); 44 outputResult = (data[0]->date > initDate + nbOperationDates*opFreq - samplingFreq + offsetMonth + offsetAllButMonth); 38 45 copyLess = (isInstantOperation && usePacket && outputResult); 39 46 } … … 54 61 if (outputResult) 55 62 { 63 nbOperationDates ++; 56 64 if (!copyLess) 57 65 { … … 69 77 70 78 isFirstOperation = false; 71 nextOperationDate = nextOperationDate + samplingFreq + opFreq - samplingFreq;79 // nextOperationDate = initDate + samplingFreq + nbOperationDates*opFreq - samplingFreq + offsetMonth + offsetAllButMonth; 72 80 } 73 81 } … … 76 84 } 77 85 86 bool CTemporalFilter::mustAutoTrigger() const 87 { 88 return true; 89 } 90 78 91 bool CTemporalFilter::isDataExpected(const CDate& date) const 79 92 { 80 return isOnceOperation ? isFirstOperation : (date >= nextSamplingDate || date + samplingFreq > nextOperationDate); 93 // return isOnceOperation ? isFirstOperation : (date >= nextSamplingDate || date + samplingFreq > nextOperationDate); 94 return isOnceOperation ? isFirstOperation : (date >= nextSamplingDate || date > initDate + nbOperationDates*opFreq - samplingFreq + offsetMonth + offsetAllButMonth); 81 95 } 82 96 83 static func::CFunctor* createFunctor(const std::string& opId, bool ignoreMissingValue, double missingValue,CArray<double, 1>& tmpData)97 static func::CFunctor* createFunctor(const std::string& opId, bool ignoreMissingValue, CArray<double, 1>& tmpData) 84 98 { 85 99 func::CFunctor* functor = NULL; 86 100 87 double defaultValue = ignoreMissingValue ? 
std::numeric_limits<double>::quiet_NaN() : missingValue;101 double defaultValue = std::numeric_limits<double>::quiet_NaN(); 88 102 89 103 #define DECLARE_FUNCTOR(MType, mtype) \ -
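The rewritten operation test above drops the stored nextOperationDate in favour of a count of completed operations (nbOperationDates) and splits samplingOffset into its month component and everything else, presumably because month lengths vary and the month part must be applied to the calendar date as its own step. The snippet below only restates that split and the new predicate schematically, with a simplified Duration standing in for CDuration; it is not the XIOS calendar implementation.

    // Simplified stand-in for CDuration, with the same components as in the changeset.
    struct Duration { int year, month, day, hour, minute, second, timestep; };

    // Split a sampling offset exactly as the constructor above does.
    inline Duration monthPart(const Duration& d)
    {
      Duration m = d;
      m.year = 0; m.day = 0; m.hour = 0; m.minute = 0; m.second = 0; m.timestep = 0;
      return m;               // only the month component survives (offsetMonth)
    }

    inline Duration allButMonthPart(const Duration& d)
    {
      Duration r = d;
      r.month = 0;            // every component except the month (offsetAllButMonth)
      return r;
    }

    // With these two pieces, the filter now asks, for each incoming packet date:
    //   date > initDate + nbOperationDates*opFreq - samplingFreq + offsetMonth + offsetAllButMonth
    // and increments nbOperationDates each time a result is emitted, instead of
    // keeping a running nextOperationDate.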
XIOS/dev/branch_openmp/src/filter/temporal_filter.hpp
r1205 r1460 26 26 * \param ignoreMissingValue true if and only if the missing value must be ignored 27 27 when doing the operation 28 * \param missingValue the missing value29 28 */ 30 29 CTemporalFilter(CGarbageCollector& gc, const std::string& opId, 31 30 const CDate& initDate, const CDuration samplingFreq, const CDuration samplingOffset, const CDuration opFreq, 32 bool ignoreMissingValue = false , double missingValue = 0.0);31 bool ignoreMissingValue = false); 33 32 34 33 /*! … … 39 38 */ 40 39 CDataPacketPtr virtual apply(std::vector<CDataPacketPtr> data); 40 41 /*! 42 * Tests if the filter must auto-trigger. 43 * 44 * \return true if the filter must auto-trigger 45 */ 46 bool virtual mustAutoTrigger() const; 41 47 42 48 /*! … … 56 62 const CDuration samplingOffset; //!< The sampling offset, i.e. the offset after which the input data will be used 57 63 const CDuration opFreq; //!< The operation frequency, i.e. the frequency at which the output data will be computed 64 const CDuration offsetMonth; //!< The month duration of samplingOffset 65 CDuration offsetAllButMonth; //!< All but the month duration of samplingOffset 66 const CDate initDate; 58 67 CDate nextSamplingDate; //!< The date of the next sampling 59 CDate nextOperationDate; //!< The date of the next operation 68 int nbOperationDates; //!< The number of times an operation is performed 69 // CDate nextOperationDate; //!< The date of the next operation 60 70 bool isFirstOperation; //!< True before the first operation was been computed 61 71 }; // class CTemporalFilter -
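Both filters in this changeset gain a mustAutoTrigger() virtual: CStoreFilter answers false, CTemporalFilter answers true. The calling side is not part of this changeset; the short sketch below, with hypothetical names, only illustrates the intended contract of a driver that triggers exactly the filters that ask for it.

    #include <cstddef>
    #include <vector>

    // Hypothetical minimal interface mirroring the virtual added in this changeset.
    struct IFilter
    {
      virtual bool mustAutoTrigger() const = 0;  // CTemporalFilter -> true, CStoreFilter -> false
      virtual void trigger() = 0;                // stand-in for whatever the real driver does
      virtual ~IFilter() {}
    };

    // Driver loop: only filters declaring themselves auto-triggerable are triggered.
    void autoTriggerAll(const std::vector<IFilter*>& filters)
    {
      for (std::size_t i = 0; i < filters.size(); ++i)
        if (filters[i]->mustAutoTrigger())
          filters[i]->trigger();
    }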
XIOS/dev/branch_openmp/src/generate_fortran_interface.cpp
r981 r1460 35 35 CComputeConnectivityDomain compConDomain; 36 36 CExpandDomain expandDomain; 37 CReorderDomain reorderDomain; 37 38 38 39 CInterpolateAxis interpolateAxis; … … 40 41 CInverseAxis inverseAxis; 41 42 CReduceDomainToAxis reduceDomainToAxis; 43 CReduceAxisToAxis reduceAxisToAxis; 42 44 CExtractDomainToAxis extractDomainToAxis; 45 CTemporalSplitting temporalSplitting; 43 46 44 47 CReduceAxisToScalar reduceAxisToScalar; 45 48 CExtractAxisToScalar extractAxisToScalar; 46 49 CReduceDomainToScalar reduceDomainToScalar; 47 50 CDuplicateScalarToAxis duplicateScalarToAxis; 51 CReduceScalarToScalar reduceScalarToScalar; 52 48 53 ostringstream oss; 49 54 ofstream file; … … 297 302 file.open((path+"iexpand_domain_attr.F90").c_str()); 298 303 expandDomain.generateFortranInterface(file); 304 305 file.open((path+"reorder_domain_interface_attr.F90").c_str()); 306 reorderDomain.generateFortran2003Interface(file); 307 file.close(); 308 309 file.open((path+"icreorder_domain_attr.cpp").c_str()); 310 reorderDomain.generateCInterface(file); 311 file.close(); 312 313 file.open((path+"ireorder_domain_attr.F90").c_str()); 314 reorderDomain.generateFortranInterface(file); 315 299 316 file.close(); 300 317 … … 341 358 reduceDomainToAxis.generateFortran2003Interface(file); 342 359 file.close(); 343 360 344 361 file.open((path+"icreduce_domain_to_axis_attr.cpp").c_str()); 345 362 reduceDomainToAxis.generateCInterface(file); … … 348 365 file.open((path+"ireduce_domain_to_axis_attr.F90").c_str()); 349 366 reduceDomainToAxis.generateFortranInterface(file); 367 file.close(); 368 369 file.open((path+"reduce_axis_to_axis_interface_attr.F90").c_str()); 370 reduceAxisToAxis.generateFortran2003Interface(file); 371 file.close(); 372 373 file.open((path+"icreduce_axis_to_axis_attr.cpp").c_str()); 374 reduceAxisToAxis.generateCInterface(file); 375 file.close(); 376 377 file.open((path+"ireduce_axis_to_axis_attr.F90").c_str()); 378 reduceAxisToAxis.generateFortranInterface(file); 350 379 file.close(); 351 380 … … 363 392 364 393 394 file.open((path+"temporal_splitting_interface_attr.F90").c_str()); 395 temporalSplitting.generateFortran2003Interface(file); 396 file.close(); 397 398 file.open((path+"ictemporal_splitting_attr.cpp").c_str()); 399 temporalSplitting.generateCInterface(file); 400 file.close(); 401 402 file.open((path+"itemporal_splitting_attr.F90").c_str()); 403 temporalSplitting.generateFortranInterface(file); 404 file.close(); 405 406 407 file.open((path+"duplicate_scalar_to_axis_interface_attr.F90").c_str()); 408 duplicateScalarToAxis.generateFortran2003Interface(file); 409 file.close(); 410 411 file.open((path+"icduplicate_scalar_to_axis_attr.cpp").c_str()); 412 duplicateScalarToAxis.generateCInterface(file); 413 file.close(); 414 415 file.open((path+"iduplicate_scalar_to_axis_attr.F90").c_str()); 416 duplicateScalarToAxis.generateFortranInterface(file); 417 file.close(); 365 418 /*! 
366 419 Scalar transformations … … 390 443 file.close(); 391 444 445 392 446 file.open((path+"reduce_domain_to_scalar_interface_attr.F90").c_str()); 393 447 reduceDomainToScalar.generateFortran2003Interface(file); … … 402 456 file.close(); 403 457 458 459 file.open((path+"reduce_scalar_to_scalar_interface_attr.F90").c_str()); 460 reduceScalarToScalar.generateFortran2003Interface(file); 461 file.close(); 462 463 file.open((path+"icreduce_scalar_to_scalar_attr.cpp").c_str()); 464 reduceScalarToScalar.generateCInterface(file); 465 file.close(); 466 467 file.open((path+"ireduce_scalar_to_scalar_attr.F90").c_str()); 468 reduceScalarToScalar.generateFortranInterface(file); 469 file.close(); 470 471 472 473 474 404 475 file.open((path+"context_interface_attr.F90").c_str()); 405 476 context->generateFortran2003Interface(file); -
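Every transformation registered above follows the same three-file pattern: a Fortran 2003 interface, a C interface and a Fortran interface, each opened, generated and closed in turn. A helper like the one sketched below could express that pattern once; it is a hypothetical refactor, not something this changeset introduces, and it only assumes the generate* methods already called in generate_fortran_interface.cpp.

    #include <fstream>
    #include <string>

    // Hypothetical helper: emit the three interface files produced for one attribute class.
    // T is any of the transformation classes used above (CReorderDomain, CTemporalSplitting,
    // CDuplicateScalarToAxis, ...), all of which expose the three generate* methods below.
    template <class T>
    void generateInterfaceFiles(T& obj, const std::string& path, const std::string& name)
    {
      std::ofstream file;

      file.open((path + name + "_interface_attr.F90").c_str());
      obj.generateFortran2003Interface(file);
      file.close();

      file.open((path + "ic" + name + "_attr.cpp").c_str());
      obj.generateCInterface(file);
      file.close();

      file.open((path + "i" + name + "_attr.F90").c_str());
      obj.generateFortranInterface(file);
      file.close();
    }

    // e.g. generateInterfaceFiles(temporalSplitting, path, "temporal_splitting");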
XIOS/dev/branch_openmp/src/group_factory_decl.cpp
r1334 r1460 34 34 macro(CReduceAxisToScalarGroup) 35 35 macro(CReduceDomainToAxisGroup) 36 macro(CReduceAxisToAxisGroup) 36 37 macro(CExtractDomainToAxisGroup) 37 38 macro(CComputeConnectivityDomainGroup) … … 39 40 macro(CExtractAxisToScalarGroup) 40 41 macro(CReduceDomainToScalarGroup) 42 macro(CTemporalSplittingGroup) 43 macro(CDuplicateScalarToAxisGroup) 44 macro(CReduceScalarToScalarGroup) 45 macro(CReorderDomainGroup) 41 46 } -
XIOS/dev/branch_openmp/src/group_template.hpp
r591 r1460 6 6 #include "event_server.hpp" 7 7 #include "object_template.hpp" 8 #include "context_client.hpp" 8 9 9 10 namespace xios … … 72 73 static bool dispatchEvent(CEventServer& event) ; 73 74 void sendCreateChild(const string& id="") ; 75 void sendCreateChild(const string& id, CContextClient* client) ; 74 76 void sendCreateChildGroup(const string& id="") ; 75 77 static void recvCreateChild(CEventServer& event) ; -
XIOS/dev/branch_openmp/src/group_template_decl.cpp
r976 r1460 23 23 macro(ReduceAxisToScalar) 24 24 macro(ReduceDomainToAxis) 25 macro(ReduceAxisToAxis) 25 26 macro(ExtractDomainToAxis) 26 27 macro(ComputeConnectivityDomain) … … 28 29 macro(ExtractAxisToScalar) 29 30 macro(ReduceDomainToScalar) 31 macro(TemporalSplitting) 32 macro(DuplicateScalarToAxis) 33 macro(ReduceScalarToScalar) 34 macro(ReorderDomain) 30 35 31 36 } -
XIOS/dev/branch_openmp/src/group_template_impl.hpp
r595 r1460 373 373 void CGroupTemplate<U, V, W>::sendCreateChild(const string& id) 374 374 { 375 CContext* context=CContext::getCurrent() ; 376 377 if (context->hasClient) 378 // if (!context->hasServer ) 379 { 380 // Use correct context client to send message 381 // CContextClient* contextClientTmp = (0 != context->clientPrimServer) ? context->clientPrimServer : context->client; 382 int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 383 for (int i = 0; i < nbSrvPools; ++i) 384 { 385 CContextClient* contextClientTmp = (context->hasServer) ? context->clientPrimServer[i] : context->client; 386 387 CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD) ; 388 if (contextClientTmp->isServerLeader()) 389 { 390 CMessage msg ; 391 msg<<this->getId() ; 392 msg<<id ; 393 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 394 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 395 event.push(*itRank,1,msg) ; 396 contextClientTmp->sendEvent(event) ; 397 } 398 else contextClientTmp->sendEvent(event) ; 399 } 400 } 401 402 } 403 404 template <class U, class V, class W> 405 void CGroupTemplate<U, V, W>::sendCreateChild(const string& id, CContextClient* client) 406 { 407 408 CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD) ; 409 if (client->isServerLeader()) 410 { 411 CMessage msg ; 412 msg<<this->getId() ; 413 msg<<id ; 414 const std::list<int>& ranks = client->getRanksServerLeader(); 415 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 416 event.push(*itRank,1,msg) ; 417 client->sendEvent(event) ; 418 } 419 else client->sendEvent(event) ; 420 } 421 422 423 template <class U, class V, class W> 424 void CGroupTemplate<U, V, W>::sendCreateChildGroup(const string& id) 425 { 375 426 CContext* context=CContext::getCurrent() ; 376 377 if (! context->hasServer ) 427 if (context->hasClient) 378 428 { 379 CContextClient* client=context->client ; 380 381 CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD) ; 382 if (client->isServerLeader()) 383 { 384 CMessage msg ; 385 msg<<this->getId() ; 386 msg<<id ; 387 const std::list<int>& ranks = client->getRanksServerLeader(); 388 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 389 event.push(*itRank,1,msg) ; 390 client->sendEvent(event) ; 391 } 392 else client->sendEvent(event) ; 429 // Use correct context client to send message 430 // int nbSrvPools = (context->hasServer) ? context->clientPrimServer.size() : 1; 431 int nbSrvPools = (context->hasServer) ? (context->hasClient ? context->clientPrimServer.size() : 1) : 1; 432 for (int i = 0; i < nbSrvPools; ++i) 433 { 434 CContextClient* contextClientTmp = (context->hasServer) ? 
context->clientPrimServer[i] : context->client; 435 CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD_GROUP) ; 436 if (contextClientTmp->isServerLeader()) 437 { 438 CMessage msg ; 439 msg<<this->getId() ; 440 msg<<id ; 441 const std::list<int>& ranks = contextClientTmp->getRanksServerLeader(); 442 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank) 443 event.push(*itRank,1,msg) ; 444 contextClientTmp->sendEvent(event) ; 445 } 446 else contextClientTmp->sendEvent(event) ; 447 } 393 448 } 394 395 }396 397 template <class U, class V, class W>398 void CGroupTemplate<U, V, W>::sendCreateChildGroup(const string& id)399 {400 CContext* context=CContext::getCurrent() ;401 if (! context->hasServer )402 {403 CContextClient* client=context->client ;404 405 CEventClient event(this->getType(),EVENT_ID_CREATE_CHILD_GROUP) ;406 if (client->isServerLeader())407 {408 CMessage msg ;409 msg<<this->getId() ;410 msg<<id ;411 const std::list<int>& ranks = client->getRanksServerLeader();412 for (std::list<int>::const_iterator itRank = ranks.begin(), itRankEnd = ranks.end(); itRank != itRankEnd; ++itRank)413 event.push(*itRank,1,msg) ;414 client->sendEvent(event) ;415 }416 else client->sendEvent(event) ;417 }418 419 449 } 420 450 … … 422 452 void CGroupTemplate<U, V, W>::recvCreateChild(CEventServer& event) 423 453 { 424 425 454 CBufferIn* buffer=event.subEvents.begin()->buffer; 426 455 string id; -
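sendCreateChild and sendCreateChildGroup now iterate over the primary-server client pools when the context itself acts as a server, instead of always sending through the single context->client. The sketch below isolates that pool-selection loop; the ContextClient/Context structs and sendToPool() are stand-ins for the XIOS types and for the CEventClient/leader logic shown in the diff.

    #include <vector>

    // Hypothetical stand-ins for the XIOS types used in group_template_impl.hpp.
    struct ContextClient {};
    struct Context
    {
      bool hasClient;
      bool hasServer;
      ContextClient* client;                          // classic client -> server channel
      std::vector<ContextClient*> clientPrimServer;   // channels to the primary-server pools
    };

    void sendToPool(ContextClient* /*client*/) { /* build CMessage, push to leader ranks, sendEvent */ }

    // Pool selection as done in the new sendCreateChild: one send per primary-server pool
    // when the context is itself a server, otherwise a single send through context->client.
    void sendCreateChildSketch(Context* context)
    {
      if (!context->hasClient) return;
      int nbSrvPools = context->hasServer ? (int)context->clientPrimServer.size() : 1;
      for (int i = 0; i < nbSrvPools; ++i)
      {
        ContextClient* c = context->hasServer ? context->clientPrimServer[i] : context->client;
        sendToPool(c);
      }
    }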
XIOS/dev/branch_openmp/src/indent_xml.hpp
r1331 r1460 8 8 namespace xios 9 9 { 10 /// ////////////////////// Déclarations ////////////////////// ///11 class CIndent12 {13 public :10 /// ////////////////////// Déclarations ////////////////////// /// 11 class CIndent 12 { 13 public : 14 14 15 /// Méthodes statiques ///16 static StdOStream & NIndent (StdOStream & out);17 static StdOStream & IncIndent(StdOStream & out);18 static StdOStream & DecEndl (StdOStream & out);15 /// Méthodes statiques /// 16 static StdOStream & NIndent (StdOStream & out); 17 static StdOStream & IncIndent(StdOStream & out); 18 static StdOStream & DecEndl (StdOStream & out); 19 19 20 private :20 private : 21 21 22 /// Propriétés statiques ///23 static unsigned int Indent;24 #pragma omp threadprivate(Indent)25 static StdString Increm;26 #pragma omp threadprivate(Increm)27 static bool WithLine;28 #pragma omp threadprivate(WithLine)22 /// Propriétés statiques /// 23 static unsigned int Indent; 24 #pragma omp threadprivate(Indent) 25 static StdString Increm; 26 #pragma omp threadprivate(Increm) 27 static bool WithLine; 28 #pragma omp threadprivate(WithLine) 29 29 30 }; // class CIndent30 }; // class CIndent 31 31 32 ///--------------------------------------------------------------32 ///-------------------------------------------------------------- 33 33 34 class CIndentedXml 35 { 36 public : 37 /// Méthode statique /// 38 static StdString Indented(const StdString & content); 34 class CIndentedXml 35 { 36 public : 39 37 40 }; // class CIndentedXml 38 /// Méthode statique /// 39 static StdString Indented(const StdString & content); 41 40 42 ///-------------------------------------------------------------- 41 }; // class CIndentedXml 42 43 ///-------------------------------------------------------------- 43 44 44 45 } // namespace xios -
XIOS/dev/branch_openmp/src/interface/c/icdata.cpp
r1369 r1460 1 1 /* ************************************************************************** * 2 * Copyright IPSL/LSCE, xios, Avril 2010 - Octobre 2011 *2 * Copyright © IPSL/LSCE, xios, Avril 2010 - Octobre 2011 * 3 3 * ************************************************************************** */ 4 4 … … 25 25 26 26 #include "timer.hpp" 27 #include "array_new.hpp" 28 27 29 28 30 extern "C" 29 31 { 30 // /////////////////////////////// D finitions ////////////////////////////// //31 32 // ----------------------- Red finition de types ----------------------------32 // /////////////////////////////// Définitions ////////////////////////////// // 33 34 // ----------------------- Redéfinition de types ---------------------------- 33 35 34 36 typedef enum { NETCDF4 = 0 } XFileType; … … 36 38 typedef xios::CContext* XContextPtr; 37 39 38 // -------------------- Traitement des donn es ------------------------------40 // -------------------- Traitement des données ------------------------------ 39 41 40 42 // This function is not exported to the public Fortran interface, … … 55 57 ep_lib::MPI_Comm local_comm; 56 58 ep_lib::MPI_Comm return_comm; 57 58 59 59 60 if (!cstr2string(client_id, len_client_id, str)) return; … … 379 380 380 381 381 // ---------------------- Ecriture des donn es ------------------------------382 // ---------------------- Ecriture des données ------------------------------ 382 383 383 384 void cxios_write_data_k80(const char* fieldid, int fieldid_size, double* data_k8, int data_Xsize) … … 712 713 } 713 714 714 // ---------------------- Lecture des donn es ------------------------------715 // ---------------------- Lecture des données ------------------------------ 715 716 716 717 void cxios_read_data_k80(const char* fieldid, int fieldid_size, double* data_k8, int data_Xsize) -
XIOS/dev/branch_openmp/src/io/inetcdf4.cpp
r1362 r1460 85 85 std::pair<nc_type, StdSize> retvalue; 86 86 int grpid = this->getGroup(path); 87 int varid = (var != NULL ) ? this->getVariable(*var, path) : NC_GLOBAL;87 int varid = (var != NULL && this->hasVariable(*var, path)) ? this->getVariable(*var, path) : NC_GLOBAL; 88 88 CNetCdfInterface::inqAtt(grpid, varid, attname, retvalue.first, retvalue.second); 89 89 return retvalue; … … 218 218 int nbdim = 0, *dimid = NULL; 219 219 int grpid = this->getGroup(path); 220 int varid = (var != NULL ) ? this->getVariable(*var, path) : NC_GLOBAL;220 int varid = (var != NULL && this->hasVariable(*var, path)) ? this->getVariable(*var, path) : NC_GLOBAL; 221 221 std::list<StdString> retvalue; 222 222 223 if (var != NULL )223 if (var != NULL && this->hasVariable(*var, path)) 224 224 { 225 225 CNetCdfInterface::inqVarNDims(grpid, varid, nbdim); … … 249 249 int nbdim = 0, *dimid = NULL; 250 250 int grpid = this->getGroup(path); 251 int varid = (var != NULL ) ? this->getVariable(*var, path) : NC_GLOBAL;251 int varid = (var != NULL && this->hasVariable(*var, path)) ? this->getVariable(*var, path) : NC_GLOBAL; 252 252 std::map<StdString, StdSize> retvalue; 253 253 254 if (var != NULL )254 if (var != NULL && this->hasVariable(*var, path)) 255 255 { 256 256 CNetCdfInterface::inqVarNDims(grpid, varid, nbdim); … … 284 284 std::list<StdString> retvalue; 285 285 int grpid = this->getGroup(path); 286 int varid = (var != NULL ) ? this->getVariable(*var, path) : NC_GLOBAL;287 288 if (var != NULL )286 int varid = (var != NULL && this->hasVariable(*var, path)) ? this->getVariable(*var, path) : NC_GLOBAL; 287 288 if (var != NULL && this->hasVariable(*var, path)) 289 289 CNetCdfInterface::inqVarNAtts(grpid, varid, nbatt); 290 290 else … … 378 378 { 379 379 int grpid = this->getGroup(path); 380 int varid = (var != NULL ) ? this->getVariable(*var, path) : NC_GLOBAL;380 int varid = (var != NULL && this->hasVariable(*var, path)) ? 
this->getVariable(*var, path) : NC_GLOBAL; 381 381 std::pair<nc_type , StdSize> attinfos = this->getAttribute(name, var, path); 382 382 std::vector<T> retvalue(attinfos.second); … … 496 496 bool CINetCDF4::isRectilinear(const StdString& name, const CVarPath* const path) 497 497 { 498 std::list<StdString> coords = this->getCoordinatesIdList(name, path); 499 std::list<StdString>::const_iterator it = coords.begin(), end = coords.end(); 498 std::list<StdString> varCoords = this->getCoordinatesIdList(name, path); 499 std::list<StdString> varDims = this->getDimensionsList(&name, path); 500 std::list<StdString>::const_iterator it = varCoords.begin(), end = varCoords.end(); 501 std::set<StdString> varDims1D; 502 503 // Firstly, loop over coordinate list 500 504 for (; it != end; it++) 501 505 { 502 506 const StdString& coord = *it; 503 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path)) 504 { 505 std::map<StdString, StdSize> dimvar = this->getDimensions(&coord, path); 506 if ((dimvar.size() == 1) && (dimvar.find(coord) != dimvar.end())) 507 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) ) 508 { 509 std::map<StdString, StdSize> coordDims = this->getDimensions(&coord, path); 510 for (std::map<StdString, StdSize>::const_iterator itTmp = coordDims.begin(); itTmp != coordDims.end(); itTmp++) 511 { 512 varDims.remove(itTmp->first); 513 } 514 if (this->isLonOrLat(coord, path) && coordDims.size() == 1) 515 { 516 varDims1D.insert(coordDims.begin()->first); 507 517 continue; 508 else 509 return false; 510 } 511 } 512 return true; 518 } 519 } 520 } 521 // Secondly, loop over remaining dimensions 522 for (it= varDims.begin(); it != varDims.end(); it++) 523 { 524 const StdString& coord = *it; 525 std::map<StdString, StdSize> coordDims = this->getDimensions(&coord, path); 526 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) ) 527 { 528 if (this->isLonOrLat(coord, path) && coordDims.size() == 1) 529 { 530 varDims1D.insert(coordDims.begin()->first); 531 continue; 532 } 533 } 534 } 535 536 return (varDims1D.size() == 2); 513 537 } 514 538 … … 525 549 { 526 550 const StdString& coord = *it; 527 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) )551 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) && this->isLonOrLat(coord, path)) 528 552 { 529 553 std::map<StdString, StdSize> dimvar = this->getDimensions(&coord, path); … … 552 576 { 553 577 const StdString& coord = *it; 554 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) )578 if (this->hasVariable(coord, path) && !this->isTemporal(coord, path) && this->isLonOrLat(coord, path)) 555 579 { 556 580 std::map<StdString, StdSize> dimvar = this->getDimensions(&coord, path); … … 782 806 return *(++(++clist.rbegin())); 783 807 } 808 809 bool CINetCDF4::isLonOrLat(const StdString& varname, const CVarPath* const path) 810 { 811 if (this->hasAttribute(CCFKeywords::XIOS_CF_units, &varname, path)) 812 { 813 StdString unit = this->getAttributeValue(CCFKeywords::XIOS_CF_units, &varname, path); 814 return (CCFConvention::XIOS_CF_Latitude_units.end() != CCFConvention::XIOS_CF_Latitude_units.find(unit) 815 || CCFConvention::XIOS_CF_Longitude_units.end() != CCFConvention::XIOS_CF_Longitude_units.find(unit)); 816 } 817 } 818 784 819 } // namespace xios -
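The reworked isRectilinear above now classifies a variable as rectilinear when exactly two one-dimensional longitude/latitude coordinates can be found among its coordinates and remaining dimensions, and the new isLonOrLat decides lon/lat from the CF units attribute. The snippet below is only an illustrative units check: the unit strings are typical CF values, the real sets live in CCFConvention (XIOS_CF_Latitude_units / XIOS_CF_Longitude_units) and may differ, and the no-units case is made an explicit false return here.

    #include <set>
    #include <string>

    // Illustrative only: typical CF unit strings for latitude and longitude.
    static std::set<std::string> makeUnits(const char* a, const char* b, const char* c, const char* d)
    {
      std::set<std::string> s;
      s.insert(a); s.insert(b); s.insert(c); s.insert(d);
      return s;
    }
    static const std::set<std::string> latUnits = makeUnits("degrees_north", "degree_north", "degrees_N", "degree_N");
    static const std::set<std::string> lonUnits = makeUnits("degrees_east",  "degree_east",  "degrees_E",  "degree_E");

    // A variable is treated as a lon/lat coordinate when its "units" attribute matches
    // one of the CF-style unit strings above; no units attribute means "not lon/lat" here.
    bool looksLikeLonOrLat(bool hasUnitsAttribute, const std::string& units)
    {
      if (!hasUnitsAttribute) return false;
      return latUnits.count(units) != 0 || lonUnits.count(units) != 0;
    }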
XIOS/dev/branch_openmp/src/io/inetcdf4.hpp
r1328 r1460 135 135 bool isCellGrid(const StdString& name, const CVarPath* const path = NULL); 136 136 137 bool isLonOrLat(const StdString& varname, const CVarPath* const path = NULL); 138 137 139 protected: 138 140 /// Getters /// -
XIOS/dev/branch_openmp/src/io/nc4_data_input.cpp
r1338 r1460 188 188 listDimSize.push_front(*itMap); 189 189 */ 190 for (std::list<StdString>::const_iterator it = dimList.begin(); it != dimList.end(); ++it) 191 listDimSize.push_front(*dimSizeMap.find(*it)); 190 191 if (!SuperClassWriter::isRectilinear(fieldId)) 192 { 193 for (std::list<StdString>::const_iterator it = dimList.begin(); it != dimList.end(); ++it) 194 listDimSize.push_front(*dimSizeMap.find(*it)); 195 } 196 else 197 { 198 std::list<StdString> coords = SuperClassWriter::getCoordinatesIdList(fieldId); 199 std::list<StdString>::const_iterator itCoord = coords.begin(); 200 for (; itCoord != coords.end(); itCoord++) 201 { 202 const StdString& coord = *itCoord; 203 if (SuperClassWriter::hasVariable(coord) && !SuperClassWriter::isTemporal(coord)) 204 { 205 std::map<StdString, StdSize> dimsTmp = SuperClassWriter::getDimensions(&coord); 206 StdString dimNameTmp = dimsTmp.begin()->first; 207 StdSize dimSizeTmp = dimsTmp.begin()->second; 208 listDimSize.push_front(make_pair(coord, dimSizeTmp)); 209 dimSizeMap.erase(dimNameTmp); 210 dimList.remove(dimNameTmp); 211 } 212 } 213 for (std::list<StdString>::const_iterator it = dimList.begin(); it != dimList.end(); ++it) 214 listDimSize.push_front(*dimSizeMap.find(*it)); 215 } 192 216 193 217 // Now process domain and axis … … 268 292 if ((CDomain::type_attr::rectilinear == domain->type)) 269 293 { 270 // Ok, try to read some f..attributes such as longitude and latitude294 // Ok, try to read some attributes such as longitude and latitude 271 295 bool hasLat = SuperClassWriter::hasVariable(itMapNj->first); 272 296 if (hasLat) … … 287 311 else if ((CDomain::type_attr::curvilinear == domain->type)) 288 312 { 289 int ni = domain->ni; 290 int nj = domain->nj; 313 // Make sure that if there is no local domain defined on a process, the process still reads just one value. 
314 int ni, nj, ibegin, jbegin; 315 if (domain->ni == 0) 316 { 317 ni = 1; 318 ibegin = 0; 319 } 320 else 321 { 322 ni = domain->ni; 323 ibegin = domain->ibegin; 324 } 325 if (domain->nj == 0) 326 { 327 nj = 1; 328 jbegin = 0; 329 } 330 else 331 { 332 nj = domain->nj; 333 jbegin = domain->jbegin; 334 } 335 291 336 std::vector<StdSize> nBeginLatLon(2), nSizeLatLon(2); 292 nBeginLatLon[0] = 0; nBeginLatLon[1] = 0;293 nSizeLatLon[0] = domain->nj_glo.getValue(); nSizeLatLon[1] = domain->ni_glo.getValue();337 nBeginLatLon[0] = jbegin; nBeginLatLon[1] = ibegin; 338 nSizeLatLon[0] = nj; nSizeLatLon[1] = ni; 294 339 295 340 StdString latName = this->getLatCoordName(fieldId); 296 341 if (SuperClassWriter::hasVariable(latName)) 297 342 { 298 domain->latvalue_curvilinear_read_from_file.resize( domain->ni_glo,domain->nj_glo);343 domain->latvalue_curvilinear_read_from_file.resize(ni, nj); 299 344 readFieldVariableValue(domain->latvalue_curvilinear_read_from_file, latName, nBeginLatLon, nSizeLatLon); 300 345 } … … 302 347 if (SuperClassWriter::hasVariable(lonName)) 303 348 { 304 domain->lonvalue_curvilinear_read_from_file.resize( domain->ni_glo,domain->nj_glo);349 domain->lonvalue_curvilinear_read_from_file.resize(ni, nj); 305 350 readFieldVariableValue(domain->lonvalue_curvilinear_read_from_file, lonName, nBeginLatLon, nSizeLatLon); 306 351 } … … 309 354 StdString boundsLonName = this->getBoundsId(lonName); 310 355 311 int nbVertex = this->getNbVertex(fieldId);356 int nbVertex = 4; //this->getNbVertex(fieldId); 312 357 if (!domain->nvertex.isEmpty() && (domain->nvertex != nbVertex)) 313 358 { … … 324 369 325 370 std::vector<StdSize> nBeginBndsLatLon(3), nSizeBndsLatLon(3); 326 nBeginBndsLatLon[0] = 0; nSizeBndsLatLon[0] = domain->nj_glo.getValue();327 nBeginBndsLatLon[1] = 0; nSizeBndsLatLon[1] = domain->ni_glo.getValue();371 nBeginBndsLatLon[0] = jbegin; nSizeBndsLatLon[0] = nj; 372 nBeginBndsLatLon[1] = ibegin; nSizeBndsLatLon[1] = ni; 328 373 nBeginBndsLatLon[2] = 0; nSizeBndsLatLon[2] = nbVertex; 329 374 330 375 if (SuperClassWriter::hasVariable(boundsLatName)) 331 376 { 332 domain->bounds_latvalue_curvilinear_read_from_file.resize(nbVertex, domain->ni_glo,domain->nj_glo);377 domain->bounds_latvalue_curvilinear_read_from_file.resize(nbVertex, ni, nj); 333 378 readFieldVariableValue(domain->bounds_latvalue_curvilinear_read_from_file, boundsLatName, nBeginBndsLatLon, nSizeBndsLatLon); 334 379 … … 336 381 if (SuperClassWriter::hasVariable(boundsLonName)) 337 382 { 338 domain->bounds_lonvalue_curvilinear_read_from_file.resize(nbVertex, domain->ni_glo,domain->nj_glo);383 domain->bounds_lonvalue_curvilinear_read_from_file.resize(nbVertex, ni, nj); 339 384 readFieldVariableValue(domain->bounds_lonvalue_curvilinear_read_from_file, boundsLonName, nBeginBndsLatLon, nSizeBndsLatLon); 340 385 } … … 342 387 else if ((CDomain::type_attr::unstructured == domain->type))// || (this->isUnstructured(fieldId))) 343 388 { 389 // Make sure that if there is no local domain defined on a process, the process still reads just one value. 
390 int ni, ibegin; 391 if (domain->ni == 0) 392 { 393 ni = 1; 394 ibegin = 0; 395 } 396 else 397 { 398 ni = domain->ni; 399 ibegin = domain->ibegin; 400 } 401 344 402 std::vector<StdSize> nBeginLatLon(1,0), nSizeLatLon(1,0); 345 n SizeLatLon[0] = domain->ni_glo.getValue();346 CArray<double,1> globalLonLat(domain->ni_glo.getValue());403 nBeginLatLon[0] = ibegin; 404 nSizeLatLon[0] = ni; 347 405 348 406 StdString latName = this->getLatCoordName(fieldId); 349 407 if (SuperClassWriter::hasVariable(latName)) 350 408 { 351 domain->latvalue_unstructured_read_from_file.resize( domain->ni_glo);409 domain->latvalue_unstructured_read_from_file.resize(ni); 352 410 readFieldVariableValue(domain->latvalue_unstructured_read_from_file, latName, nBeginLatLon, nSizeLatLon); 353 411 } … … 356 414 if (SuperClassWriter::hasVariable(lonName)) //(0 != lonName.compare("")) 357 415 { 358 // readFieldVariableValue(globalLonLat, lonName, nBeginLatLon, nSizeLatLon); 359 domain->lonvalue_unstructured_read_from_file.resize(domain->ni_glo); 416 domain->lonvalue_unstructured_read_from_file.resize(ni); 360 417 readFieldVariableValue(domain->lonvalue_unstructured_read_from_file, lonName, nBeginLatLon, nSizeLatLon); 361 418 } … … 379 436 380 437 std::vector<StdSize> nBeginBndsLatLon(2), nSizeBndsLatLon(2); 381 nBeginBndsLatLon[0] = 0; nSizeBndsLatLon[0] = domain->ni_glo.getValue();438 nBeginBndsLatLon[0] = ibegin; nSizeBndsLatLon[0] = ni; 382 439 nBeginBndsLatLon[1] = 0; nSizeBndsLatLon[1] = nbVertex; 383 440 … … 496 553 497 554 { // Read axis value 498 std::vector<StdSize> nBegin(1, 0), nSize(1, itMapN->second); 499 CArray<double,1> readAxisValue(itMapN->second); 500 readFieldVariableValue(readAxisValue, itMapN->first, nBegin, nSize, true); 501 int begin = 0, n = itMapN->second; 502 if (!axis->begin.isEmpty()) begin = axis->begin.getValue(); 503 if (!axis->n.isEmpty()) n = axis->n.getValue(); 504 axis->value.resize(n); 505 for (int i = 0; i < n; ++i) axis->value(i) = readAxisValue(begin + i); 555 bool hasValue = SuperClassWriter::hasVariable(itMapN->first); 556 if (hasValue) 557 { 558 std::vector<StdSize> nBegin(1, 0), nSize(1, itMapN->second); 559 CArray<double,1> readAxisValue(itMapN->second); 560 readFieldVariableValue(readAxisValue, itMapN->first, nBegin, nSize, true); 561 int begin = 0, n = itMapN->second; 562 if (!axis->begin.isEmpty()) begin = axis->begin.getValue(); 563 if (!axis->n.isEmpty()) n = axis->n.getValue(); 564 axis->value.resize(n); 565 for (int i = 0; i < n; ++i) axis->value(i) = readAxisValue(begin + i); 566 } 506 567 } 507 568 } -
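The domain-reading changes above do two things: a process whose local domain is empty still reads a single value (ni = 1, ibegin = 0, and likewise for j), and the coordinate read now covers only the local slab (jbegin/ibegin, nj/ni) rather than the whole global field. A minimal sketch of that start/count computation follows, with hypothetical names outside the ni/nj/ibegin/jbegin quantities taken from the diff.

    #include <cstddef>
    #include <vector>

    // Compute the netCDF start/count vectors for a 2-D (lat, lon) coordinate read, as the
    // changeset now does for curvilinear domains: empty local domains fall back to a
    // single-cell read so every process still takes part in the read.
    void localLatLonSlab(int ni, int ibegin, int nj, int jbegin,
                         std::vector<std::size_t>& start, std::vector<std::size_t>& count)
    {
      if (ni == 0) { ni = 1; ibegin = 0; }   // no local cells: read one value at the origin
      if (nj == 0) { nj = 1; jbegin = 0; }

      start.resize(2); count.resize(2);
      start[0] = jbegin; count[0] = nj;      // slowest-varying dimension first (j, then i)
      start[1] = ibegin; count[1] = ni;
    }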
XIOS/dev/branch_openmp/src/io/nc4_data_output.cpp
r1328 r1460 1 1 #include "nc4_data_output.hpp" 2 2 3 #include <boost/lexical_cast.hpp>4 3 #include "attribute_template.hpp" 5 4 #include "group_template.hpp" … … 24 23 { 25 24 SuperClass::type = MULTI_FILE; 25 compressionLevel= file->compression_level.isEmpty() ? 0 :file->compression_level ; 26 26 } 27 27 … … 37 37 { 38 38 SuperClass::type = (multifile) ? MULTI_FILE : ONE_FILE; 39 if (file==NULL) compressionLevel = 0 ; 40 else compressionLevel= file->compression_level.isEmpty() ? 0 :file->compression_level ; 39 41 } 40 42 … … 53 55 void CNc4DataOutput::writeDomain_(CDomain* domain) 54 56 { 57 StdString lonName,latName ; 58 59 domain->computeWrittenIndex(); 60 domain->computeWrittenCompressedIndex(comm_file); 61 55 62 if (domain->type == CDomain::type_attr::unstructured) 56 63 { … … 76 83 if (isWrittenDomain(domid)) return ; 77 84 else setWrittenDomain(domid); 85 86 int nvertex = (domain->nvertex.isEmpty()) ? 0 : domain->nvertex; 78 87 79 88 … … 89 98 { 90 99 case CDomain::type_attr::curvilinear : 91 dimXid = StdString("x").append(appendDomid); 92 dimYid = StdString("y").append(appendDomid); 100 101 if (domain->lon_name.isEmpty()) lonName = "nav_lon"; 102 else lonName = domain->lon_name; 103 104 if (domain->lat_name.isEmpty()) latName = "nav_lat"; 105 else latName = domain->lat_name; 106 107 if (domain->dim_i_name.isEmpty()) dimXid=StdString("x").append(appendDomid); 108 else dimXid=domain->dim_i_name.getValue() + appendDomid; 109 110 if (domain->dim_j_name.isEmpty()) dimYid=StdString("y").append(appendDomid); 111 else dimYid=domain->dim_j_name.getValue() + appendDomid; 112 93 113 break ; 114 94 115 case CDomain::type_attr::rectilinear : 95 dimXid = StdString("lon").append(appendDomid); 96 dimYid = StdString("lat").append(appendDomid); 116 117 if (domain->lon_name.isEmpty()) 118 { 119 if (domain->dim_i_name.isEmpty()) 120 lonName = "lon"; 121 else 122 lonName = domain->dim_i_name.getValue(); 123 } 124 else lonName = domain->lon_name; 125 126 if (domain->lat_name.isEmpty()) 127 { 128 if (domain->dim_j_name.isEmpty()) 129 latName = "lat"; 130 else 131 latName = domain->dim_j_name.getValue(); 132 } 133 else latName = domain->lat_name; 134 135 if (domain->dim_i_name.isEmpty()) dimXid = lonName+appendDomid; 136 else dimXid = domain->dim_i_name.getValue()+appendDomid; 137 138 if (domain->dim_j_name.isEmpty()) dimYid = latName+appendDomid; 139 else dimYid = domain->dim_j_name.getValue()+appendDomid; 97 140 break; 98 141 } … … 111 154 */ 112 155 156 CArray<size_t, 1>& indexToWrite = domain->localIndexToWriteOnServer; 157 int nbWritten = indexToWrite.numElements(); 158 CArray<double,1> writtenLat, writtenLon; 159 CArray<double,2> writtenBndsLat, writtenBndsLon; 160 CArray<double,1> writtenArea; 161 162 if (domain->hasLonLat) 163 { 164 writtenLat.resize(nbWritten); 165 writtenLon.resize(nbWritten); 166 for (int idx = 0; idx < nbWritten; ++idx) 167 { 168 if (idx < domain->latvalue.numElements()) 169 { 170 writtenLat(idx) = domain->latvalue(indexToWrite(idx)); 171 writtenLon(idx) = domain->lonvalue(indexToWrite(idx)); 172 } 173 else 174 { 175 writtenLat(idx) = 0.; 176 writtenLon(idx) = 0.; 177 } 178 } 179 180 181 if (domain->hasBounds) 182 { 183 int nvertex = domain->nvertex, idx; 184 writtenBndsLat.resize(nvertex, nbWritten); 185 writtenBndsLon.resize(nvertex, nbWritten); 186 CArray<double,2>& boundslat = domain->bounds_latvalue; 187 CArray<double,2>& boundslon = domain->bounds_lonvalue; 188 for (idx = 0; idx < nbWritten; ++idx) 189 for (int nv = 0; nv < nvertex; ++nv) 190 { 191 if (idx < 
boundslat.columns()) 192 { 193 writtenBndsLat(nv, idx) = boundslat(nv, int(indexToWrite(idx))); 194 writtenBndsLon(nv, idx) = boundslon(nv, int(indexToWrite(idx))); 195 } 196 else 197 { 198 writtenBndsLat(nv, idx) = 0.; 199 writtenBndsLon(nv, idx) = 0.; 200 } 201 } 202 } 203 } 204 205 if (domain->hasArea) 206 { 207 writtenArea.resize(nbWritten); 208 for (int idx = 0; idx < nbWritten; ++idx) 209 { 210 if (idx < domain->areavalue.numElements()) 211 writtenArea(idx) = domain->areavalue(indexToWrite(idx)); 212 else 213 writtenArea(idx) = 0.; 214 } 215 } 216 113 217 try 114 218 { … … 129 233 case CDomain::type_attr::curvilinear : 130 234 dim0.push_back(dimYid); dim0.push_back(dimXid); 131 lonid = StdString("nav_lon").append(appendDomid);132 latid = StdString("nav_lat").append(appendDomid);235 lonid = lonName+appendDomid; 236 latid = latName+appendDomid; 133 237 break ; 134 238 case CDomain::type_attr::rectilinear : 135 lonid = StdString("lon").append(appendDomid);136 latid = StdString("lat").append(appendDomid);239 lonid = lonName+appendDomid; 240 latid = latName+appendDomid; 137 241 dim0.push_back(dimYid); 138 242 dim1.push_back(dimXid); 139 243 break; 140 244 } 141 142 bounds_lonid = StdString("bounds_lon").append(appendDomid); 143 bounds_latid = StdString("bounds_lat").append(appendDomid); 144 145 SuperClassWriter::addDimension(dimXid, domain->zoom_ni_srv); 146 SuperClassWriter::addDimension(dimYid, domain->zoom_nj_srv); 245 if (!domain->bounds_lon_name.isEmpty()) bounds_lonid = domain->bounds_lon_name; 246 else bounds_lonid = "bounds_"+lonName+appendDomid; 247 if (!domain->bounds_lat_name.isEmpty()) bounds_latid = domain->bounds_lat_name; 248 else bounds_latid = "bounds_"+latName+appendDomid; 249 250 SuperClassWriter::addDimension(dimXid, domain->zoom_ni); 251 SuperClassWriter::addDimension(dimYid, domain->zoom_nj); 147 252 148 253 if (domain->hasBounds) … … 151 256 if (server->intraCommSize > 1) 152 257 { 153 this->writeLocalAttributes(domain->zoom_ibegin _srv,154 domain->zoom_ni _srv,155 domain->zoom_jbegin _srv,156 domain->zoom_nj _srv,258 this->writeLocalAttributes(domain->zoom_ibegin, 259 domain->zoom_ni, 260 domain->zoom_jbegin, 261 domain->zoom_nj, 157 262 appendDomid); 158 263 159 264 if (singleDomain) 160 265 this->writeLocalAttributes_IOIPSL(dimXid, dimYid, 161 domain->zoom_ibegin _srv,162 domain->zoom_ni _srv,163 domain->zoom_jbegin _srv,164 domain->zoom_nj _srv,266 domain->zoom_ibegin, 267 domain->zoom_ni, 268 domain->zoom_jbegin, 269 domain->zoom_nj, 165 270 domain->ni_glo,domain->nj_glo, 166 271 server->intraCommRank,server->intraCommSize); … … 172 277 { 173 278 case CDomain::type_attr::curvilinear : 174 SuperClassWriter::addVariable(latid, typePrec, dim0 );175 SuperClassWriter::addVariable(lonid, typePrec, dim0 );279 SuperClassWriter::addVariable(latid, typePrec, dim0, compressionLevel); 280 SuperClassWriter::addVariable(lonid, typePrec, dim0, compressionLevel); 176 281 break ; 177 282 case CDomain::type_attr::rectilinear : 178 SuperClassWriter::addVariable(latid, typePrec, dim0 );179 SuperClassWriter::addVariable(lonid, typePrec, dim1 );283 SuperClassWriter::addVariable(lat