Changeset 1468
Timestamp: 03/27/18 20:40:31 (7 years ago)
Location: XIOS/dev/branch_openmp
Files: 10 edited
Legend: context lines are unmodified; lines prefixed with + were added, lines prefixed with - were removed.
XIOS/dev/branch_openmp/bld.cfg
r1464 → r1468. Build targets are switched so that only the OpenMP/endpoint variants of the tests and the toy model remain enabled:

     bld::target libxios.a
     #bld::target generate_fortran_interface.exe
    -bld::target xios_server.exe
    +#bld::target xios_server.exe
     #bld::target test_regular.exe
     #bld::target test_expand_domain.exe
     #bld::target test_new_features.exe
    -bld::target test_unstruct_complete.exe
    -#bld::target test_omp.exe
    +#bld::target test_unstruct_complete.exe
    +bld::target test_omp.exe
     bld::target test_complete_omp.exe
    -bld::target test_remap.exe
    -bld::target test_remap_ref.exe
    +#bld::target test_remap.exe
    +#bld::target test_remap_ref.exe
     bld::target test_remap_omp.exe
     #bld::target test_unstruct_omp.exe
     ...
     #bld::target test_xios2_cmip6.exe
     #bld::target test_connectivity_expand.exe
    -bld::target toy_cmip6.exe
    +#bld::target toy_cmip6.exe
     bld::target toy_cmip6_omp.exe
     bld::exe_dep
XIOS/dev/branch_openmp/extern/remap/src/mapper.cpp
r1463 → r1468. Most of this diff is whitespace: cptOffsetsFromLengths, loadSourceGrid, loadTargetGrid, computeWeights and Mapper::remap are re-indented with no functional change. The small helper at the top of the file, identical on both sides of the diff, converts per-block lengths into exclusive start offsets:

    void cptOffsetsFromLengths(const int *lengths, int *offsets, int sz)
    {
      offsets[0] = 0;
      for (int i = 1; i < sz; i++)
        offsets[i] = offsets[i-1] + lengths[i-1];
    }
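As a minimal standalone illustration of what that helper produces (the lengths and the assertions below are illustrative values, not from the changeset): blocks of sizes 3, 1, 4, 2 packed back to back start at offsets 0, 3, 4, 8.

    #include <cassert>

    // Exclusive prefix sum: offsets[i] is where block i starts
    // when blocks of the given lengths are packed contiguously.
    void cptOffsetsFromLengths(const int *lengths, int *offsets, int sz)
    {
      offsets[0] = 0;
      for (int i = 1; i < sz; i++)
        offsets[i] = offsets[i-1] + lengths[i-1];
    }

    int main()
    {
      const int lengths[4] = {3, 1, 4, 2};
      int offsets[4];
      cptOffsetsFromLengths(lengths, offsets, 4);
      assert(offsets[0] == 0 && offsets[1] == 3);
      assert(offsets[2] == 4 && offsets[3] == 8);
      return 0;
    }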
In Mapper::computeWeights the only non-whitespace change is the brace style around the second-order branch that prints "Computing grads ...". Mapper::remap gains braces around the elementList insertion and a blank line after the MPI_Alltoall of element counts; its communication pattern is otherwise untouched: per-peer counts are exchanged with MPI_Alltoall, the variable-length index lists then travel through matched nonblocking MPI_Issend/MPI_Irecv pairs, and MPI_Waitall completes both request arrays before the buffers are reused.
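A sketch of that handshake in isolation, assuming plain MPI and toy payload sizes (none of the XIOS types; rank r requests (r+p) % 3 dummy indices from peer p):

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv)
    {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      std::vector<int> nbSend(size), nbRecv(size);
      std::vector<std::vector<int> > sendIdx(size), recvIdx(size);
      for (int p = 0; p < size; p++)
      {
        nbSend[p] = (rank + p) % 3;        // toy request sizes
        sendIdx[p].assign(nbSend[p], rank); // dummy index payload
      }

      // Step 1: every rank learns how many indices each peer will send it.
      MPI_Alltoall(&nbSend[0], 1, MPI_INT, &nbRecv[0], 1, MPI_INT, MPI_COMM_WORLD);

      // Step 2: matched nonblocking sends/receives of the lists themselves.
      std::vector<MPI_Request> req(2 * size);
      int nReq = 0;
      for (int p = 0; p < size; p++)
      {
        if (nbSend[p] > 0)
          MPI_Issend(&sendIdx[p][0], nbSend[p], MPI_INT, p, 0,
                     MPI_COMM_WORLD, &req[nReq++]);
        if (nbRecv[p] > 0)
        {
          recvIdx[p].resize(nbRecv[p]);
          MPI_Irecv(&recvIdx[p][0], nbRecv[p], MPI_INT, p, 0,
                    MPI_COMM_WORLD, &req[nReq++]);
        }
      }
      // Complete all transfers before touching the buffers again.
      MPI_Waitall(nReq, &req[0], MPI_STATUSES_IGNORE);

      MPI_Finalize();
      return 0;
    }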
The loop that packs sendValue/sendArea/sendGrad/sendNeighIds for each requesting rank drops two commented-out debug lines of the form

    // cout<<"grad "<<jj<<" "<<sendGrad[rank][jj]<<" "<<sstree.localElements[recvElement[rank][j]].grad<<endl ;

and a similar commented cout of mpiRank and nRecv disappears near mpiRoute.getTotalSourceElement(). The weight computation itself is unchanged: for each target element, contributions from the same source element arriving through several intersections (and, at second order, through the gradients of up to NMAX neighbours) are accumulated in wgt_map keyed by the source GloId; each accumulated weight is then divided by the target area e.area (skipped when quantity is set, in which case the supermesh area was already divided by the source area) and by the optional renormalization factor before being written to remapMatrix, srcAddress, srcRank, dstAddress and the weight-id arrays. The memory cleanup at the end of the function is only re-indented.
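The accumulation idea, reduced to a standalone toy (the ids, areas and the first-order-only handling below are illustrative, not the XIOS types): several intersection polygons can connect the same (source, target) pair, so partial weights are summed per source id before the final division by the target cell area.

    #include <cstdio>
    #include <map>
    #include <utility>

    int main()
    {
      // (source id, supermesh area) contributions for one target cell;
      // source 7 contributes through two distinct intersections.
      const std::pair<long, double> contrib[] = {
        std::make_pair(7L, 0.2), std::make_pair(9L, 0.1), std::make_pair(7L, 0.3)
      };
      const double targetArea = 1.2;

      std::map<long, double> wgt_map;     // one entry per source cell
      for (const auto& c : contrib)
        wgt_map[c.first] += c.second;     // accumulate duplicate paths

      for (const auto& w : wgt_map)       // final first-order weights
        std::printf("src %ld -> weight %g\n", w.first, w.second / targetArea);
      return 0;
    }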
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_dup.cpp
r1354 → r1468. The substantive fix of this changeset: duplicating an endpoint intercommunicator used to alias the three rank maps of the source communicator by pointer assignment, so a duplicate shared the originals' state. The duplicate now fills its freshly allocated maps element by element; the old assignments are kept as comments:

     out_comm[i].ep_comm_ptr->intercomm->remote_rank_map = new RANK_MAP;

    -out_comm[i].ep_comm_ptr->intercomm->intercomm_rank_map = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->intercomm_rank_map;
    -out_comm[i].ep_comm_ptr->intercomm->local_rank_map = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->local_rank_map;
    -out_comm[i].ep_comm_ptr->intercomm->remote_rank_map = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->remote_rank_map;
    +int map_size = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->intercomm_rank_map->size();
    +out_comm[i].ep_comm_ptr->intercomm->intercomm_rank_map->resize(map_size);
    +for(int ii=0; ii<map_size; ii++)
    +  out_comm[i].ep_comm_ptr->intercomm->intercomm_rank_map->at(ii) = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->intercomm_rank_map->at(ii);
    +... (local_rank_map and remote_rank_map are copied the same way)

The line below the hunk is unchanged and still aliases local_comm:

     out_comm[i].ep_comm_ptr->intercomm->local_comm = comm.ep_comm_ptr->comm_list[i].ep_comm_ptr->intercomm->local_comm;
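Why the change matters, as a standalone toy. RANK_MAP is assumed here to be a std::vector-like type of rank pairs, which is what the resize()/at() calls in the diff suggest; the struct and field names are simplified from the diff:

    #include <cassert>
    #include <vector>
    #include <utility>

    typedef std::vector<std::pair<int,int> > RANK_MAP;

    struct Intercomm { RANK_MAP* local_rank_map; };

    int main()
    {
      Intercomm src;
      src.local_rank_map = new RANK_MAP(1, std::make_pair(0, 42));

      // Shallow aliasing (old code): both objects point at one map,
      // so mutating through one silently changes the other.
      Intercomm shallow;
      shallow.local_rank_map = src.local_rank_map;
      shallow.local_rank_map->at(0).second = 7;
      assert(src.local_rank_map->at(0).second == 7);

      // Deep copy (new code): element-wise copy into a fresh map,
      // so each duplicate owns independent state.
      Intercomm deep;
      deep.local_rank_map = new RANK_MAP;
      deep.local_rank_map->resize(src.local_rank_map->size());
      for (size_t i = 0; i < src.local_rank_map->size(); i++)
        deep.local_rank_map->at(i) = src.local_rank_map->at(i);
      deep.local_rank_map->at(0).second = 99;
      assert(src.local_rank_map->at(0).second == 7); // src unaffected

      delete src.local_rank_map;  // shallow aliases this; delete once only
      delete deep.local_rank_map;
      return 0;
    }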
XIOS/dev/branch_openmp/extern/src_ep_dev/ep_free.cpp
r1379 → r1468. Whitespace only: a stray blank line at the top of MPI_Comm_free, before the if(! comm->is_ep) test, is removed.
XIOS/dev/branch_openmp/inputs/COMPLETE/iodef.xml
r718 → r1468. A "server" variable group is added after the buffer settings, switching on the two-level server scheme; ratio_server2 appears to set the percentage of server processes assigned to the second level:

    +<variable_group id="server">
    +  <variable id="using_server2" type="bool">true</variable>
    +  <variable id="ratio_server2" type="int">50</variable>
    +</variable_group>
XIOS/dev/branch_openmp/inputs/REMAP/iodef.xml
r1463 → r1468. The REMAP input flips the second-level server on:

     <variable_group id="server">
    -  <variable id="using_server2" type="bool">false</variable>
    +  <variable id="using_server2" type="bool">true</variable>
       <variable id="ratio_server2" type="int">50</variable>
     </variable_group>
XIOS/dev/branch_openmp/inputs/TOY/iodef.xml
r1460 → r1468. The same switch for the TOY input, which also gains the ratio setting:

     <variable_group id="server">
    -  <variable id="using_server2" type="bool">false</variable>
    +  <variable id="using_server2" type="bool">true</variable>
    +  <variable id="ratio_server2" type="int">50</variable>
     </variable_group>
XIOS/dev/branch_openmp/inputs/iodef.xml
r1134 → r1468. In the "parameters" group of the top-level input, the old using_server flag is replaced by the two-level server settings:

    -<variable id="using_server" type="bool">false</variable>
    +<variable id="using_server2" type="bool">true</variable>
    +<variable id="ratio_server2" type="int">50</variable>
     <variable id="info_level" type="int">50</variable>
     <variable id="print_file" type="bool">true</variable>
XIOS/dev/branch_openmp/src/node/context.cpp
r1460 → r1468. Context initialization is adjusted for the two server levels: the client branch now runs only at level 0 (it previously ran for anything but level 1, hence also on second-level servers), and the former catch-all else becomes an explicit test for the primary server:

    -if (CServer::serverLevel != 1)
    +//if (CServer::serverLevel != 1)
    +if (CServer::serverLevel == 0)
     // initClient is called by client
     {
       client = new CContextClient(this, intraComm, interComm, cxtServer);
       if (cxtServer) // Attached mode
       ...
       server = new CContextServer(this, intraCommServer, interCommServer);
     }
    -else
    +else if (CServer::serverLevel == 1)
     // initClient is called by primary server
     {
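One possible reading of the new dispatch, as a sketch (names simplified from the diff; the level-2 behavior of the remaining branch is an assumption, inferred from the two explicit tests and the comments in the hunk):

    #include <cstdio>

    // serverLevel 0: client process; 1: primary-level server;
    // otherwise (assumed level 2): the remaining branch.
    void setupContext(int serverLevel)
    {
      if (serverLevel == 0)        // was: serverLevel != 1
        std::printf("client: create CContextClient\n");
      else if (serverLevel == 1)   // was: a plain else
        std::printf("primary server branch\n");
      else
        std::printf("secondary server: remaining branch\n");
    }

    int main()
    {
      for (int lvl = 0; lvl < 3; lvl++) setupContext(lvl);
      return 0;
    }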
XIOS/dev/branch_openmp/src/test/test_remap_omp.f90
r1463 → r1468. An extra receive of the regular source field is inserted before the calendar update (duplicating the surrounding commented-out receives along with it):

     !CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
     !CALL xios_recv_field("src_field_unstructured", tmp_field_2)
    +CALL xios_recv_field("src_field_regular", tmp_field_0)
    +!CALL xios_recv_field("src_field_curvilinear", tmp_field_1)
    +!CALL xios_recv_field("src_field_unstructured", tmp_field_2)
     CALL xios_update_calendar(ts)